Merge pull request #17172 from ziglang/ip-structs
compiler: move struct types into InternPool proper

Commit: a8d2ed8065
41 changed files with 3630 additions and 2900 deletions
@@ -4758,6 +4758,9 @@ fn structDeclInner(
         .known_non_opv = false,
         .known_comptime_only = false,
         .is_tuple = false,
+        .any_comptime_fields = false,
+        .any_default_inits = false,
+        .any_aligned_fields = false,
     });
     return indexToRef(decl_inst);
 }
@@ -4881,6 +4884,9 @@ fn structDeclInner(

     var known_non_opv = false;
     var known_comptime_only = false;
+    var any_comptime_fields = false;
+    var any_aligned_fields = false;
+    var any_default_inits = false;
     for (container_decl.ast.members) |member_node| {
         var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
             .decl => continue,
@@ -4910,13 +4916,13 @@ fn structDeclInner(
         const have_value = member.ast.value_expr != 0;
         const is_comptime = member.comptime_token != null;

-        if (is_comptime and layout == .Packed) {
-            return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{});
-        } else if (is_comptime and layout == .Extern) {
-            return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{});
-        }
-
-        if (!is_comptime) {
+        if (is_comptime) {
+            switch (layout) {
+                .Packed => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{}),
+                .Extern => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
+                .Auto => any_comptime_fields = true,
+            }
+        } else {
             known_non_opv = known_non_opv or
                 nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
             known_comptime_only = known_comptime_only or
@@ -4942,6 +4948,7 @@ fn structDeclInner(
             if (layout == .Packed) {
                 try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
             }
+            any_aligned_fields = true;
             const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr);
             if (!block_scope.endsWithNoReturn()) {
                 _ = try block_scope.addBreak(.break_inline, decl_inst, align_ref);
@@ -4955,6 +4962,7 @@ fn structDeclInner(
         }

         if (have_value) {
+            any_default_inits = true;
             const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = field_type } };

             const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr);
@@ -4982,6 +4990,9 @@ fn structDeclInner(
         .known_non_opv = known_non_opv,
         .known_comptime_only = known_comptime_only,
         .is_tuple = is_tuple,
+        .any_comptime_fields = any_comptime_fields,
+        .any_default_inits = any_default_inits,
+        .any_aligned_fields = any_aligned_fields,
     });

     wip_members.finishBits(bits_per_field);
@@ -12080,6 +12091,9 @@ const GenZir = struct {
         known_non_opv: bool,
         known_comptime_only: bool,
         is_tuple: bool,
+        any_comptime_fields: bool,
+        any_default_inits: bool,
+        any_aligned_fields: bool,
     }) !void {
         const astgen = gz.astgen;
         const gpa = astgen.gpa;
@@ -12117,6 +12131,9 @@ const GenZir = struct {
                 .is_tuple = args.is_tuple,
                 .name_strategy = gz.anon_name_strategy,
                 .layout = args.layout,
+                .any_comptime_fields = args.any_comptime_fields,
+                .any_default_inits = args.any_default_inits,
+                .any_aligned_fields = args.any_aligned_fields,
             }),
             .operand = payload_index,
         } },
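The three new flags record, for the whole declaration, whether any field is a comptime field, has a default initializer, or carries an explicit alignment. A hypothetical declaration, not taken from this change, that would set all three:

const Example = struct {
    comptime kind: u8 = 3, // any_comptime_fields (a comptime field always has an init expression)
    len: u32 = 0, // any_default_inits
    buf: [16]u8 align(16) = undefined, // any_aligned_fields (and any_default_inits)
};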
src/InternPool.zig: 1106 changed lines, file diff suppressed because it is too large

src/Module.zig: 576 changed lines
@ -105,8 +105,6 @@ comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternP
|
|||
|
||||
/// To be eliminated in a future commit by moving more data into InternPool.
|
||||
/// Current uses that must be eliminated:
|
||||
/// * Struct comptime_args
|
||||
/// * Struct optimized_order
|
||||
/// * comptime pointer mutation
|
||||
/// This memory lives until the Module is destroyed.
|
||||
tmp_hack_arena: std.heap.ArenaAllocator,
|
||||
|
|
@ -678,14 +676,10 @@ pub const Decl = struct {
|
|||
|
||||
/// If the Decl owns its value and it is a struct, return it,
|
||||
/// otherwise null.
|
||||
pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
|
||||
return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
|
||||
}
|
||||
|
||||
pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
|
||||
if (!decl.owns_tv) return .none;
|
||||
if (decl.val.ip_index == .none) return .none;
|
||||
return mod.intern_pool.indexToStructType(decl.val.toIntern());
|
||||
pub fn getOwnedStruct(decl: Decl, mod: *Module) ?InternPool.Key.StructType {
|
||||
if (!decl.owns_tv) return null;
|
||||
if (decl.val.ip_index == .none) return null;
|
||||
return mod.typeToStruct(decl.val.toType());
|
||||
}
|
||||
|
||||
/// If the Decl owns its value and it is a union, return it,
|
||||
|
|
@@ -795,9 +789,10 @@ pub const Decl = struct {
         return decl.getExternDecl(mod) != .none;
     }

-    pub fn getAlignment(decl: Decl, mod: *Module) u32 {
+    pub fn getAlignment(decl: Decl, mod: *Module) Alignment {
         assert(decl.has_tv);
-        return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
+        if (decl.alignment != .none) return decl.alignment;
+        return decl.ty.abiAlignment(mod);
     }
 };

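Many changes in this file replace raw byte-unit u32 alignments with InternPool.Alignment, where .none means "no explicit alignment, fall back to the type's ABI alignment". A minimal sketch of that representation, assuming a log2-encoded enum as used by the compiler; this is an illustration, not the actual InternPool definition:

const std = @import("std");

const Alignment = enum(u6) {
    none = std.math.maxInt(u6),
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    _,

    fn toByteUnits(a: Alignment) u64 {
        std.debug.assert(a != .none);
        return @as(u64, 1) << @intFromEnum(a);
    }

    // Round an address or size up to this alignment.
    fn forward(a: Alignment, addr: u64) u64 {
        return std.mem.alignForward(u64, addr, a.toByteUnits());
    }
};

test "forward rounds up to the alignment" {
    try std.testing.expect(Alignment.@"8".forward(9) == 16);
}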
@ -806,218 +801,6 @@ pub const EmitH = struct {
|
|||
fwd_decl: ArrayListUnmanaged(u8) = .{},
|
||||
};
|
||||
|
||||
pub const PropertyBoolean = enum { no, yes, unknown, wip };
|
||||
|
||||
/// Represents the data that a struct declaration provides.
|
||||
pub const Struct = struct {
|
||||
/// Set of field names in declaration order.
|
||||
fields: Fields,
|
||||
/// Represents the declarations inside this struct.
|
||||
namespace: Namespace.Index,
|
||||
/// The Decl that corresponds to the struct itself.
|
||||
owner_decl: Decl.Index,
|
||||
/// Index of the struct_decl ZIR instruction.
|
||||
zir_index: Zir.Inst.Index,
|
||||
/// Indexes into `fields` sorted to be most memory efficient.
|
||||
optimized_order: ?[*]u32 = null,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
/// If the layout is not packed, this is the noreturn type.
|
||||
/// If the layout is packed, this is the backing integer type of the packed struct.
|
||||
/// Whether zig chooses this type or the user specifies it, it is stored here.
|
||||
/// This will be set to the noreturn type until status is `have_layout`.
|
||||
backing_int_ty: Type = Type.noreturn,
|
||||
status: enum {
|
||||
none,
|
||||
field_types_wip,
|
||||
have_field_types,
|
||||
layout_wip,
|
||||
have_layout,
|
||||
fully_resolved_wip,
|
||||
// The types and all its fields have had their layout resolved. Even through pointer,
|
||||
// which `have_layout` does not ensure.
|
||||
fully_resolved,
|
||||
},
|
||||
/// If true, has more than one possible value. However it may still be non-runtime type
|
||||
/// if it is a comptime-only type.
|
||||
/// If false, resolving the fields is necessary to determine whether the type has only
|
||||
/// one possible value.
|
||||
known_non_opv: bool,
|
||||
requires_comptime: PropertyBoolean = .unknown,
|
||||
have_field_inits: bool = false,
|
||||
is_tuple: bool,
|
||||
assumed_runtime_bits: bool = false,
|
||||
|
||||
pub const Index = enum(u32) {
|
||||
_,
|
||||
|
||||
pub fn toOptional(i: Index) OptionalIndex {
|
||||
return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
|
||||
}
|
||||
};
|
||||
|
||||
pub const OptionalIndex = enum(u32) {
|
||||
none = std.math.maxInt(u32),
|
||||
_,
|
||||
|
||||
pub fn init(oi: ?Index) OptionalIndex {
|
||||
return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
|
||||
}
|
||||
|
||||
pub fn unwrap(oi: OptionalIndex) ?Index {
|
||||
if (oi == .none) return null;
|
||||
return @as(Index, @enumFromInt(@intFromEnum(oi)));
|
||||
}
|
||||
};
|
||||
|
||||
pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
|
||||
|
||||
/// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
|
||||
pub const Field = struct {
|
||||
/// Uses `noreturn` to indicate `anytype`.
|
||||
/// undefined until `status` is >= `have_field_types`.
|
||||
ty: Type,
|
||||
/// Uses `none` to indicate no default.
|
||||
default_val: InternPool.Index,
|
||||
/// Zero means to use the ABI alignment of the type.
|
||||
abi_align: Alignment,
|
||||
/// undefined until `status` is `have_layout`.
|
||||
offset: u32,
|
||||
/// If true then `default_val` is the comptime field value.
|
||||
is_comptime: bool,
|
||||
|
||||
/// Returns the field alignment. If the struct is packed, returns 0.
|
||||
/// Keep implementation in sync with `Sema.structFieldAlignment`.
|
||||
pub fn alignment(
|
||||
field: Field,
|
||||
mod: *Module,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
) u32 {
|
||||
if (field.abi_align.toByteUnitsOptional()) |abi_align| {
|
||||
assert(layout != .Packed);
|
||||
return @as(u32, @intCast(abi_align));
|
||||
}
|
||||
|
||||
const target = mod.getTarget();
|
||||
|
||||
switch (layout) {
|
||||
.Packed => return 0,
|
||||
.Auto => {
|
||||
if (target.ofmt == .c) {
|
||||
return alignmentExtern(field, mod);
|
||||
} else {
|
||||
return field.ty.abiAlignment(mod);
|
||||
}
|
||||
},
|
||||
.Extern => return alignmentExtern(field, mod),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn alignmentExtern(field: Field, mod: *Module) u32 {
|
||||
// This logic is duplicated in Type.abiAlignmentAdvanced.
|
||||
const ty_abi_align = field.ty.abiAlignment(mod);
|
||||
|
||||
if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
|
||||
// The C ABI requires 128 bit integer fields of structs
|
||||
// to be 16-bytes aligned.
|
||||
return @max(ty_abi_align, 16);
|
||||
}
|
||||
|
||||
return ty_abi_align;
|
||||
}
|
||||
};
|
||||
|
||||
/// Used in `optimized_order` to indicate field that is not present in the
|
||||
/// runtime version of the struct.
|
||||
pub const omitted_field = std.math.maxInt(u32);
|
||||
|
||||
pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
|
||||
return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
|
||||
}
|
||||
|
||||
pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
|
||||
return mod.declPtr(s.owner_decl).srcLoc(mod);
|
||||
}
|
||||
|
||||
pub fn haveFieldTypes(s: Struct) bool {
|
||||
return switch (s.status) {
|
||||
.none,
|
||||
.field_types_wip,
|
||||
=> false,
|
||||
.have_field_types,
|
||||
.layout_wip,
|
||||
.have_layout,
|
||||
.fully_resolved_wip,
|
||||
.fully_resolved,
|
||||
=> true,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn haveLayout(s: Struct) bool {
|
||||
return switch (s.status) {
|
||||
.none,
|
||||
.field_types_wip,
|
||||
.have_field_types,
|
||||
.layout_wip,
|
||||
=> false,
|
||||
.have_layout,
|
||||
.fully_resolved_wip,
|
||||
.fully_resolved,
|
||||
=> true,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
|
||||
assert(s.layout == .Packed);
|
||||
assert(s.haveLayout());
|
||||
var bit_sum: u64 = 0;
|
||||
for (s.fields.values(), 0..) |field, i| {
|
||||
if (i == index) {
|
||||
return @as(u16, @intCast(bit_sum));
|
||||
}
|
||||
bit_sum += field.ty.bitSize(mod);
|
||||
}
|
||||
unreachable; // index out of bounds
|
||||
}
|
||||
|
||||
pub const RuntimeFieldIterator = struct {
|
||||
module: *Module,
|
||||
struct_obj: *const Struct,
|
||||
index: u32 = 0,
|
||||
|
||||
pub const FieldAndIndex = struct {
|
||||
field: Field,
|
||||
index: u32,
|
||||
};
|
||||
|
||||
pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
|
||||
const mod = it.module;
|
||||
while (true) {
|
||||
var i = it.index;
|
||||
it.index += 1;
|
||||
if (it.struct_obj.fields.count() <= i)
|
||||
return null;
|
||||
|
||||
if (it.struct_obj.optimized_order) |some| {
|
||||
i = some[i];
|
||||
if (i == Module.Struct.omitted_field) return null;
|
||||
}
|
||||
const field = it.struct_obj.fields.values()[i];
|
||||
|
||||
if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
|
||||
return FieldAndIndex{ .index = i, .field = field };
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
|
||||
return .{
|
||||
.struct_obj = s,
|
||||
.module = module,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const DeclAdapter = struct {
|
||||
mod: *Module,
|
||||
|
||||
|
|
@ -2893,20 +2676,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
|
|||
return mod.intern_pool.namespacePtr(index);
|
||||
}
|
||||
|
||||
pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
|
||||
return mod.intern_pool.structPtr(index);
|
||||
}
|
||||
|
||||
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
|
||||
return mod.namespacePtr(index.unwrap() orelse return null);
|
||||
}
|
||||
|
||||
/// This one accepts an index from the InternPool and asserts that it is not
|
||||
/// the anonymous empty struct type.
|
||||
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
|
||||
return mod.structPtr(index.unwrap() orelse return null);
|
||||
}
|
||||
|
||||
/// Returns true if and only if the Decl is the top level struct associated with a File.
|
||||
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
|
||||
const decl = mod.declPtr(decl_index);
|
||||
|
|
@ -3351,11 +3124,11 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
|
|||
|
||||
if (!decl.owns_tv) continue;
|
||||
|
||||
if (decl.getOwnedStruct(mod)) |struct_obj| {
|
||||
struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
|
||||
if (decl.getOwnedStruct(mod)) |struct_type| {
|
||||
struct_type.setZirIndex(ip, inst_map.get(struct_type.zir_index) orelse {
|
||||
try file.deleted_decls.append(gpa, decl_index);
|
||||
continue;
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
if (decl.getOwnedUnion(mod)) |union_type| {
|
||||
|
|
@ -3870,36 +3643,16 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
|
|||
const new_decl = mod.declPtr(new_decl_index);
|
||||
errdefer @panic("TODO error handling");
|
||||
|
||||
const struct_index = try mod.createStruct(.{
|
||||
.owner_decl = new_decl_index,
|
||||
.fields = .{},
|
||||
.zir_index = undefined, // set below
|
||||
.layout = .Auto,
|
||||
.status = .none,
|
||||
.known_non_opv = undefined,
|
||||
.is_tuple = undefined, // set below
|
||||
.namespace = new_namespace_index,
|
||||
});
|
||||
errdefer mod.destroyStruct(struct_index);
|
||||
|
||||
const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
|
||||
.index = struct_index.toOptional(),
|
||||
.namespace = new_namespace_index.toOptional(),
|
||||
} });
|
||||
// TODO: figure out InternPool removals for incremental compilation
|
||||
//errdefer mod.intern_pool.remove(struct_ty);
|
||||
|
||||
new_namespace.ty = struct_ty.toType();
|
||||
file.root_decl = new_decl_index.toOptional();
|
||||
|
||||
new_decl.name = try file.fullyQualifiedName(mod);
|
||||
new_decl.name_fully_qualified = true;
|
||||
new_decl.src_line = 0;
|
||||
new_decl.is_pub = true;
|
||||
new_decl.is_exported = false;
|
||||
new_decl.has_align = false;
|
||||
new_decl.has_linksection_or_addrspace = false;
|
||||
new_decl.ty = Type.type;
|
||||
new_decl.val = struct_ty.toValue();
|
||||
new_decl.alignment = .none;
|
||||
new_decl.@"linksection" = .none;
|
||||
new_decl.has_tv = true;
|
||||
|
|
@ -3907,75 +3660,76 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
|
|||
new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
|
||||
new_decl.analysis = .in_progress;
|
||||
new_decl.generation = mod.generation;
|
||||
new_decl.name_fully_qualified = true;
|
||||
|
||||
if (file.status == .success_zir) {
|
||||
assert(file.zir_loaded);
|
||||
const main_struct_inst = Zir.main_struct_inst;
|
||||
const struct_obj = mod.structPtr(struct_index);
|
||||
struct_obj.zir_index = main_struct_inst;
|
||||
const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
|
||||
const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
|
||||
struct_obj.is_tuple = small.is_tuple;
|
||||
|
||||
var sema_arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer sema_arena.deinit();
|
||||
const sema_arena_allocator = sema_arena.allocator();
|
||||
|
||||
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
|
||||
defer comptime_mutable_decls.deinit();
|
||||
|
||||
var sema: Sema = .{
|
||||
.mod = mod,
|
||||
.gpa = gpa,
|
||||
.arena = sema_arena_allocator,
|
||||
.code = file.zir,
|
||||
.owner_decl = new_decl,
|
||||
.owner_decl_index = new_decl_index,
|
||||
.func_index = .none,
|
||||
.func_is_naked = false,
|
||||
.fn_ret_ty = Type.void,
|
||||
.fn_ret_ty_ies = null,
|
||||
.owner_func_index = .none,
|
||||
.comptime_mutable_decls = &comptime_mutable_decls,
|
||||
};
|
||||
defer sema.deinit();
|
||||
|
||||
if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
|
||||
for (comptime_mutable_decls.items) |decl_index| {
|
||||
const decl = mod.declPtr(decl_index);
|
||||
_ = try decl.internValue(mod);
|
||||
}
|
||||
new_decl.analysis = .complete;
|
||||
} else |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => {},
|
||||
}
|
||||
|
||||
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
|
||||
const source = file.getSource(gpa) catch |err| {
|
||||
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
|
||||
return error.AnalysisFail;
|
||||
};
|
||||
|
||||
const resolved_path = std.fs.path.resolve(
|
||||
gpa,
|
||||
if (file.pkg.root_src_directory.path) |pkg_path|
|
||||
&[_][]const u8{ pkg_path, file.sub_file_path }
|
||||
else
|
||||
&[_][]const u8{file.sub_file_path},
|
||||
) catch |err| {
|
||||
try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
|
||||
return error.AnalysisFail;
|
||||
};
|
||||
errdefer gpa.free(resolved_path);
|
||||
|
||||
mod.comp.whole_cache_manifest_mutex.lock();
|
||||
defer mod.comp.whole_cache_manifest_mutex.unlock();
|
||||
try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
|
||||
}
|
||||
} else {
|
||||
if (file.status != .success_zir) {
|
||||
new_decl.analysis = .file_failure;
|
||||
return;
|
||||
}
|
||||
assert(file.zir_loaded);
|
||||
|
||||
var sema_arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer sema_arena.deinit();
|
||||
const sema_arena_allocator = sema_arena.allocator();
|
||||
|
||||
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
|
||||
defer comptime_mutable_decls.deinit();
|
||||
|
||||
var sema: Sema = .{
|
||||
.mod = mod,
|
||||
.gpa = gpa,
|
||||
.arena = sema_arena_allocator,
|
||||
.code = file.zir,
|
||||
.owner_decl = new_decl,
|
||||
.owner_decl_index = new_decl_index,
|
||||
.func_index = .none,
|
||||
.func_is_naked = false,
|
||||
.fn_ret_ty = Type.void,
|
||||
.fn_ret_ty_ies = null,
|
||||
.owner_func_index = .none,
|
||||
.comptime_mutable_decls = &comptime_mutable_decls,
|
||||
};
|
||||
defer sema.deinit();
|
||||
|
||||
const main_struct_inst = Zir.main_struct_inst;
|
||||
const struct_ty = sema.getStructType(
|
||||
new_decl_index,
|
||||
new_namespace_index,
|
||||
main_struct_inst,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
};
|
||||
// TODO: figure out InternPool removals for incremental compilation
|
||||
//errdefer ip.remove(struct_ty);
|
||||
for (comptime_mutable_decls.items) |decl_index| {
|
||||
const decl = mod.declPtr(decl_index);
|
||||
_ = try decl.internValue(mod);
|
||||
}
|
||||
|
||||
new_namespace.ty = struct_ty.toType();
|
||||
new_decl.val = struct_ty.toValue();
|
||||
new_decl.analysis = .complete;
|
||||
|
||||
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
|
||||
const source = file.getSource(gpa) catch |err| {
|
||||
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
|
||||
return error.AnalysisFail;
|
||||
};
|
||||
|
||||
const resolved_path = std.fs.path.resolve(
|
||||
gpa,
|
||||
if (file.pkg.root_src_directory.path) |pkg_path|
|
||||
&[_][]const u8{ pkg_path, file.sub_file_path }
|
||||
else
|
||||
&[_][]const u8{file.sub_file_path},
|
||||
) catch |err| {
|
||||
try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
|
||||
return error.AnalysisFail;
|
||||
};
|
||||
errdefer gpa.free(resolved_path);
|
||||
|
||||
mod.comp.whole_cache_manifest_mutex.lock();
|
||||
defer mod.comp.whole_cache_manifest_mutex.unlock();
|
||||
try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4055,18 +3809,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
|
|||
};
|
||||
defer sema.deinit();
|
||||
|
||||
if (mod.declIsRoot(decl_index)) {
|
||||
const main_struct_inst = Zir.main_struct_inst;
|
||||
const struct_index = decl.getOwnedStructIndex(mod).unwrap().?;
|
||||
const struct_obj = mod.structPtr(struct_index);
|
||||
// This might not have gotten set in `semaFile` if the first time had
|
||||
// a ZIR failure, so we set it here in case.
|
||||
struct_obj.zir_index = main_struct_inst;
|
||||
try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
|
||||
decl.analysis = .complete;
|
||||
decl.generation = mod.generation;
|
||||
return false;
|
||||
}
|
||||
assert(!mod.declIsRoot(decl_index));
|
||||
|
||||
var block_scope: Sema.Block = .{
|
||||
.parent = null,
|
||||
|
|
@ -5241,14 +4984,6 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
|
|||
return mod.intern_pool.destroyNamespace(mod.gpa, index);
|
||||
}
|
||||
|
||||
pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
|
||||
return mod.intern_pool.createStruct(mod.gpa, initialization);
|
||||
}
|
||||
|
||||
pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
|
||||
return mod.intern_pool.destroyStruct(mod.gpa, index);
|
||||
}
|
||||
|
||||
pub fn allocateNewDecl(
|
||||
mod: *Module,
|
||||
namespace: Namespace.Index,
|
||||
|
|
@ -6202,7 +5937,6 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!
|
|||
|
||||
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
|
||||
var canon_info = info;
|
||||
const have_elem_layout = info.child.toType().layoutIsResolved(mod);
|
||||
|
||||
if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
|
||||
|
||||
|
|
@ -6210,17 +5944,17 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
|
|||
// type, we change it to 0 here. If this causes an assertion trip because the
|
||||
// pointee type needs to be resolved more, that needs to be done before calling
|
||||
// this ptr() function.
|
||||
if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
|
||||
if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
|
||||
canon_info.flags.alignment = .none;
|
||||
}
|
||||
if (info.flags.alignment != .none and
|
||||
info.flags.alignment == info.child.toType().abiAlignment(mod))
|
||||
{
|
||||
canon_info.flags.alignment = .none;
|
||||
}
|
||||
|
||||
switch (info.flags.vector_index) {
|
||||
// Canonicalize host_size. If it matches the bit size of the pointee type,
|
||||
// we change it to 0 here. If this causes an assertion trip, the pointee type
|
||||
// needs to be resolved before calling this ptr() function.
|
||||
.none => if (have_elem_layout and info.packed_offset.host_size != 0) {
|
||||
.none => if (info.packed_offset.host_size != 0) {
|
||||
const elem_bit_size = info.child.toType().bitSize(mod);
|
||||
assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
|
||||
if (info.packed_offset.host_size * 8 == elem_bit_size) {
|
||||
|
|
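The canonicalization in this hunk matches what the language exposes for pointer types: an explicit align() equal to the pointee's ABI alignment does not create a distinct type, so ptrType normalizes such an alignment to .none before interning. A small check of the language-level behavior (illustrative only):

const std = @import("std");

test "redundant align() does not change the pointer type" {
    try std.testing.expect(*align(@alignOf(u32)) u32 == *u32);
}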
@ -6483,7 +6217,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
|
|||
return @as(u16, @intCast(big.bitCountTwosComp()));
|
||||
},
|
||||
.lazy_align => |lazy_ty| {
|
||||
return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign);
|
||||
return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
|
||||
},
|
||||
.lazy_size => |lazy_ty| {
|
||||
return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @intFromBool(sign);
|
||||
|
|
@ -6639,20 +6373,30 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
|
|||
/// * `@TypeOf(.{})`
|
||||
/// * A struct which has no fields (`struct {}`).
|
||||
/// * Not a struct.
|
||||
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
|
||||
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
|
||||
return mod.structPtr(struct_index);
|
||||
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
|
||||
.struct_type => |t| t,
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
|
||||
.struct_type => |t| if (t.layout == .Packed) t else null,
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
/// This asserts that the union's enum tag type has been resolved.
|
||||
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
const ip = &mod.intern_pool;
|
||||
switch (ip.indexToKey(ty.ip_index)) {
|
||||
.union_type => |k| return ip.loadUnionType(k),
|
||||
else => return null,
|
||||
}
|
||||
return switch (ip.indexToKey(ty.ip_index)) {
|
||||
.union_type => |k| ip.loadUnionType(k),
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
|
||||
|
|
@ -6741,13 +6485,13 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
|
|||
|
||||
pub const UnionLayout = struct {
|
||||
abi_size: u64,
|
||||
abi_align: u32,
|
||||
abi_align: Alignment,
|
||||
most_aligned_field: u32,
|
||||
most_aligned_field_size: u64,
|
||||
biggest_field: u32,
|
||||
payload_size: u64,
|
||||
payload_align: u32,
|
||||
tag_align: u32,
|
||||
payload_align: Alignment,
|
||||
tag_align: Alignment,
|
||||
tag_size: u64,
|
||||
padding: u32,
|
||||
};
|
||||
|
|
@ -6759,35 +6503,37 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
|
|||
var most_aligned_field_size: u64 = undefined;
|
||||
var biggest_field: u32 = undefined;
|
||||
var payload_size: u64 = 0;
|
||||
var payload_align: u32 = 0;
|
||||
var payload_align: Alignment = .@"1";
|
||||
for (u.field_types.get(ip), 0..) |field_ty, i| {
|
||||
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
const field_align = u.fieldAlign(ip, @intCast(i)).toByteUnitsOptional() orelse
|
||||
const explicit_align = u.fieldAlign(ip, @intCast(i));
|
||||
const field_align = if (explicit_align != .none)
|
||||
explicit_align
|
||||
else
|
||||
field_ty.toType().abiAlignment(mod);
|
||||
const field_size = field_ty.toType().abiSize(mod);
|
||||
if (field_size > payload_size) {
|
||||
payload_size = field_size;
|
||||
biggest_field = @intCast(i);
|
||||
}
|
||||
if (field_align > payload_align) {
|
||||
payload_align = @intCast(field_align);
|
||||
if (field_align.compare(.gte, payload_align)) {
|
||||
payload_align = field_align;
|
||||
most_aligned_field = @intCast(i);
|
||||
most_aligned_field_size = field_size;
|
||||
}
|
||||
}
|
||||
payload_align = @max(payload_align, 1);
|
||||
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
|
||||
if (!have_tag or !u.enum_tag_ty.toType().hasRuntimeBits(mod)) {
|
||||
return .{
|
||||
.abi_size = std.mem.alignForward(u64, payload_size, payload_align),
|
||||
.abi_size = payload_align.forward(payload_size),
|
||||
.abi_align = payload_align,
|
||||
.most_aligned_field = most_aligned_field,
|
||||
.most_aligned_field_size = most_aligned_field_size,
|
||||
.biggest_field = biggest_field,
|
||||
.payload_size = payload_size,
|
||||
.payload_align = payload_align,
|
||||
.tag_align = 0,
|
||||
.tag_align = .none,
|
||||
.tag_size = 0,
|
||||
.padding = 0,
|
||||
};
|
||||
|
|
@@ -6795,29 +6541,29 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
     // Put the tag before or after the payload depending on which one's
     // alignment is greater.
     const tag_size = u.enum_tag_ty.toType().abiSize(mod);
-    const tag_align = @max(1, u.enum_tag_ty.toType().abiAlignment(mod));
+    const tag_align = u.enum_tag_ty.toType().abiAlignment(mod).max(.@"1");
     var size: u64 = 0;
     var padding: u32 = undefined;
-    if (tag_align >= payload_align) {
+    if (tag_align.compare(.gte, payload_align)) {
         // {Tag, Payload}
         size += tag_size;
-        size = std.mem.alignForward(u64, size, payload_align);
+        size = payload_align.forward(size);
         size += payload_size;
         const prev_size = size;
-        size = std.mem.alignForward(u64, size, tag_align);
-        padding = @as(u32, @intCast(size - prev_size));
+        size = tag_align.forward(size);
+        padding = @intCast(size - prev_size);
     } else {
         // {Payload, Tag}
         size += payload_size;
-        size = std.mem.alignForward(u64, size, tag_align);
+        size = tag_align.forward(size);
         size += tag_size;
         const prev_size = size;
-        size = std.mem.alignForward(u64, size, payload_align);
-        padding = @as(u32, @intCast(size - prev_size));
+        size = payload_align.forward(size);
+        padding = @intCast(size - prev_size);
     }
     return .{
         .abi_size = size,
-        .abi_align = @max(tag_align, payload_align),
+        .abi_align = tag_align.max(payload_align),
         .most_aligned_field = most_aligned_field,
         .most_aligned_field_size = most_aligned_field_size,
         .biggest_field = biggest_field,
|
|||
}
|
||||
|
||||
/// Returns 0 if the union is represented with 0 bits at runtime.
|
||||
/// TODO: this returns alignment in byte units should should be a u64
|
||||
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
|
||||
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
|
||||
const ip = &mod.intern_pool;
|
||||
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
|
||||
var max_align: u32 = 0;
|
||||
var max_align: Alignment = .none;
|
||||
if (have_tag) max_align = u.enum_tag_ty.toType().abiAlignment(mod);
|
||||
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
|
||||
|
||||
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
|
||||
max_align = @max(max_align, field_align);
|
||||
max_align = max_align.max(field_align);
|
||||
}
|
||||
return max_align;
|
||||
}
|
||||
|
|
@ -6852,10 +6597,10 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
|
|||
/// Returns the field alignment, assuming the union is not packed.
|
||||
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
|
||||
/// Prefer to call that function instead of this one during Sema.
|
||||
/// TODO: this returns alignment in byte units should should be a u64
|
||||
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) u32 {
|
||||
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
|
||||
const ip = &mod.intern_pool;
|
||||
if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
|
||||
const field_align = u.fieldAlign(ip, field_index);
|
||||
if (field_align != .none) return field_align;
|
||||
const field_ty = u.field_types.get(ip)[field_index].toType();
|
||||
return field_ty.abiAlignment(mod);
|
||||
}
|
||||
|
|
@ -6866,3 +6611,64 @@ pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value
|
|||
const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
|
||||
return enum_type.tagValueIndex(ip, enum_tag.toIntern());
|
||||
}
|
||||
|
||||
/// Returns the field alignment of a non-packed struct in byte units.
|
||||
/// Keep implementation in sync with `Sema.structFieldAlignment`.
|
||||
/// asserts the layout is not packed.
|
||||
pub fn structFieldAlignment(
|
||||
mod: *Module,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
field_ty: Type,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
) Alignment {
|
||||
assert(layout != .Packed);
|
||||
if (explicit_alignment != .none) return explicit_alignment;
|
||||
switch (layout) {
|
||||
.Packed => unreachable,
|
||||
.Auto => {
|
||||
if (mod.getTarget().ofmt == .c) {
|
||||
return structFieldAlignmentExtern(mod, field_ty);
|
||||
} else {
|
||||
return field_ty.abiAlignment(mod);
|
||||
}
|
||||
},
|
||||
.Extern => return structFieldAlignmentExtern(mod, field_ty),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the field alignment of an extern struct in byte units.
|
||||
/// This logic is duplicated in Type.abiAlignmentAdvanced.
|
||||
pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
|
||||
const ty_abi_align = field_ty.abiAlignment(mod);
|
||||
|
||||
if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
|
||||
// The C ABI requires 128 bit integer fields of structs
|
||||
// to be 16-bytes aligned.
|
||||
return ty_abi_align.max(.@"16");
|
||||
}
|
||||
|
||||
return ty_abi_align;
|
||||
}
|
||||
|
||||
/// TODO: avoid linear search by storing these in trailing data of packed struct types
/// then packedStructFieldByteOffset can be expressed in terms of bits / 8, fixing
/// that one too.
/// https://github.com/ziglang/zig/issues/17178
pub fn structPackedFieldBitOffset(
    mod: *Module,
    struct_type: InternPool.Key.StructType,
    field_index: u32,
) u16 {
    const ip = &mod.intern_pool;
    assert(struct_type.layout == .Packed);
    assert(struct_type.haveLayout(ip));
    var bit_sum: u64 = 0;
    for (0..struct_type.field_types.len) |i| {
        if (i == field_index) {
            return @intCast(bit_sum);
        }
        const field_ty = struct_type.field_types.get(ip)[i].toType();
        bit_sum += field_ty.bitSize(mod);
    }
    unreachable; // index out of bounds
}
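The loop above is the same computation the language performs for packed struct field offsets: the bit offset of a field is the running sum of the bit sizes of the fields before it. A small standalone check of that rule (example type only):

const std = @import("std");

const P = packed struct { a: u3, b: u7, c: u6 };

test "packed struct bit offsets are running bit sums" {
    try std.testing.expect(@bitOffsetOf(P, "a") == 0);
    try std.testing.expect(@bitOffsetOf(P, "b") == 3); // after a: u3
    try std.testing.expect(@bitOffsetOf(P, "c") == 10); // after a: u3 + b: u7
}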
|
|||
src/Sema.zig: 1882 changed lines, file diff suppressed because it is too large
@ -135,9 +135,10 @@ pub fn print(
|
|||
|
||||
var i: u32 = 0;
|
||||
while (i < max_len) : (i += 1) {
|
||||
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
|
||||
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
|
||||
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
|
||||
};
|
||||
const elem_val = maybe_elem_val orelse return writer.writeAll(".{ (reinterpreted data) }");
|
||||
if (elem_val.isUndef(mod)) break :str;
|
||||
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
|
||||
}
|
||||
|
|
@ -153,9 +154,10 @@ pub fn print(
|
|||
var i: u32 = 0;
|
||||
while (i < max_len) : (i += 1) {
|
||||
if (i != 0) try writer.writeAll(", ");
|
||||
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
|
||||
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
|
||||
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
|
||||
};
|
||||
const elem_val = maybe_elem_val orelse return writer.writeAll("(reinterpreted data) }");
|
||||
try print(.{
|
||||
.ty = elem_ty,
|
||||
.val = elem_val,
|
||||
|
|
@ -272,7 +274,8 @@ pub fn print(
|
|||
const max_len = @min(len, max_string_len);
|
||||
var buf: [max_string_len]u8 = undefined;
|
||||
for (buf[0..max_len], 0..) |*c, i| {
|
||||
const elem = try val.elemValue(mod, i);
|
||||
const maybe_elem = try val.maybeElemValue(mod, i);
|
||||
const elem = maybe_elem orelse return writer.writeAll(".{ (reinterpreted data) }");
|
||||
if (elem.isUndef(mod)) break :str;
|
||||
c.* = @as(u8, @intCast(elem.toUnsignedInt(mod)));
|
||||
}
|
||||
|
|
@ -283,9 +286,11 @@ pub fn print(
|
|||
const max_len = @min(len, max_aggregate_items);
|
||||
for (0..max_len) |i| {
|
||||
if (i != 0) try writer.writeAll(", ");
|
||||
const maybe_elem = try val.maybeElemValue(mod, i);
|
||||
const elem = maybe_elem orelse return writer.writeAll("(reinterpreted data) }");
|
||||
try print(.{
|
||||
.ty = elem_ty,
|
||||
.val = try val.elemValue(mod, i),
|
||||
.val = elem,
|
||||
}, writer, level - 1, mod);
|
||||
}
|
||||
if (len > max_aggregate_items) {
|
||||
|
|
@ -350,11 +355,11 @@ pub fn print(
|
|||
const container_ty = ptr_container_ty.childType(mod);
|
||||
switch (container_ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
if (container_ty.isTuple(mod)) {
|
||||
if (container_ty.structFieldName(@intCast(field.index), mod).unwrap()) |field_name| {
|
||||
try writer.print(".{i}", .{field_name.fmt(ip)});
|
||||
} else {
|
||||
try writer.print("[{d}]", .{field.index});
|
||||
}
|
||||
const field_name = container_ty.structFieldName(@as(usize, @intCast(field.index)), mod);
|
||||
try writer.print(".{i}", .{field_name.fmt(ip)});
|
||||
},
|
||||
.Union => {
|
||||
const field_name = mod.typeToUnion(container_ty).?.field_names.get(ip)[@intCast(field.index)];
|
||||
|
|
@ -432,7 +437,7 @@ fn printAggregate(
|
|||
if (i != 0) try writer.writeAll(", ");
|
||||
|
||||
const field_name = switch (ip.indexToKey(ty.toIntern())) {
|
||||
.struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
|
||||
.struct_type => |x| x.fieldName(ip, i),
|
||||
.anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
|
||||
else => unreachable,
|
||||
};
|
||||
|
|
|
|||
|
|
@@ -2840,7 +2840,10 @@ pub const Inst = struct {
             is_tuple: bool,
             name_strategy: NameStrategy,
             layout: std.builtin.Type.ContainerLayout,
-            _: u5 = undefined,
+            any_default_inits: bool,
+            any_comptime_fields: bool,
+            any_aligned_fields: bool,
+            _: u2 = undefined,
         };
     };

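Small is a packed struct that has to fit the 16-bit extended.small operand, so the three new flags are carved out of the existing padding: the old `_: u5` becomes three bools plus `_: u2`, leaving the total bit count unchanged. A reduced sketch of the same accounting (hypothetical field set, not the real Small layout):

const std = @import("std");

// Sketch only: three flag bits replace five bits of padding, so the backing
// integer does not grow.
const Before = packed struct(u8) { is_tuple: bool, layout: u2, _: u5 = undefined };
const After = packed struct(u8) {
    is_tuple: bool,
    layout: u2,
    any_default_inits: bool,
    any_comptime_fields: bool,
    any_aligned_fields: bool,
    _: u2 = undefined,
};

test "new flags fit in the old padding" {
    try std.testing.expect(@bitSizeOf(Before) == @bitSizeOf(After));
}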
|
|
|||
|
|
@ -23,6 +23,7 @@ const DW = std.dwarf;
|
|||
const leb128 = std.leb;
|
||||
const log = std.log.scoped(.codegen);
|
||||
const build_options = @import("build_options");
|
||||
const Alignment = InternPool.Alignment;
|
||||
|
||||
const CodeGenError = codegen.CodeGenError;
|
||||
const Result = codegen.Result;
|
||||
|
|
@ -506,11 +507,9 @@ fn gen(self: *Self) !void {
|
|||
// (or w0 when pointer size is 32 bits). As this register
|
||||
// might get overwritten along the way, save the address
|
||||
// to the stack.
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
|
||||
|
||||
const stack_offset = try self.allocMem(ptr_bytes, ptr_bytes, null);
|
||||
const stack_offset = try self.allocMem(8, .@"8", null);
|
||||
|
||||
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = ret_ptr_reg });
|
||||
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -998,11 +997,11 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
|
|||
fn allocMem(
|
||||
self: *Self,
|
||||
abi_size: u32,
|
||||
abi_align: u32,
|
||||
abi_align: Alignment,
|
||||
maybe_inst: ?Air.Inst.Index,
|
||||
) !u32 {
|
||||
assert(abi_size > 0);
|
||||
assert(abi_align > 0);
|
||||
assert(abi_align != .none);
|
||||
|
||||
// In order to efficiently load and store stack items that fit
|
||||
// into registers, we bump up the alignment to the next power of
|
||||
|
|
@ -1010,10 +1009,10 @@ fn allocMem(
|
|||
const adjusted_align = if (abi_size > 8)
|
||||
abi_align
|
||||
else
|
||||
std.math.ceilPowerOfTwoAssert(u32, abi_size);
|
||||
Alignment.fromNonzeroByteUnits(std.math.ceilPowerOfTwoAssert(u64, abi_size));
|
||||
|
||||
// TODO find a free slot instead of always appending
|
||||
const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size;
|
||||
const offset: u32 = @intCast(adjusted_align.forward(self.next_stack_offset) + abi_size);
|
||||
self.next_stack_offset = offset;
|
||||
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
|
||||
|
||||
|
|
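allocMem's power-of-two bump exists so that stack items no larger than 8 bytes can be loaded and stored with a single register-width access. A worked instance with made-up numbers (a 6-byte item placed when next_stack_offset is 20):

const std = @import("std");

test "small stack items are bumped to a power-of-two alignment" {
    const abi_size: u64 = 6;
    const adjusted_align = std.math.ceilPowerOfTwoAssert(u64, abi_size); // 8
    const offset = std.mem.alignForward(u64, 20, adjusted_align) + abi_size; // 24 + 6
    try std.testing.expect(adjusted_align == 8);
    try std.testing.expect(offset == 30);
}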
@ -1515,12 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const len = try self.resolveInst(bin_op.rhs);
|
||||
const len_ty = self.typeOf(bin_op.rhs);
|
||||
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
|
||||
const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
|
||||
const stack_offset = try self.allocMem(16, .@"8", inst);
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
|
||||
try self.genSetStack(len_ty, stack_offset - 8, len);
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
};
|
||||
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
||||
|
|
@ -3285,9 +3281,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
|
|||
break :result MCValue{ .register = reg };
|
||||
}
|
||||
|
||||
const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
|
||||
const optional_abi_size: u32 = @intCast(optional_ty.abiSize(mod));
|
||||
const optional_abi_align = optional_ty.abiAlignment(mod);
|
||||
const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
|
||||
const offset: u32 = @intCast(payload_ty.abiSize(mod));
|
||||
|
||||
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
|
||||
try self.genSetStack(payload_ty, stack_offset, operand);
|
||||
|
|
@ -3376,7 +3372,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
|
|||
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bits = 64;
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
switch (mcv) {
|
||||
|
|
@ -3400,7 +3396,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
|
|||
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bits = 64;
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
switch (mcv) {
|
||||
|
|
@ -4272,8 +4268,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
|||
if (info.return_value == .stack_offset) {
|
||||
log.debug("airCall: return by reference", .{});
|
||||
const ret_ty = fn_ty.fnReturnType(mod);
|
||||
const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
|
||||
const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
|
||||
const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
|
||||
const ret_abi_align = ret_ty.abiAlignment(mod);
|
||||
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
|
||||
|
||||
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
|
||||
|
|
@ -5939,11 +5935,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const ptr = try self.resolveInst(ty_op.operand);
|
||||
const array_ty = ptr_ty.childType(mod);
|
||||
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
|
||||
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
|
||||
const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
|
||||
const ptr_bytes = 8;
|
||||
const stack_offset = try self.allocMem(ptr_bytes * 2, .@"8", inst);
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -6254,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
|
||||
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
|
||||
// values to spread across odd-numbered registers.
|
||||
if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
|
||||
if (ty.toType().abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
|
||||
// Round up NCRN to the next even number
|
||||
ncrn += ncrn % 2;
|
||||
}
|
||||
|
|
@ -6272,7 +6265,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
ncrn = 8;
|
||||
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
|
||||
// that the entire stack space consumed by the arguments is 8-byte aligned.
|
||||
if (ty.toType().abiAlignment(mod) == 8) {
|
||||
if (ty.toType().abiAlignment(mod) == .@"8") {
|
||||
if (nsaa % 8 != 0) {
|
||||
nsaa += 8 - (nsaa % 8);
|
||||
}
|
||||
|
|
@ -6312,10 +6305,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
|
||||
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
|
||||
if (ty.toType().abiSize(mod) > 0) {
|
||||
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
|
||||
const param_size: u32 = @intCast(ty.toType().abiSize(mod));
|
||||
const param_alignment = ty.toType().abiAlignment(mod);
|
||||
|
||||
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
|
||||
stack_offset = @intCast(param_alignment.forward(stack_offset));
|
||||
result_arg.* = .{ .stack_argument_offset = stack_offset };
|
||||
stack_offset += param_size;
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ const DW = std.dwarf;
|
|||
const leb128 = std.leb;
|
||||
const log = std.log.scoped(.codegen);
|
||||
const build_options = @import("build_options");
|
||||
const Alignment = InternPool.Alignment;
|
||||
|
||||
const Result = codegen.Result;
|
||||
const CodeGenError = codegen.CodeGenError;
|
||||
|
|
@ -508,7 +509,7 @@ fn gen(self: *Self) !void {
|
|||
// The address of where to store the return value is in
|
||||
// r0. As this register might get overwritten along the
|
||||
// way, save the address to the stack.
|
||||
const stack_offset = try self.allocMem(4, 4, null);
|
||||
const stack_offset = try self.allocMem(4, .@"4", null);
|
||||
|
||||
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 });
|
||||
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -986,14 +987,14 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
|
|||
fn allocMem(
|
||||
self: *Self,
|
||||
abi_size: u32,
|
||||
abi_align: u32,
|
||||
abi_align: Alignment,
|
||||
maybe_inst: ?Air.Inst.Index,
|
||||
) !u32 {
|
||||
assert(abi_size > 0);
|
||||
assert(abi_align > 0);
|
||||
assert(abi_align != .none);
|
||||
|
||||
// TODO find a free slot instead of always appending
|
||||
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
|
||||
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
|
||||
self.next_stack_offset = offset;
|
||||
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
|
||||
|
||||
|
|
@ -1490,7 +1491,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const len = try self.resolveInst(bin_op.rhs);
|
||||
const len_ty = self.typeOf(bin_op.rhs);
|
||||
|
||||
const stack_offset = try self.allocMem(8, 4, inst);
|
||||
const stack_offset = try self.allocMem(8, .@"4", inst);
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(len_ty, stack_offset - 4, len);
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -4251,8 +4252,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
|||
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
|
||||
log.debug("airCall: return by reference", .{});
|
||||
const ret_ty = fn_ty.fnReturnType(mod);
|
||||
const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
|
||||
const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
|
||||
const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
|
||||
const ret_abi_align = ret_ty.abiAlignment(mod);
|
||||
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
|
||||
|
||||
const ptr_ty = try mod.singleMutPtrType(ret_ty);
|
||||
|
|
@ -5896,7 +5897,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const array_ty = ptr_ty.childType(mod);
|
||||
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
|
||||
|
||||
const stack_offset = try self.allocMem(8, 8, inst);
|
||||
const stack_offset = try self.allocMem(8, .@"8", inst);
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len });
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -6201,7 +6202,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
}
|
||||
|
||||
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
|
||||
if (ty.toType().abiAlignment(mod) == 8)
|
||||
if (ty.toType().abiAlignment(mod) == .@"8")
|
||||
ncrn = std.mem.alignForward(usize, ncrn, 2);
|
||||
|
||||
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
|
||||
|
|
@ -6216,7 +6217,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
return self.fail("TODO MCValues split between registers and stack", .{});
|
||||
} else {
|
||||
ncrn = 4;
|
||||
if (ty.toType().abiAlignment(mod) == 8)
|
||||
if (ty.toType().abiAlignment(mod) == .@"8")
|
||||
nsaa = std.mem.alignForward(u32, nsaa, 8);
|
||||
|
||||
result_arg.* = .{ .stack_argument_offset = nsaa };
|
||||
|
|
@ -6252,10 +6253,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
|
||||
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
|
||||
if (ty.toType().abiSize(mod) > 0) {
|
||||
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
|
||||
const param_size: u32 = @intCast(ty.toType().abiSize(mod));
|
||||
const param_alignment = ty.toType().abiAlignment(mod);
|
||||
|
||||
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
|
||||
stack_offset = @intCast(param_alignment.forward(stack_offset));
|
||||
result_arg.* = .{ .stack_argument_offset = stack_offset };
|
||||
stack_offset += param_size;
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
|
|||
const field_ty = ty.structFieldType(i, mod);
|
||||
const field_alignment = ty.structFieldAlign(i, mod);
|
||||
const field_size = field_ty.bitSize(mod);
|
||||
if (field_size > 32 or field_alignment > 32) {
|
||||
if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
|
||||
return Class.arrSize(bit_size, 64);
|
||||
}
|
||||
}
|
||||
|
|
@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
|
|||
|
||||
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (field_ty.toType().bitSize(mod) > 32 or
|
||||
mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)) > 32)
|
||||
mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
|
||||
{
|
||||
return Class.arrSize(bit_size, 64);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ const leb128 = std.leb;
|
|||
const log = std.log.scoped(.codegen);
|
||||
const build_options = @import("build_options");
|
||||
const codegen = @import("../../codegen.zig");
|
||||
const Alignment = InternPool.Alignment;
|
||||
|
||||
const CodeGenError = codegen.CodeGenError;
|
||||
const Result = codegen.Result;
|
||||
|
|
@ -53,7 +54,7 @@ ret_mcv: MCValue,
|
|||
fn_type: Type,
|
||||
arg_index: usize,
|
||||
src_loc: Module.SrcLoc,
|
||||
stack_align: u32,
|
||||
stack_align: Alignment,
|
||||
|
||||
/// MIR Instructions
|
||||
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
|
||||
|
|
@ -788,11 +789,10 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
|
|||
try table.ensureUnusedCapacity(self.gpa, additional_count);
|
||||
}
|
||||
|
||||
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
|
||||
if (abi_align > self.stack_align)
|
||||
self.stack_align = abi_align;
|
||||
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
// TODO find a free slot instead of always appending
|
||||
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align);
|
||||
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset));
|
||||
self.next_stack_offset = offset + abi_size;
|
||||
if (self.next_stack_offset > self.max_end_stack)
|
||||
self.max_end_stack = self.next_stack_offset;
|
||||
|
|
@ -822,8 +822,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
|||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(mod);
|
||||
if (abi_align > self.stack_align)
|
||||
self.stack_align = abi_align;
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
|
||||
if (reg_ok) {
|
||||
// Make sure the type can fit in a register before we try to allocate one.
|
||||
|
|
@ -2602,7 +2601,7 @@ const CallMCValues = struct {
|
|||
args: []MCValue,
|
||||
return_value: MCValue,
|
||||
stack_byte_count: u32,
|
||||
stack_align: u32,
|
||||
stack_align: Alignment,
|
||||
|
||||
fn deinit(self: *CallMCValues, func: *Self) void {
|
||||
func.gpa.free(self.args);
|
||||
|
|
@ -2632,7 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
assert(result.args.len == 0);
|
||||
result.return_value = .{ .unreach = {} };
|
||||
result.stack_byte_count = 0;
|
||||
result.stack_align = 1;
|
||||
result.stack_align = .@"1";
|
||||
return result;
|
||||
},
|
||||
.Unspecified, .C => {
|
||||
|
|
@ -2671,7 +2670,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
|||
}
|
||||
|
||||
result.stack_byte_count = next_stack_offset;
|
||||
result.stack_align = 16;
|
||||
result.stack_align = .@"16";
|
||||
},
|
||||
else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ const CodeGenError = codegen.CodeGenError;
|
|||
const Result = @import("../../codegen.zig").Result;
|
||||
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
|
||||
const Endian = std.builtin.Endian;
|
||||
const Alignment = InternPool.Alignment;
|
||||
|
||||
const build_options = @import("build_options");
|
||||
|
||||
|
|
@ -62,7 +63,7 @@ ret_mcv: MCValue,
|
|||
fn_type: Type,
|
||||
arg_index: usize,
|
||||
src_loc: Module.SrcLoc,
|
||||
stack_align: u32,
|
||||
stack_align: Alignment,
|
||||
|
||||
/// MIR Instructions
|
||||
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
|
||||
|
|
@ -227,7 +228,7 @@ const CallMCValues = struct {
|
|||
args: []MCValue,
|
||||
return_value: MCValue,
|
||||
stack_byte_count: u32,
|
||||
stack_align: u32,
|
||||
stack_align: Alignment,
|
||||
|
||||
fn deinit(self: *CallMCValues, func: *Self) void {
|
||||
func.gpa.free(self.args);
|
||||
|
|
@ -424,7 +425,7 @@ fn gen(self: *Self) !void {
|
|||
|
||||
// Backpatch stack offset
|
||||
const total_stack_size = self.max_end_stack + abi.stack_reserved_area;
|
||||
const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align);
|
||||
const stack_size = self.stack_align.forward(total_stack_size);
|
||||
if (math.cast(i13, stack_size)) |size| {
|
||||
self.mir_instructions.set(save_inst, .{
|
||||
.tag = .save,
|
||||
|
|
@ -880,11 +881,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const ptr = try self.resolveInst(ty_op.operand);
|
||||
const array_ty = ptr_ty.childType(mod);
|
||||
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
|
||||
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
|
||||
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
|
||||
const ptr_bytes = 8;
|
||||
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -2438,11 +2436,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const ptr_ty = self.typeOf(bin_op.lhs);
|
||||
const len = try self.resolveInst(bin_op.rhs);
|
||||
const len_ty = self.typeOf(bin_op.rhs);
|
||||
|
||||
const ptr_bits = self.target.ptrBitWidth();
|
||||
const ptr_bytes = @divExact(ptr_bits, 8);
|
||||
|
||||
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
|
||||
const ptr_bytes = 8;
|
||||
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
|
|
@ -2782,11 +2777,10 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
|
|||
return result_index;
|
||||
}
|
||||
|
||||
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
|
||||
if (abi_align > self.stack_align)
|
||||
self.stack_align = abi_align;
|
||||
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
// TODO find a free slot instead of always appending
|
||||
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
|
||||
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
|
||||
self.next_stack_offset = offset;
|
||||
if (self.next_stack_offset > self.max_end_stack)
|
||||
self.max_end_stack = self.next_stack_offset;
|
||||
|
|
@ -2825,8 +2819,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
|||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(mod);
|
||||
if (abi_align > self.stack_align)
|
||||
self.stack_align = abi_align;
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
|
||||
if (reg_ok) {
|
||||
// Make sure the type can fit in a register before we try to allocate one.
|
||||
|
|
@ -4479,7 +4472,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
|||
assert(result.args.len == 0);
|
||||
result.return_value = .{ .unreach = {} };
|
||||
result.stack_byte_count = 0;
|
||||
result.stack_align = 1;
|
||||
result.stack_align = .@"1";
|
||||
return result;
|
||||
},
|
||||
.Unspecified, .C => {
|
||||
|
|
@ -4521,7 +4514,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
|||
}
|
||||
|
||||
result.stack_byte_count = next_stack_offset;
|
||||
result.stack_align = 16;
|
||||
result.stack_align = .@"16";
|
||||
|
||||
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
|
||||
result.return_value = .{ .unreach = {} };
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ const target_util = @import("../../target.zig");
|
|||
const Mir = @import("Mir.zig");
|
||||
const Emit = @import("Emit.zig");
|
||||
const abi = @import("abi.zig");
|
||||
const Alignment = InternPool.Alignment;
|
||||
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
|
||||
const errUnionErrorOffset = codegen.errUnionErrorOffset;
|
||||
|
||||
|
|
@ -709,7 +710,7 @@ stack_size: u32 = 0,
|
|||
/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
|
||||
/// and also what the llvm backend will emit.
|
||||
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
|
||||
stack_alignment: u32 = 16,
|
||||
stack_alignment: Alignment = .@"16",
|
||||
|
||||
// For each individual Wasm valtype we store a separate free list which
// For each individual Wasm valtype we store a separate free list which
|
||||
// allows us to re-use locals that are no longer used. e.g. a temporary local.
|
||||
|
|
@ -991,6 +992,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
|
|||
/// Using a given `Type`, returns the corresponding type
|
||||
fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
|
||||
const target = mod.getTarget();
|
||||
const ip = &mod.intern_pool;
|
||||
return switch (ty.zigTypeTag(mod)) {
|
||||
.Float => switch (ty.floatBits(target)) {
|
||||
16 => wasm.Valtype.i32, // stored/loaded as u16
|
||||
|
|
@ -1005,12 +1007,12 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
|
|||
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
|
||||
break :blk wasm.Valtype.i32; // represented as pointer to stack
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
const struct_obj = mod.typeToStruct(ty).?;
|
||||
return typeToValtype(struct_obj.backing_int_ty, mod);
|
||||
},
|
||||
else => wasm.Valtype.i32,
|
||||
.Struct => {
|
||||
if (mod.typeToPackedStruct(ty)) |packed_struct| {
|
||||
return typeToValtype(packed_struct.backingIntType(ip).toType(), mod);
|
||||
} else {
|
||||
return wasm.Valtype.i32;
|
||||
}
|
||||
},
|
||||
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
|
||||
.direct => wasm.Valtype.v128,
|
||||
|
|
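The typeToValtype hunk above maps a packed struct to the valtype of its backing integer. The fact it relies on, that a packed struct is layout-compatible with its backing integer, is visible in ordinary user code; the struct below is a made-up example, not taken from the compiler.

const std = @import("std");

// A packed struct with an explicit u8 backing integer. Its in-memory
// representation is exactly that integer, which is why a backend can lower
// it as a scalar instead of passing it by reference.
const Flags = packed struct(u8) {
    a: bool, // bit 0
    b: bool, // bit 1
    count: u6, // bits 2..7
};

test "packed struct and its backing integer" {
    try std.testing.expect(@typeInfo(Flags).Struct.backing_integer.? == u8);
    const f: Flags = .{ .a = true, .b = false, .count = 3 };
    // Fields are packed starting from the least significant bit.
    try std.testing.expectEqual(@as(u8, 0b0000_1101), @as(u8, @bitCast(f)));
}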
@ -1285,12 +1287,12 @@ fn genFunc(func: *CodeGen) InnerError!void {
|
|||
// store stack pointer so we can restore it when we return from the function
|
||||
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
|
||||
// get the total stack size
|
||||
const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
|
||||
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
|
||||
// substract it from the current stack pointer
|
||||
const aligned_stack = func.stack_alignment.forward(func.stack_size);
|
||||
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
|
||||
// subtract it from the current stack pointer
|
||||
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
|
||||
// Get negative stack alignment
|
||||
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
|
||||
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
|
||||
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
|
||||
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
|
||||
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
|
||||
|
|
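The prologue hunk above subtracts the frame size from the stack pointer and then ANDs the result with the negated alignment. A small sketch of that arithmetic with plain integers; the real code emits wasm i32_sub/i32_and instructions, and the numbers in the test are made up.

const std = @import("std");

// Compute the new stack pointer: reserve `stack_size` bytes below `sp`, then
// round the result down by AND-ing with the two's-complement negation of the
// alignment, which has all low bits cleared.
fn prologueSp(sp: u32, stack_size: u32, alignment: u32) u32 {
    std.debug.assert(std.math.isPowerOfTwo(alignment));
    const aligned_size = std.mem.alignForward(u32, stack_size, alignment);
    const neg_align: u32 = @bitCast(-@as(i32, @intCast(alignment)));
    return (sp - aligned_size) & neg_align;
}

test "wasm-style stack pointer alignment" {
    // Reserving 20 bytes with 16-byte alignment moves the pointer down by 32.
    try std.testing.expectEqual(@as(u32, 4064), prologueSp(4096, 20, 16));
}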
@ -1438,7 +1440,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
|
|||
});
|
||||
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
|
||||
.offset = value.offset(),
|
||||
.alignment = scalar_type.abiAlignment(mod),
|
||||
.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
}
|
||||
},
|
||||
|
|
@ -1527,11 +1529,9 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
|
|||
};
|
||||
const abi_align = ty.abiAlignment(mod);
|
||||
|
||||
if (abi_align > func.stack_alignment) {
|
||||
func.stack_alignment = abi_align;
|
||||
}
|
||||
func.stack_alignment = func.stack_alignment.max(abi_align);
|
||||
|
||||
const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
|
||||
const offset: u32 = @intCast(abi_align.forward(func.stack_size));
|
||||
defer func.stack_size = offset + abi_size;
|
||||
|
||||
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
|
||||
|
|
@ -1560,11 +1560,9 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
|
|||
pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
|
||||
});
|
||||
};
|
||||
if (abi_alignment > func.stack_alignment) {
|
||||
func.stack_alignment = abi_alignment;
|
||||
}
|
||||
func.stack_alignment = func.stack_alignment.max(abi_alignment);
|
||||
|
||||
const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
|
||||
const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
|
||||
defer func.stack_size = offset + abi_size;
|
||||
|
||||
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
|
||||
|
|
@ -1749,10 +1747,8 @@ fn isByRef(ty: Type, mod: *Module) bool {
|
|||
return ty.hasRuntimeBitsIgnoreComptime(mod);
|
||||
},
|
||||
.Struct => {
|
||||
if (mod.typeToStruct(ty)) |struct_obj| {
|
||||
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
|
||||
return isByRef(struct_obj.backing_int_ty, mod);
|
||||
}
|
||||
if (mod.typeToPackedStruct(ty)) |packed_struct| {
|
||||
return isByRef(packed_struct.backingIntType(ip).toType(), mod);
|
||||
}
|
||||
return ty.hasRuntimeBitsIgnoreComptime(mod);
|
||||
},
|
||||
|
|
@ -2120,7 +2116,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
});
|
||||
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
|
||||
.offset = operand.offset(),
|
||||
.alignment = scalar_type.abiAlignment(mod),
|
||||
.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
},
|
||||
else => try func.emitWValue(operand),
|
||||
|
|
@ -2385,19 +2381,19 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
|
|||
},
|
||||
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
|
||||
.unrolled => {
|
||||
const len = @as(u32, @intCast(abi_size));
|
||||
const len: u32 = @intCast(abi_size);
|
||||
return func.memcpy(lhs, rhs, .{ .imm32 = len });
|
||||
},
|
||||
.direct => {
|
||||
try func.emitWValue(lhs);
|
||||
try func.lowerToStack(rhs);
|
||||
// TODO: Add helper functions for simd opcodes
|
||||
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
|
||||
const extra_index: u32 = @intCast(func.mir_extra.items.len);
|
||||
// stores as := opcode, offset, alignment (opcode::memarg)
|
||||
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
|
||||
std.wasm.simdOpcode(.v128_store),
|
||||
offset + lhs.offset(),
|
||||
ty.abiAlignment(mod),
|
||||
@intCast(ty.abiAlignment(mod).toByteUnits(0)),
|
||||
});
|
||||
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
|
||||
},
|
||||
|
|
@ -2451,7 +2447,10 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
|
|||
// store rhs value at stack pointer's location in memory
|
||||
try func.addMemArg(
|
||||
Mir.Inst.Tag.fromOpcode(opcode),
|
||||
.{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
|
||||
.{
|
||||
.offset = offset + lhs.offset(),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -2510,7 +2509,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
|
|||
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
|
||||
std.wasm.simdOpcode(.v128_load),
|
||||
offset + operand.offset(),
|
||||
ty.abiAlignment(mod),
|
||||
@intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
|
||||
return WValue{ .stack = {} };
|
||||
|
|
@ -2526,7 +2525,10 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
|
|||
|
||||
try func.addMemArg(
|
||||
Mir.Inst.Tag.fromOpcode(opcode),
|
||||
.{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
|
||||
.{
|
||||
.offset = offset + operand.offset(),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
},
|
||||
);
|
||||
|
||||
return WValue{ .stack = {} };
|
||||
|
|
@ -3023,10 +3025,10 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
|
|||
else => blk: {
|
||||
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
|
||||
if (layout.payload_size == 0) break :blk 0;
|
||||
if (layout.payload_align > layout.tag_align) break :blk 0;
|
||||
if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
|
||||
|
||||
// tag is stored first so calculate offset from where payload starts
|
||||
break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
|
||||
break :blk layout.tag_align.forward(layout.tag_size);
|
||||
},
|
||||
},
|
||||
.Pointer => switch (parent_ty.ptrSize(mod)) {
|
||||
|
|
@ -3103,8 +3105,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
|
|||
return @as(WantedT, @intCast(result));
|
||||
}
|
||||
|
||||
/// This function is intended to assert that `isByRef` returns `false` for `ty`.
|
||||
/// However such an assertion fails on the behavior tests currently.
|
||||
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
|
||||
const mod = func.bin_file.base.options.module.?;
|
||||
// TODO: enable this assertion
|
||||
//assert(!isByRef(ty, mod));
|
||||
const ip = &mod.intern_pool;
|
||||
var val = arg_val;
|
||||
switch (ip.indexToKey(val.ip_index)) {
|
||||
|
|
@ -3235,16 +3241,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
|
|||
val.writeToMemory(ty, mod, &buf) catch unreachable;
|
||||
return func.storeSimdImmd(buf);
|
||||
},
|
||||
.struct_type, .anon_struct_type => {
|
||||
const struct_obj = mod.typeToStruct(ty).?;
|
||||
assert(struct_obj.layout == .Packed);
|
||||
.struct_type => |struct_type| {
|
||||
// non-packed structs are not handled in this function because they
|
||||
// are by-ref types.
|
||||
assert(struct_type.layout == .Packed);
|
||||
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
|
||||
val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
|
||||
val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
|
||||
const backing_int_ty = struct_type.backingIntType(ip).toType();
|
||||
const int_val = try mod.intValue(
|
||||
struct_obj.backing_int_ty,
|
||||
std.mem.readIntLittle(u64, &buf),
|
||||
backing_int_ty,
|
||||
mem.readIntLittle(u64, &buf),
|
||||
);
|
||||
return func.lowerConstant(int_val, struct_obj.backing_int_ty);
|
||||
return func.lowerConstant(int_val, backing_int_ty);
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
|
|
@ -3269,6 +3277,7 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
|
|||
|
||||
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
|
||||
const mod = func.bin_file.base.options.module.?;
|
||||
const ip = &mod.intern_pool;
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
|
||||
.Int, .Enum => switch (ty.intInfo(mod).bits) {
|
||||
|
|
@ -3298,9 +3307,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
|
|||
return WValue{ .imm32 = 0xaaaaaaaa };
|
||||
},
|
||||
.Struct => {
|
||||
const struct_obj = mod.typeToStruct(ty).?;
|
||||
assert(struct_obj.layout == .Packed);
|
||||
return func.emitUndefined(struct_obj.backing_int_ty);
|
||||
const packed_struct = mod.typeToPackedStruct(ty).?;
|
||||
return func.emitUndefined(packed_struct.backingIntType(ip).toType());
|
||||
},
|
||||
else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
|
||||
}
|
||||
|
|
@ -3340,7 +3348,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
|
|||
.i64 => |x| @as(i32, @intCast(x)),
|
||||
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
|
||||
.big_int => unreachable,
|
||||
.lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
|
||||
.lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiAlignment(mod).toByteUnits(0))))),
|
||||
.lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
|
||||
};
|
||||
}
|
||||
|
|
@ -3757,6 +3765,7 @@ fn structFieldPtr(
|
|||
|
||||
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const mod = func.bin_file.base.options.module.?;
|
||||
const ip = &mod.intern_pool;
|
||||
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
|
||||
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
|
||||
|
||||
|
|
@ -3769,9 +3778,9 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
const result = switch (struct_ty.containerLayout(mod)) {
|
||||
.Packed => switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => result: {
|
||||
const struct_obj = mod.typeToStruct(struct_ty).?;
|
||||
const offset = struct_obj.packedFieldBitOffset(mod, field_index);
|
||||
const backing_ty = struct_obj.backing_int_ty;
|
||||
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
|
||||
const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
|
||||
const backing_ty = packed_struct.backingIntType(ip).toType();
|
||||
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
|
||||
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
|
||||
};
|
||||
|
|
@ -3793,7 +3802,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
|
||||
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
|
||||
break :result try bitcasted.toLocal(func, field_ty);
|
||||
} else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
|
||||
} else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
|
||||
// In this case we do not have to perform any transformations,
|
||||
// we can simply reuse the operand.
|
||||
break :result func.reuseOperand(struct_field.struct_operand, operand);
|
||||
|
|
@ -4053,7 +4062,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
|
|||
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
try func.addMemArg(.i32_load16_u, .{
|
||||
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
|
||||
.alignment = Type.anyerror.abiAlignment(mod),
|
||||
.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -4141,7 +4150,10 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
|
|||
try func.emitWValue(err_union);
|
||||
try func.addImm32(0);
|
||||
const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
|
||||
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
|
||||
try func.addMemArg(.i32_store16, .{
|
||||
.offset = err_union.offset() + err_val_offset,
|
||||
.alignment = 2,
|
||||
});
|
||||
break :result err_union;
|
||||
};
|
||||
func.finishAir(inst, result, &.{ty_op.operand});
|
||||
|
|
@ -4977,7 +4989,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
|
||||
opcode,
|
||||
operand.offset(),
|
||||
elem_ty.abiAlignment(mod),
|
||||
@intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
|
||||
try func.addLabel(.local_set, result.local.value);
|
||||
|
|
@ -5065,7 +5077,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
std.wasm.simdOpcode(.i8x16_shuffle),
|
||||
} ++ [1]u32{undefined} ** 4;
|
||||
|
||||
var lanes = std.mem.asBytes(operands[1..]);
|
||||
var lanes = mem.asBytes(operands[1..]);
|
||||
for (0..@as(usize, @intCast(mask_len))) |index| {
|
||||
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
|
||||
const base_index = if (mask_elem >= 0)
|
||||
|
|
@ -5099,6 +5111,7 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
|
||||
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const mod = func.bin_file.base.options.module.?;
|
||||
const ip = &mod.intern_pool;
|
||||
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
|
||||
const result_ty = func.typeOfIndex(inst);
|
||||
const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
|
||||
|
|
@ -5150,13 +5163,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
if (isByRef(result_ty, mod)) {
|
||||
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
|
||||
}
|
||||
const struct_obj = mod.typeToStruct(result_ty).?;
|
||||
const fields = struct_obj.fields.values();
|
||||
const backing_type = struct_obj.backing_int_ty;
|
||||
const packed_struct = mod.typeToPackedStruct(result_ty).?;
|
||||
const field_types = packed_struct.field_types;
|
||||
const backing_type = packed_struct.backingIntType(ip).toType();
|
||||
|
||||
// ensure the result is zero'd
|
||||
const result = try func.allocLocal(backing_type);
|
||||
if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
|
||||
if (backing_type.bitSize(mod) <= 32)
|
||||
try func.addImm32(0)
|
||||
else
|
||||
try func.addImm64(0);
|
||||
|
|
@ -5164,22 +5177,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
|
||||
var current_bit: u16 = 0;
|
||||
for (elements, 0..) |elem, elem_index| {
|
||||
const field = fields[elem_index];
|
||||
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
const field_ty = field_types.get(ip)[elem_index].toType();
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
|
||||
const shift_val = if (backing_type.bitSize(mod) <= 32)
|
||||
WValue{ .imm32 = current_bit }
|
||||
else
|
||||
WValue{ .imm64 = current_bit };
|
||||
|
||||
const value = try func.resolveInst(elem);
|
||||
const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
|
||||
const value_bit_size: u16 = @intCast(field_ty.bitSize(mod));
|
||||
const int_ty = try mod.intType(.unsigned, value_bit_size);
|
||||
|
||||
// load our current result on stack so we can perform all transformations
|
||||
// using only stack values. Saving the cost of loads and stores.
|
||||
try func.emitWValue(result);
|
||||
const bitcasted = try func.bitcast(int_ty, field.ty, value);
|
||||
const bitcasted = try func.bitcast(int_ty, field_ty, value);
|
||||
const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
|
||||
// no need to shift any values when the current offset is 0
|
||||
const shifted = if (current_bit != 0) shifted: {
|
||||
|
|
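The airAggregateInit loop above shifts each field into the packed struct's backing integer. Stripped of the WValue machinery, the bit manipulation it performs amounts to the following; field widths and values are made up for illustration.

const std = @import("std");

test "packing fields into a backing integer" {
    // Two fields of a hypothetical packed struct: a: u3 = 5 at bit offset 0,
    // b: u5 = 9 at bit offset 3, with a u8 backing integer.
    var result: u8 = 0;
    var current_bit: u3 = 0;

    result |= @as(u8, 5) << current_bit; // field a
    current_bit += 3;
    result |= @as(u8, 9) << current_bit; // field b

    try std.testing.expectEqual(@as(u8, 0b0100_1101), result);
}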
@ -5199,7 +5212,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
|
||||
|
||||
const elem_ty = result_ty.structFieldType(elem_index, mod);
|
||||
const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
|
||||
const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
|
||||
const value = try func.resolveInst(elem);
|
||||
try func.store(offset, value, elem_ty, 0);
|
||||
|
||||
|
|
@ -5256,7 +5269,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
if (isByRef(union_ty, mod)) {
|
||||
const result_ptr = try func.allocStack(union_ty);
|
||||
const payload = try func.resolveInst(extra.init);
|
||||
if (layout.tag_align >= layout.payload_align) {
|
||||
if (layout.tag_align.compare(.gte, layout.payload_align)) {
|
||||
if (isByRef(field_ty, mod)) {
|
||||
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
|
||||
try func.store(payload_ptr, payload, field_ty, 0);
|
||||
|
|
@ -5420,9 +5433,9 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
|
||||
// when the tag alignment is smaller than the payload, the field will be stored
|
||||
// after the payload.
|
||||
const offset = if (layout.tag_align < layout.payload_align) blk: {
|
||||
break :blk @as(u32, @intCast(layout.payload_size));
|
||||
} else @as(u32, 0);
|
||||
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
|
||||
break :blk @intCast(layout.payload_size);
|
||||
} else 0;
|
||||
try func.store(union_ptr, new_tag, tag_ty, offset);
|
||||
func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
|
||||
}
|
||||
|
|
@ -5439,9 +5452,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
const operand = try func.resolveInst(ty_op.operand);
|
||||
// when the tag alignment is smaller than the payload, the field will be stored
|
||||
// after the payload.
|
||||
const offset = if (layout.tag_align < layout.payload_align) blk: {
|
||||
break :blk @as(u32, @intCast(layout.payload_size));
|
||||
} else @as(u32, 0);
|
||||
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
|
||||
break :blk @intCast(layout.payload_size);
|
||||
} else 0;
|
||||
const tag = try func.load(operand, tag_ty, offset);
|
||||
const result = try tag.toLocal(func, tag_ty);
|
||||
func.finishAir(inst, result, &.{ty_op.operand});
|
||||
|
|
@ -6366,7 +6379,7 @@ fn lowerTry(
|
|||
const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
|
||||
try func.addMemArg(.i32_load16_u, .{
|
||||
.offset = err_union.offset() + err_offset,
|
||||
.alignment = Type.anyerror.abiAlignment(mod),
|
||||
.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
}
|
||||
try func.addTag(.i32_eqz);
|
||||
|
|
@ -7287,7 +7300,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
|
||||
}, .{
|
||||
.offset = ptr_operand.offset(),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
try func.addLabel(.local_tee, val_local.local.value);
|
||||
_ = try func.cmp(.stack, expected_val, ty, .eq);
|
||||
|
|
@ -7349,7 +7362,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
try func.emitWValue(ptr);
|
||||
try func.addAtomicMemArg(tag, .{
|
||||
.offset = ptr.offset(),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
} else {
|
||||
_ = try func.load(ptr, ty, 0);
|
||||
|
|
@ -7410,7 +7423,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
},
|
||||
.{
|
||||
.offset = ptr.offset(),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
},
|
||||
);
|
||||
const select_res = try func.allocLocal(ty);
|
||||
|
|
@ -7470,7 +7483,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
};
|
||||
try func.addAtomicMemArg(tag, .{
|
||||
.offset = ptr.offset(),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
const result = try WValue.toLocal(.stack, func, ty);
|
||||
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
|
||||
|
|
@ -7566,7 +7579,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
try func.lowerToStack(operand);
|
||||
try func.addAtomicMemArg(tag, .{
|
||||
.offset = ptr.offset(),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
|
||||
});
|
||||
} else {
|
||||
try func.store(ptr, operand, ty, 0);
|
||||
|
|
|
|||
|
|
@ -28,20 +28,22 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
|
|||
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
if (ty.containerLayout(mod) == .Packed) {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (ty.bitSize(mod) <= 64) return direct;
|
||||
return .{ .direct, .direct };
|
||||
}
|
||||
// When the struct type is non-scalar
|
||||
if (ty.structFieldCount(mod) > 1) return memory;
|
||||
// When the struct's alignment is non-natural
|
||||
const field = ty.structFields(mod).values()[0];
|
||||
if (field.abi_align != .none) {
|
||||
if (field.abi_align.toByteUnitsOptional().? > field.ty.abiAlignment(mod)) {
|
||||
return memory;
|
||||
}
|
||||
if (struct_type.field_types.len > 1) {
|
||||
// The struct type is non-scalar.
|
||||
return memory;
|
||||
}
|
||||
return classifyType(field.ty, mod);
|
||||
const field_ty = struct_type.field_types.get(ip)[0].toType();
|
||||
const explicit_align = struct_type.fieldAlign(ip, 0);
|
||||
if (explicit_align != .none) {
|
||||
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
|
||||
return memory;
|
||||
}
|
||||
return classifyType(field_ty, mod);
|
||||
},
|
||||
.Int, .Enum, .ErrorSet, .Vector => {
|
||||
const int_bits = ty.intInfo(mod).bits;
|
||||
|
|
@ -101,15 +103,11 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
|
|||
const ip = &mod.intern_pool;
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
switch (ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
const struct_obj = mod.typeToStruct(ty).?;
|
||||
return scalarType(struct_obj.backing_int_ty, mod);
|
||||
},
|
||||
else => {
|
||||
assert(ty.structFieldCount(mod) == 1);
|
||||
return scalarType(ty.structFieldType(0, mod), mod);
|
||||
},
|
||||
if (mod.typeToPackedStruct(ty)) |packed_struct| {
|
||||
return scalarType(packed_struct.backingIntType(ip).toType(), mod);
|
||||
} else {
|
||||
assert(ty.structFieldCount(mod) == 1);
|
||||
return scalarType(ty.structFieldType(0, mod), mod);
|
||||
}
|
||||
},
|
||||
.Union => {
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ const Lower = @import("Lower.zig");
|
|||
const Mir = @import("Mir.zig");
|
||||
const Module = @import("../../Module.zig");
|
||||
const InternPool = @import("../../InternPool.zig");
|
||||
const Alignment = InternPool.Alignment;
|
||||
const Target = std.Target;
|
||||
const Type = @import("../../type.zig").Type;
|
||||
const TypedValue = @import("../../TypedValue.zig");
|
||||
|
|
@ -607,19 +608,21 @@ const InstTracking = struct {
|
|||
|
||||
const FrameAlloc = struct {
|
||||
abi_size: u31,
|
||||
abi_align: u5,
|
||||
abi_align: Alignment,
|
||||
ref_count: u16,
|
||||
|
||||
fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
|
||||
assert(math.isPowerOfTwo(alloc_abi.alignment));
|
||||
fn init(alloc_abi: struct { size: u64, alignment: Alignment }) FrameAlloc {
|
||||
return .{
|
||||
.abi_size = @intCast(alloc_abi.size),
|
||||
.abi_align = math.log2_int(u32, alloc_abi.alignment),
|
||||
.abi_align = alloc_abi.alignment,
|
||||
.ref_count = 0,
|
||||
};
|
||||
}
|
||||
fn initType(ty: Type, mod: *Module) FrameAlloc {
|
||||
return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
|
||||
return init(.{
|
||||
.size = ty.abiSize(mod),
|
||||
.alignment = ty.abiAlignment(mod),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
|
|
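The FrameAlloc hunk above drops the explicit math.log2_int conversion because the alignment enum already stores the log2 value as its integer tag. A tiny sketch of the relationship being relied on, with a made-up 16-byte example:

const std = @import("std");

test "log2 alignment encoding" {
    const byte_align: u32 = 16;
    // The old abi_align field stored this log2 value in a u5; an enum whose
    // tag is the log2 value carries exactly the same information.
    const log2_val: u5 = std.math.log2_int(u32, byte_align);
    try std.testing.expectEqual(@as(u5, 4), log2_val);
    try std.testing.expectEqual(byte_align, @as(u32, 1) << log2_val);
}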
@ -702,12 +705,12 @@ pub fn generate(
|
|||
@intFromEnum(FrameIndex.stack_frame),
|
||||
FrameAlloc.init(.{
|
||||
.size = 0,
|
||||
.alignment = @intCast(func.analysis(ip).stack_alignment.toByteUnitsOptional() orelse 1),
|
||||
.alignment = func.analysis(ip).stack_alignment.max(.@"1"),
|
||||
}),
|
||||
);
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.call_frame),
|
||||
FrameAlloc.init(.{ .size = 0, .alignment = 1 }),
|
||||
FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }),
|
||||
);
|
||||
|
||||
const fn_info = mod.typeToFunc(fn_type).?;
|
||||
|
|
@ -729,15 +732,21 @@ pub fn generate(
|
|||
function.ret_mcv = call_info.return_value;
|
||||
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
|
||||
.size = Type.usize.abiSize(mod),
|
||||
.alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
|
||||
.alignment = Type.usize.abiAlignment(mod).min(call_info.stack_align),
|
||||
}));
|
||||
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
|
||||
.size = Type.usize.abiSize(mod),
|
||||
.alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
|
||||
.alignment = Alignment.min(
|
||||
call_info.stack_align,
|
||||
Alignment.fromNonzeroByteUnits(bin_file.options.target.stackAlignment()),
|
||||
),
|
||||
}));
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.args_frame),
|
||||
FrameAlloc.init(.{ .size = call_info.stack_byte_count, .alignment = call_info.stack_align }),
|
||||
FrameAlloc.init(.{
|
||||
.size = call_info.stack_byte_count,
|
||||
.alignment = call_info.stack_align,
|
||||
}),
|
||||
);
|
||||
|
||||
function.gen() catch |err| switch (err) {
|
||||
|
|
@ -2156,8 +2165,8 @@ fn setFrameLoc(
|
|||
) void {
|
||||
const frame_i = @intFromEnum(frame_index);
|
||||
if (aligned) {
|
||||
const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i];
|
||||
offset.* = mem.alignForward(i32, offset.*, alignment);
|
||||
const alignment = self.frame_allocs.items(.abi_align)[frame_i];
|
||||
offset.* = @intCast(alignment.forward(@intCast(offset.*)));
|
||||
}
|
||||
self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
|
||||
offset.* += self.frame_allocs.items(.abi_size)[frame_i];
|
||||
|
|
@ -2179,7 +2188,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
|
|||
const SortContext = struct {
|
||||
frame_align: @TypeOf(frame_align),
|
||||
pub fn lessThan(context: @This(), lhs: FrameIndex, rhs: FrameIndex) bool {
|
||||
return context.frame_align[@intFromEnum(lhs)] > context.frame_align[@intFromEnum(rhs)];
|
||||
return context.frame_align[@intFromEnum(lhs)].compare(.gt, context.frame_align[@intFromEnum(rhs)]);
|
||||
}
|
||||
};
|
||||
const sort_context = SortContext{ .frame_align = frame_align };
|
||||
|
|
@ -2189,8 +2198,8 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
|
|||
const call_frame_align = frame_align[@intFromEnum(FrameIndex.call_frame)];
|
||||
const stack_frame_align = frame_align[@intFromEnum(FrameIndex.stack_frame)];
|
||||
const args_frame_align = frame_align[@intFromEnum(FrameIndex.args_frame)];
|
||||
const needed_align = @max(call_frame_align, stack_frame_align);
|
||||
const need_align_stack = needed_align > args_frame_align;
|
||||
const needed_align = call_frame_align.max(stack_frame_align);
|
||||
const need_align_stack = needed_align.compare(.gt, args_frame_align);
|
||||
|
||||
// Create list of registers to save in the prologue.
|
||||
// TODO handle register classes
|
||||
|
|
@ -2214,21 +2223,21 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
|
|||
self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true);
|
||||
for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true);
|
||||
rsp_offset += stack_frame_align_offset;
|
||||
rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
|
||||
rsp_offset = @intCast(needed_align.forward(@intCast(rsp_offset)));
|
||||
rsp_offset -= stack_frame_align_offset;
|
||||
frame_size[@intFromEnum(FrameIndex.call_frame)] =
|
||||
@intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
|
||||
|
||||
return .{
|
||||
.stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
|
||||
.stack_mask = @as(u32, math.maxInt(u32)) << @intCast(if (need_align_stack) @intFromEnum(needed_align) else 0),
|
||||
.stack_adjust = @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
|
||||
.save_reg_list = save_reg_list,
|
||||
};
|
||||
}
|
||||
|
||||
fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 {
|
||||
const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
|
||||
return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1));
|
||||
fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) Alignment {
|
||||
const alloc_align = self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
|
||||
return @enumFromInt(@min(@intFromEnum(alloc_align), @ctz(frame_addr.off)));
|
||||
}
|
||||
|
||||
fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 {
|
||||
|
|
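The computeFrameLayout and getFrameAddrAlignment hunks above lean on two properties of the log2 encoding: shifting an all-ones word left by the log2 alignment yields an align-down mask, and the trailing-zero count of an offset bounds how aligned base + offset can be. A small sketch of both, with made-up values:

const std = @import("std");

// Align-down mask, as in the `stack_mask` computation above: all-ones shifted
// left by log2(alignment) clears the low bits when AND-ed with an address.
fn alignDownMask(align_log2: u5) u32 {
    return @as(u32, std.math.maxInt(u32)) << align_log2;
}

// Alignment of `base + off`, as in getFrameAddrAlignment above: it is capped
// both by the base allocation's alignment and by the largest power of two
// dividing `off`, whose log2 is the number of trailing zero bits.
fn frameAddrAlignLog2(alloc_align_log2: u6, off: u32) u6 {
    return @min(alloc_align_log2, @ctz(off));
}

test "align-down mask and offset alignment" {
    try std.testing.expectEqual(@as(u32, 0xFFFF_FFF0), alignDownMask(4));
    try std.testing.expectEqual(@as(u32, 96), @as(u32, 100) & alignDownMask(4));

    // A 16-byte-aligned frame entry at offset 4 is only 4-byte aligned;
    // at offset 0 it keeps the full 16-byte alignment (ctz(0) == 32).
    try std.testing.expectEqual(@as(u6, 2), frameAddrAlignLog2(4, 4));
    try std.testing.expectEqual(@as(u6, 4), frameAddrAlignLog2(4, 0));
}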
@ -2241,13 +2250,13 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
|
|||
const frame_align = frame_allocs_slice.items(.abi_align);
|
||||
|
||||
const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)];
|
||||
stack_frame_align.* = @max(stack_frame_align.*, alloc.abi_align);
|
||||
stack_frame_align.* = stack_frame_align.max(alloc.abi_align);
|
||||
|
||||
for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| {
|
||||
const abi_size = frame_size[@intFromEnum(frame_index)];
|
||||
if (abi_size != alloc.abi_size) continue;
|
||||
const abi_align = &frame_align[@intFromEnum(frame_index)];
|
||||
abi_align.* = @max(abi_align.*, alloc.abi_align);
|
||||
abi_align.* = abi_align.max(alloc.abi_align);
|
||||
|
||||
_ = self.free_frame_indices.swapRemoveAt(free_i);
|
||||
return frame_index;
|
||||
|
|
@ -2266,7 +2275,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
|
|||
.size = math.cast(u32, val_ty.abiSize(mod)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
|
||||
},
|
||||
.alignment = @max(ptr_ty.ptrAlignment(mod), 1),
|
||||
.alignment = ptr_ty.ptrAlignment(mod).max(.@"1"),
|
||||
}));
|
||||
}
|
||||
|
||||
|
|
@ -4266,7 +4275,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
|
|||
};
|
||||
defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);
|
||||
|
||||
const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: {
|
||||
const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) blk: {
|
||||
// TODO reusing the operand
|
||||
const reg = try self.copyToTmpRegister(ptr_union_ty, ptr);
|
||||
try self.genBinOpMir(
|
||||
|
|
@ -4309,7 +4318,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
|
|||
switch (operand) {
|
||||
.load_frame => |frame_addr| {
|
||||
if (tag_abi_size <= 8) {
|
||||
const off: i32 = if (layout.tag_align < layout.payload_align)
|
||||
const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
|
||||
@intCast(layout.payload_size)
|
||||
else
|
||||
0;
|
||||
|
|
@ -4321,7 +4330,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
|
|||
return self.fail("TODO implement get_union_tag for ABI larger than 8 bytes and operand {}", .{operand});
|
||||
},
|
||||
.register => {
|
||||
const shift: u6 = if (layout.tag_align < layout.payload_align)
|
||||
const shift: u6 = if (layout.tag_align.compare(.lt, layout.payload_align))
|
||||
@intCast(layout.payload_size * 8)
|
||||
else
|
||||
0;
|
||||
|
|
@ -5600,8 +5609,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const src_mcv = try self.resolveInst(operand);
|
||||
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8),
|
||||
.Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
|
||||
struct_obj.packedFieldBitOffset(mod, index)
|
||||
.Packed => if (mod.typeToStruct(container_ty)) |struct_type|
|
||||
mod.structPackedFieldBitOffset(struct_type, index)
|
||||
else
|
||||
0,
|
||||
};
|
||||
|
|
@ -8084,14 +8093,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
|||
// We need a properly aligned and sized call frame to be able to call this function.
|
||||
{
|
||||
const needed_call_frame =
|
||||
FrameAlloc.init(.{ .size = info.stack_byte_count, .alignment = info.stack_align });
|
||||
FrameAlloc.init(.{
|
||||
.size = info.stack_byte_count,
|
||||
.alignment = info.stack_align,
|
||||
});
|
||||
const frame_allocs_slice = self.frame_allocs.slice();
|
||||
const stack_frame_size =
|
||||
&frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
|
||||
stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
|
||||
const stack_frame_align =
|
||||
&frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
|
||||
stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
|
||||
stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
|
||||
}
|
||||
|
||||
try self.spillEflagsIfOccupied();
|
||||
|
|
@ -9944,7 +9956,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
|
|||
.indirect => try self.moveStrategy(ty, false),
|
||||
.load_frame => |frame_addr| try self.moveStrategy(
|
||||
ty,
|
||||
self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod),
|
||||
self.getFrameAddrAlignment(frame_addr).compare(.gte, ty.abiAlignment(mod)),
|
||||
),
|
||||
.lea_frame => .{ .move = .{ ._, .lea } },
|
||||
else => unreachable,
|
||||
|
|
@ -9973,10 +9985,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
|
|||
.base = .{ .reg = .ds },
|
||||
.disp = small_addr,
|
||||
});
|
||||
switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
|
||||
u32,
|
||||
switch (try self.moveStrategy(ty, ty.abiAlignment(mod).check(
|
||||
@as(u32, @bitCast(small_addr)),
|
||||
ty.abiAlignment(mod),
|
||||
))) {
|
||||
.move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
|
||||
.insert_extract => |ie| try self.asmRegisterMemoryImmediate(
|
||||
|
|
@ -10142,22 +10152,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
|
|||
);
|
||||
const src_alias = registerAlias(src_reg, abi_size);
|
||||
switch (try self.moveStrategy(ty, switch (base) {
|
||||
.none => mem.isAlignedGeneric(
|
||||
u32,
|
||||
@as(u32, @bitCast(disp)),
|
||||
ty.abiAlignment(mod),
|
||||
),
|
||||
.none => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
|
||||
.reg => |reg| switch (reg) {
|
||||
.es, .cs, .ss, .ds => mem.isAlignedGeneric(
|
||||
u32,
|
||||
@as(u32, @bitCast(disp)),
|
||||
ty.abiAlignment(mod),
|
||||
),
|
||||
.es, .cs, .ss, .ds => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
|
||||
else => false,
|
||||
},
|
||||
.frame => |frame_index| self.getFrameAddrAlignment(
|
||||
.{ .index = frame_index, .off = disp },
|
||||
) >= ty.abiAlignment(mod),
|
||||
).compare(.gte, ty.abiAlignment(mod)),
|
||||
})) {
|
||||
.move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias),
|
||||
.insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate(
|
||||
|
|
@ -11079,7 +11081,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
|
|||
stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
|
||||
const stack_frame_align =
|
||||
&frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
|
||||
stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
|
||||
stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
|
||||
}
|
||||
|
||||
try self.spillEflagsIfOccupied();
|
||||
|
|
@ -11418,13 +11420,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const frame_index =
|
||||
try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
|
||||
if (result_ty.containerLayout(mod) == .Packed) {
|
||||
const struct_obj = mod.typeToStruct(result_ty).?;
|
||||
const struct_type = mod.typeToStruct(result_ty).?;
|
||||
try self.genInlineMemset(
|
||||
.{ .lea_frame = .{ .index = frame_index } },
|
||||
.{ .immediate = 0 },
|
||||
.{ .immediate = result_ty.abiSize(mod) },
|
||||
);
|
||||
for (elements, 0..) |elem, elem_i| {
|
||||
for (elements, 0..) |elem, elem_i_usize| {
|
||||
const elem_i: u32 = @intCast(elem_i_usize);
|
||||
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
|
||||
|
||||
const elem_ty = result_ty.structFieldType(elem_i, mod);
|
||||
|
|
@ -11437,7 +11440,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
|
|||
}
|
||||
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
|
||||
const elem_abi_bits = elem_abi_size * 8;
|
||||
const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
|
||||
const elem_off = mod.structPackedFieldBitOffset(struct_type, elem_i);
|
||||
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
|
||||
const elem_bit_off = elem_off % elem_abi_bits;
|
||||
const elem_mcv = try self.resolveInst(elem);
|
||||
|
|
@ -11576,13 +11579,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
|
|||
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
|
||||
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
|
||||
const tag_int = tag_int_val.toUnsignedInt(mod);
|
||||
const tag_off: i32 = if (layout.tag_align < layout.payload_align)
|
||||
const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
|
||||
@intCast(layout.payload_size)
|
||||
else
|
||||
0;
|
||||
try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int });
|
||||
|
||||
const pl_off: i32 = if (layout.tag_align < layout.payload_align)
|
||||
const pl_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
|
||||
0
|
||||
else
|
||||
@intCast(layout.tag_size);
|
||||
|
|
@ -11823,7 +11826,7 @@ const CallMCValues = struct {
|
|||
args: []MCValue,
|
||||
return_value: InstTracking,
|
||||
stack_byte_count: u31,
|
||||
stack_align: u31,
|
||||
stack_align: Alignment,
|
||||
|
||||
fn deinit(self: *CallMCValues, func: *Self) void {
|
||||
func.gpa.free(self.args);
|
||||
|
|
@ -11867,12 +11870,12 @@ fn resolveCallingConventionValues(
|
|||
.Naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = InstTracking.init(.unreach);
|
||||
result.stack_align = 8;
|
||||
result.stack_align = .@"8";
|
||||
},
|
||||
.C => {
|
||||
var param_reg_i: usize = 0;
|
||||
var param_sse_reg_i: usize = 0;
|
||||
result.stack_align = 16;
|
||||
result.stack_align = .@"16";
|
||||
|
||||
switch (self.target.os.tag) {
|
||||
.windows => {
|
||||
|
|
@ -11957,7 +11960,7 @@ fn resolveCallingConventionValues(
|
|||
}
|
||||
|
||||
const param_size: u31 = @intCast(ty.abiSize(mod));
|
||||
const param_align: u31 = @intCast(ty.abiAlignment(mod));
|
||||
const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
|
||||
result.stack_byte_count =
|
||||
mem.alignForward(u31, result.stack_byte_count, param_align);
|
||||
arg.* = .{ .load_frame = .{
|
||||
|
|
@ -11968,7 +11971,7 @@ fn resolveCallingConventionValues(
|
|||
}
|
||||
},
|
||||
.Unspecified => {
|
||||
result.stack_align = 16;
|
||||
result.stack_align = .@"16";
|
||||
|
||||
// Return values
|
||||
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
|
||||
|
|
@ -11997,7 +12000,7 @@ fn resolveCallingConventionValues(
|
|||
continue;
|
||||
}
|
||||
const param_size: u31 = @intCast(ty.abiSize(mod));
|
||||
const param_align: u31 = @intCast(ty.abiAlignment(mod));
|
||||
const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
|
||||
result.stack_byte_count =
|
||||
mem.alignForward(u31, result.stack_byte_count, param_align);
|
||||
arg.* = .{ .load_frame = .{
|
||||
|
|
@ -12010,7 +12013,7 @@ fn resolveCallingConventionValues(
|
|||
else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
|
||||
}
|
||||
|
||||
result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align);
|
||||
result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count));
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -210,8 +210,9 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
|
|||
// it contains unaligned fields, it has class MEMORY"
|
||||
// "If the size of the aggregate exceeds a single eightbyte, each is classified
|
||||
// separately.".
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
const ty_size = ty.abiSize(mod);
|
||||
if (ty.containerLayout(mod) == .Packed) {
|
||||
if (struct_type.layout == .Packed) {
|
||||
assert(ty_size <= 128);
|
||||
result[0] = .integer;
|
||||
if (ty_size > 64) result[1] = .integer;
|
||||
|
|
@ -222,15 +223,13 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
|
|||
|
||||
var result_i: usize = 0; // out of 8
|
||||
var byte_i: usize = 0; // out of 8
|
||||
const fields = ty.structFields(mod);
|
||||
for (fields.values()) |field| {
|
||||
if (field.abi_align != .none) {
|
||||
if (field.abi_align.toByteUnitsOptional().? < field.ty.abiAlignment(mod)) {
|
||||
return memory_class;
|
||||
}
|
||||
}
|
||||
const field_size = field.ty.abiSize(mod);
|
||||
const field_class_array = classifySystemV(field.ty, mod, .other);
|
||||
for (struct_type.field_types.get(ip), 0..) |field_ty_ip, i| {
|
||||
const field_ty = field_ty_ip.toType();
|
||||
const field_align = struct_type.fieldAlign(ip, i);
|
||||
if (field_align != .none and field_align.compare(.lt, field_ty.abiAlignment(mod)))
|
||||
return memory_class;
|
||||
const field_size = field_ty.abiSize(mod);
|
||||
const field_class_array = classifySystemV(field_ty, mod, .other);
|
||||
const field_class = std.mem.sliceTo(&field_class_array, .none);
|
||||
if (byte_i + field_size <= 8) {
|
||||
// Combine this field with the previous one.
|
||||
|
|
@ -341,10 +340,11 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
|
|||
return memory_class;
|
||||
|
||||
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (union_obj.fieldAlign(ip, @intCast(field_index)).toByteUnitsOptional()) |a| {
|
||||
if (a < field_ty.toType().abiAlignment(mod)) {
|
||||
return memory_class;
|
||||
}
|
||||
const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
|
||||
if (field_align != .none and
|
||||
field_align.compare(.lt, field_ty.toType().abiAlignment(mod)))
|
||||
{
|
||||
return memory_class;
|
||||
}
|
||||
// Combine this field with the previous one.
|
||||
const field_class = classifySystemV(field_ty.toType(), mod, .other);
|
||||
|
|
@ -533,13 +533,3 @@ const Register = @import("bits.zig").Register;
|
|||
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
|
||||
const Type = @import("../../type.zig").Type;
|
||||
const Value = @import("../../value.zig").Value;
|
||||
|
||||
fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
|
||||
return .{
|
||||
.ty = Type.initTag(tag),
|
||||
.default_val = Value.initTag(.unreachable_value),
|
||||
.abi_align = 0,
|
||||
.offset = offset,
|
||||
.is_comptime = false,
|
||||
};
|
||||
}
|
||||
|
|
|
|||
101
src/codegen.zig
|
|
@ -22,6 +22,7 @@ const Type = @import("type.zig").Type;
|
|||
const TypedValue = @import("TypedValue.zig");
|
||||
const Value = @import("value.zig").Value;
|
||||
const Zir = @import("Zir.zig");
|
||||
const Alignment = InternPool.Alignment;
|
||||
|
||||
pub const Result = union(enum) {
|
||||
/// The `code` parameter passed to `generateSymbol` has the value ok.
|
||||
|
|
@ -116,7 +117,8 @@ pub fn generateLazySymbol(
|
|||
bin_file: *link.File,
|
||||
src_loc: Module.SrcLoc,
|
||||
lazy_sym: link.File.LazySymbol,
|
||||
alignment: *u32,
|
||||
// TODO don't use an "out" parameter like this; put it in the result instead
|
||||
alignment: *Alignment,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
reloc_info: RelocInfo,
|
||||
|
|
@ -141,7 +143,7 @@ pub fn generateLazySymbol(
|
|||
}
|
||||
|
||||
if (lazy_sym.ty.isAnyError(mod)) {
|
||||
alignment.* = 4;
|
||||
alignment.* = .@"4";
|
||||
const err_names = mod.global_error_set.keys();
|
||||
mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian);
|
||||
var offset = code.items.len;
|
||||
|
|
@ -157,7 +159,7 @@ pub fn generateLazySymbol(
|
|||
mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
|
||||
return Result.ok;
|
||||
} else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) {
|
||||
alignment.* = 1;
|
||||
alignment.* = .@"1";
|
||||
for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| {
|
||||
const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
|
||||
try code.ensureUnusedCapacity(tag_name.len + 1);
|
||||
|
|
@ -273,7 +275,7 @@ pub fn generateSymbol(
|
|||
const abi_align = typed_value.ty.abiAlignment(mod);
|
||||
|
||||
// error value first when its type is larger than the error union's payload
|
||||
if (error_align > payload_align) {
|
||||
if (error_align.order(payload_align) == .gt) {
|
||||
try code.writer().writeInt(u16, err_val, endian);
|
||||
}
|
||||
|
||||
|
|
@ -291,7 +293,7 @@ pub fn generateSymbol(
|
|||
.fail => |em| return .{ .fail = em },
|
||||
}
|
||||
const unpadded_end = code.items.len - begin;
|
||||
const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
|
||||
const padded_end = abi_align.forward(unpadded_end);
|
||||
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
|
||||
|
||||
if (padding > 0) {
|
||||
|
|
@ -300,11 +302,11 @@ pub fn generateSymbol(
|
|||
}
|
||||
|
||||
// Payload size is larger than error set, so emit our error set last
|
||||
if (error_align <= payload_align) {
|
||||
if (error_align.compare(.lte, payload_align)) {
|
||||
const begin = code.items.len;
|
||||
try code.writer().writeInt(u16, err_val, endian);
|
||||
const unpadded_end = code.items.len - begin;
|
||||
const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
|
||||
const padded_end = abi_align.forward(unpadded_end);
|
||||
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
|
||||
|
||||
if (padding > 0) {
|
||||
|
|
@ -474,23 +476,18 @@ pub fn generateSymbol(
|
|||
}
|
||||
}
|
||||
},
|
||||
.struct_type => |struct_type| {
|
||||
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
|
||||
|
||||
if (struct_obj.layout == .Packed) {
|
||||
const fields = struct_obj.fields.values();
|
||||
.struct_type => |struct_type| switch (struct_type.layout) {
|
||||
.Packed => {
|
||||
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
|
||||
return error.Overflow;
|
||||
const current_pos = code.items.len;
|
||||
try code.resize(current_pos + abi_size);
|
||||
var bits: u16 = 0;
|
||||
|
||||
for (fields, 0..) |field, index| {
|
||||
const field_ty = field.ty;
|
||||
|
||||
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
|
||||
const field_val = switch (aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field_ty.toIntern(),
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes[index] },
|
||||
} }),
|
||||
.elems => |elems| elems[index],
|
||||
|
|
@ -499,48 +496,51 @@ pub fn generateSymbol(
|
|||
|
||||
// pointer may point to a decl which must be marked used
|
||||
// but can also result in a relocation. Therefore we handle those separately.
|
||||
if (field_ty.zigTypeTag(mod) == .Pointer) {
|
||||
const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse
|
||||
if (field_ty.toType().zigTypeTag(mod) == .Pointer) {
|
||||
const field_size = math.cast(usize, field_ty.toType().abiSize(mod)) orelse
|
||||
return error.Overflow;
|
||||
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
|
||||
defer tmp_list.deinit();
|
||||
switch (try generateSymbol(bin_file, src_loc, .{
|
||||
.ty = field_ty,
|
||||
.ty = field_ty.toType(),
|
||||
.val = field_val.toValue(),
|
||||
}, &tmp_list, debug_output, reloc_info)) {
|
||||
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
} else {
|
||||
field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
|
||||
field_val.toValue().writeToPackedMemory(field_ty.toType(), mod, code.items[current_pos..], bits) catch unreachable;
|
||||
}
|
||||
bits += @as(u16, @intCast(field_ty.bitSize(mod)));
|
||||
bits += @as(u16, @intCast(field_ty.toType().bitSize(mod)));
|
||||
}
|
||||
} else {
|
||||
},
|
||||
.Auto, .Extern => {
|
||||
const struct_begin = code.items.len;
|
||||
const fields = struct_obj.fields.values();
|
||||
const field_types = struct_type.field_types.get(ip);
|
||||
const offsets = struct_type.offsets.get(ip);
|
||||
|
||||
var it = typed_value.ty.iterateStructOffsets(mod);
|
||||
|
||||
while (it.next()) |field_offset| {
|
||||
const field_ty = fields[field_offset.field].ty;
|
||||
|
||||
if (!field_ty.hasRuntimeBits(mod)) continue;
|
||||
var it = struct_type.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_index| {
|
||||
const field_ty = field_types[field_index];
|
||||
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field_ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes[field_offset.field] },
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes[field_index] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_offset.field],
|
||||
.elems => |elems| elems[field_index],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
const padding = math.cast(usize, field_offset.offset - (code.items.len - struct_begin)) orelse return error.Overflow;
|
||||
const padding = math.cast(
|
||||
usize,
|
||||
offsets[field_index] - (code.items.len - struct_begin),
|
||||
) orelse return error.Overflow;
|
||||
if (padding > 0) try code.appendNTimes(0, padding);
|
||||
|
||||
switch (try generateSymbol(bin_file, src_loc, .{
|
||||
.ty = field_ty,
|
||||
.ty = field_ty.toType(),
|
||||
.val = field_val.toValue(),
|
||||
}, code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
|
|
@ -548,9 +548,16 @@ pub fn generateSymbol(
|
|||
}
|
||||
}
|
||||
|
||||
const padding = math.cast(usize, std.mem.alignForward(u64, it.offset, @max(it.big_align, 1)) - (code.items.len - struct_begin)) orelse return error.Overflow;
|
||||
const size = struct_type.size(ip).*;
|
||||
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
|
||||
|
||||
const padding = math.cast(
|
||||
usize,
|
||||
std.mem.alignForward(u64, size, @max(alignment, 1)) -
|
||||
(code.items.len - struct_begin),
|
||||
) orelse return error.Overflow;
|
||||
if (padding > 0) try code.appendNTimes(0, padding);
|
||||
}
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
|
|
@@ -565,7 +572,7 @@ pub fn generateSymbol(
 }

 // Check if we should store the tag first.
-if (layout.tag_size > 0 and layout.tag_align >= layout.payload_align) {
+if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
 switch (try generateSymbol(bin_file, src_loc, .{
 .ty = typed_value.ty.unionTagType(mod).?,
 .val = un.tag.toValue(),
@@ -595,7 +602,7 @@ pub fn generateSymbol(
 }
 }

-if (layout.tag_size > 0 and layout.tag_align < layout.payload_align) {
+if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
 switch (try generateSymbol(bin_file, src_loc, .{
 .ty = union_obj.enum_tag_ty.toType(),
 .val = un.tag.toValue(),
@@ -695,9 +702,9 @@ fn lowerParentPtr(
 @intCast(field.index),
 mod,
 )),
-.Packed => if (mod.typeToStruct(base_type.toType())) |struct_obj|
-math.divExact(u16, struct_obj.packedFieldBitOffset(
-mod,
+.Packed => if (mod.typeToStruct(base_type.toType())) |struct_type|
+math.divExact(u16, mod.structPackedFieldBitOffset(
+struct_type,
 @intCast(field.index),
 ), 8) catch |err| switch (err) {
 error.UnexpectedRemainder => 0,
@@ -844,12 +851,12 @@ fn genDeclRef(
 // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
 if (tv.ty.castPtrToFn(mod)) |fn_ty| {
 if (mod.typeToFunc(fn_ty).?.is_generic) {
-return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
+return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod).toByteUnitsOptional().? });
 }
 } else if (tv.ty.zigTypeTag(mod) == .Pointer) {
 const elem_ty = tv.ty.elemType2(mod);
 if (!elem_ty.hasRuntimeBits(mod)) {
-return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) });
+return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod).toByteUnitsOptional().? });
 }
 }

@@ -1036,10 +1043,10 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
 const payload_align = payload_ty.abiAlignment(mod);
 const error_align = Type.anyerror.abiAlignment(mod);
-if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
 return 0;
 } else {
-return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align);
+return payload_align.forward(Type.anyerror.abiSize(mod));
 }
 }

@@ -1047,8 +1054,8 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
 const payload_align = payload_ty.abiAlignment(mod);
 const error_align = Type.anyerror.abiAlignment(mod);
-if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-return mem.alignForward(u64, payload_ty.abiSize(mod), error_align);
+if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+return error_align.forward(payload_ty.abiSize(mod));
 } else {
 return 0;
 }
|
|||
|
|
@@ -17,6 +17,7 @@ const LazySrcLoc = Module.LazySrcLoc;
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
 const InternPool = @import("../InternPool.zig");
+const Alignment = InternPool.Alignment;

 const BigIntLimb = std.math.big.Limb;
 const BigInt = std.math.big.int;
@@ -292,7 +293,7 @@ pub const Function = struct {

 const result: CValue = if (lowersToArray(ty, mod)) result: {
 const writer = f.object.code_header.writer();
-const alignment = 0;
+const alignment: Alignment = .none;
 const decl_c_value = try f.allocLocalValue(ty, alignment);
 const gpa = f.object.dg.gpa;
 try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -318,25 +319,25 @@ pub const Function = struct {
 /// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
 /// those which go into `allocs`. This function does not add the resulting local into `allocs`;
 /// that responsibility lies with the caller.
-fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
+fn allocLocalValue(f: *Function, ty: Type, alignment: Alignment) !CValue {
 const mod = f.object.dg.module;
 const gpa = f.object.dg.gpa;
 try f.locals.append(gpa, .{
 .cty_idx = try f.typeToIndex(ty, .complete),
 .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
 });
-return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
+return .{ .new_local = @intCast(f.locals.items.len - 1) };
 }

 fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
-const result = try f.allocAlignedLocal(ty, .{}, 0);
+const result = try f.allocAlignedLocal(ty, .{}, .none);
 log.debug("%{d}: allocating t{d}", .{ inst, result.new_local });
 return result;
 }

 /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
 /// not be used for persistent locals (i.e. those in `allocs`).
-fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
+fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: Alignment) !CValue {
 const mod = f.object.dg.module;
 if (f.free_locals_map.getPtr(.{
 .cty_idx = try f.typeToIndex(ty, .complete),
@ -1299,139 +1300,134 @@ pub const DeclGen = struct {
|
|||
}
|
||||
try writer.writeByte('}');
|
||||
},
|
||||
.struct_type => |struct_type| {
|
||||
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
|
||||
switch (struct_obj.layout) {
|
||||
.Auto, .Extern => {
|
||||
if (!location.isInitializer()) {
|
||||
.struct_type => |struct_type| switch (struct_type.layout) {
|
||||
.Auto, .Extern => {
|
||||
if (!location.isInitializer()) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
|
||||
try writer.writeByte('{');
|
||||
var empty = true;
|
||||
for (0..struct_type.field_types.len) |field_i| {
|
||||
const field_ty = struct_type.field_types.get(ip)[field_i].toType();
|
||||
if (struct_type.fieldIsComptime(ip, field_i)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
if (!empty) try writer.writeByte(',');
|
||||
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field_ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes[field_i] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
try dg.renderValue(writer, field_ty, field_val.toValue(), initializer_type);
|
||||
|
||||
empty = false;
|
||||
}
|
||||
try writer.writeByte('}');
|
||||
},
|
||||
.Packed => {
|
||||
const int_info = ty.intInfo(mod);
|
||||
|
||||
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
|
||||
const bit_offset_ty = try mod.intType(.unsigned, bits);
|
||||
const field_types = struct_type.field_types.get(ip);
|
||||
|
||||
var bit_offset: u64 = 0;
|
||||
var eff_num_fields: usize = 0;
|
||||
|
||||
for (field_types) |field_ty| {
|
||||
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
eff_num_fields += 1;
|
||||
}
|
||||
|
||||
if (eff_num_fields == 0) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderValue(writer, ty, Value.undef, initializer_type);
|
||||
try writer.writeByte(')');
|
||||
} else if (ty.bitSize(mod) > 64) {
|
||||
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
|
||||
var num_or = eff_num_fields - 1;
|
||||
while (num_or > 0) : (num_or -= 1) {
|
||||
try writer.writeAll("zig_or_");
|
||||
try dg.renderTypeForBuiltinFnName(writer, ty);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
|
||||
try writer.writeByte('{');
|
||||
var empty = true;
|
||||
for (struct_obj.fields.values(), 0..) |field, field_i| {
|
||||
if (field.is_comptime) continue;
|
||||
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
var eff_index: usize = 0;
|
||||
var needs_closing_paren = false;
|
||||
for (field_types, 0..) |field_ty, field_i| {
|
||||
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
if (!empty) try writer.writeByte(',');
|
||||
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field.ty.toIntern(),
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes[field_i] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type);
|
||||
|
||||
empty = false;
|
||||
}
|
||||
try writer.writeByte('}');
|
||||
},
|
||||
.Packed => {
|
||||
const int_info = ty.intInfo(mod);
|
||||
|
||||
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
|
||||
const bit_offset_ty = try mod.intType(.unsigned, bits);
|
||||
|
||||
var bit_offset: u64 = 0;
|
||||
var eff_num_fields: usize = 0;
|
||||
|
||||
for (struct_obj.fields.values()) |field| {
|
||||
if (field.is_comptime) continue;
|
||||
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
eff_num_fields += 1;
|
||||
}
|
||||
|
||||
if (eff_num_fields == 0) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderValue(writer, ty, Value.undef, initializer_type);
|
||||
try writer.writeByte(')');
|
||||
} else if (ty.bitSize(mod) > 64) {
|
||||
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
|
||||
var num_or = eff_num_fields - 1;
|
||||
while (num_or > 0) : (num_or -= 1) {
|
||||
try writer.writeAll("zig_or_");
|
||||
const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
|
||||
if (bit_offset != 0) {
|
||||
try writer.writeAll("zig_shl_");
|
||||
try dg.renderTypeForBuiltinFnName(writer, ty);
|
||||
try writer.writeByte('(');
|
||||
}
|
||||
|
||||
var eff_index: usize = 0;
|
||||
var needs_closing_paren = false;
|
||||
for (struct_obj.fields.values(), 0..) |field, field_i| {
|
||||
if (field.is_comptime) continue;
|
||||
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field.ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes[field_i] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
|
||||
if (bit_offset != 0) {
|
||||
try writer.writeAll("zig_shl_");
|
||||
try dg.renderTypeForBuiltinFnName(writer, ty);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
|
||||
try writer.writeAll(", ");
|
||||
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
|
||||
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
|
||||
try writer.writeByte(')');
|
||||
} else {
|
||||
try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
|
||||
}
|
||||
|
||||
if (needs_closing_paren) try writer.writeByte(')');
|
||||
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
|
||||
|
||||
bit_offset += field.ty.bitSize(mod);
|
||||
needs_closing_paren = true;
|
||||
eff_index += 1;
|
||||
}
|
||||
} else {
|
||||
try writer.writeByte('(');
|
||||
// a << a_off | b << b_off | c << c_off
|
||||
var empty = true;
|
||||
for (struct_obj.fields.values(), 0..) |field, field_i| {
|
||||
if (field.is_comptime) continue;
|
||||
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
if (!empty) try writer.writeAll(" | ");
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
|
||||
try writer.writeAll(", ");
|
||||
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
|
||||
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
|
||||
try writer.writeByte(')');
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field.ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes[field_i] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
if (bit_offset != 0) {
|
||||
try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
|
||||
try writer.writeAll(" << ");
|
||||
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
|
||||
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
|
||||
} else {
|
||||
try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
|
||||
}
|
||||
|
||||
bit_offset += field.ty.bitSize(mod);
|
||||
empty = false;
|
||||
} else {
|
||||
try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
|
||||
}
|
||||
try writer.writeByte(')');
|
||||
|
||||
if (needs_closing_paren) try writer.writeByte(')');
|
||||
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
|
||||
|
||||
bit_offset += field_ty.toType().bitSize(mod);
|
||||
needs_closing_paren = true;
|
||||
eff_index += 1;
|
||||
}
|
||||
},
|
||||
}
|
||||
} else {
|
||||
try writer.writeByte('(');
|
||||
// a << a_off | b << b_off | c << c_off
|
||||
var empty = true;
|
||||
for (field_types, 0..) |field_ty, field_i| {
|
||||
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
||||
if (!empty) try writer.writeAll(" | ");
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
try writer.writeByte(')');
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes[field_i] },
|
||||
} }),
|
||||
.elems => |elems| elems[field_i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
if (bit_offset != 0) {
|
||||
try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
|
||||
try writer.writeAll(" << ");
|
||||
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
|
||||
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
|
||||
} else {
|
||||
try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
|
||||
}
|
||||
|
||||
bit_offset += field_ty.toType().bitSize(mod);
|
||||
empty = false;
|
||||
}
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
|
|
@ -1723,7 +1719,7 @@ pub const DeclGen = struct {
|
|||
ty: Type,
|
||||
name: CValue,
|
||||
qualifiers: CQualifiers,
|
||||
alignment: u64,
|
||||
alignment: Alignment,
|
||||
kind: CType.Kind,
|
||||
) error{ OutOfMemory, AnalysisFail }!void {
|
||||
const mod = dg.module;
|
||||
|
|
@ -1854,7 +1850,7 @@ pub const DeclGen = struct {
|
|||
decl.ty,
|
||||
.{ .decl = decl_index },
|
||||
CQualifiers.init(.{ .@"const" = variable.is_const }),
|
||||
@as(u32, @intCast(decl.alignment.toByteUnits(0))),
|
||||
decl.alignment,
|
||||
.complete,
|
||||
);
|
||||
try fwd_decl_writer.writeAll(";\n");
|
||||
|
|
@ -2460,7 +2456,7 @@ pub fn genErrDecls(o: *Object) !void {
|
|||
} });
|
||||
|
||||
try writer.writeAll("static ");
|
||||
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete);
|
||||
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, .none, .complete);
|
||||
try writer.writeAll(" = ");
|
||||
try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer);
|
||||
try writer.writeAll(";\n");
|
||||
|
|
@ -2472,7 +2468,7 @@ pub fn genErrDecls(o: *Object) !void {
|
|||
});
|
||||
|
||||
try writer.writeAll("static ");
|
||||
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
|
||||
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, .none, .complete);
|
||||
try writer.writeAll(" = {");
|
||||
for (mod.global_error_set.keys(), 0..) |name_nts, value| {
|
||||
const name = mod.intern_pool.stringToSlice(name_nts);
|
||||
|
|
@ -2523,7 +2519,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
|
|||
try w.writeByte(' ');
|
||||
try w.writeAll(fn_name);
|
||||
try w.writeByte('(');
|
||||
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
|
||||
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete);
|
||||
try w.writeAll(") {\n switch (tag) {\n");
|
||||
for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
|
||||
const index = @as(u32, @intCast(index_usize));
|
||||
|
|
@ -2546,7 +2542,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
|
|||
try w.print(" case {}: {{\n static ", .{
|
||||
try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
|
||||
});
|
||||
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
|
||||
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
|
||||
try w.writeAll(" = ");
|
||||
try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer);
|
||||
try w.writeAll(";\n return (");
|
||||
|
|
@ -2706,7 +2702,7 @@ pub fn genDecl(o: *Object) !void {
|
|||
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
|
||||
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
|
||||
try w.print("zig_linksection(\"{s}\", ", .{s});
|
||||
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment.toByteUnits(0), .complete);
|
||||
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment, .complete);
|
||||
if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
|
||||
try w.writeAll(" = ");
|
||||
try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
|
||||
|
|
@ -2717,14 +2713,14 @@ pub fn genDecl(o: *Object) !void {
|
|||
const fwd_decl_writer = o.dg.fwd_decl.writer();
|
||||
|
||||
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
|
||||
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
|
||||
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment, .complete);
|
||||
try fwd_decl_writer.writeAll(";\n");
|
||||
|
||||
const w = o.writer();
|
||||
if (!is_global) try w.writeAll("static ");
|
||||
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
|
||||
try w.print("zig_linksection(\"{s}\", ", .{s});
|
||||
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
|
||||
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment, .complete);
|
||||
if (decl.@"linksection" != .none) try w.writeAll(", read)");
|
||||
try w.writeAll(" = ");
|
||||
try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
|
||||
|
|
@ -3353,8 +3349,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
|
||||
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
|
||||
alignment >= src_ty.abiAlignment(mod)
|
||||
const is_aligned = if (ptr_info.flags.alignment != .none)
|
||||
ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
|
||||
else
|
||||
true;
|
||||
const is_array = lowersToArray(src_ty, mod);
|
||||
|
|
@ -3625,8 +3621,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
|
|||
return .none;
|
||||
}
|
||||
|
||||
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
|
||||
alignment >= src_ty.abiAlignment(mod)
|
||||
const is_aligned = if (ptr_info.flags.alignment != .none)
|
||||
ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
|
||||
else
|
||||
true;
|
||||
const is_array = lowersToArray(ptr_info.child.toType(), mod);
|
||||
|
|
@@ -4847,7 +4843,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
 if (is_reg) {
 const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
 try writer.writeAll("register ");
-const alignment = 0;
+const alignment: Alignment = .none;
 const local_value = try f.allocLocalValue(output_ty, alignment);
 try f.allocs.put(gpa, local_value.new_local, false);
 try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete);
@@ -4880,7 +4876,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
 if (asmInputNeedsLocal(f, constraint, input_val)) {
 const input_ty = f.typeOf(input);
 if (is_reg) try writer.writeAll("register ");
-const alignment = 0;
+const alignment: Alignment = .none;
 const local_value = try f.allocLocalValue(input_ty, alignment);
 try f.allocs.put(gpa, local_value.new_local, false);
 try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete);
@ -5230,7 +5226,8 @@ fn fieldLocation(
|
|||
const container_ty = container_ptr_ty.childType(mod);
|
||||
return switch (container_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (container_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| {
|
||||
.Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index_usize| {
|
||||
const next_field_index: u32 = @intCast(next_field_index_usize);
|
||||
if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
|
||||
const field_ty = container_ty.structFieldType(next_field_index, mod);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
|
@ -5238,7 +5235,7 @@ fn fieldLocation(
|
|||
break .{ .field = if (container_ty.isSimpleTuple(mod))
|
||||
.{ .field = next_field_index }
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } };
|
||||
.{ .identifier = ip.stringToSlice(container_ty.legacyStructFieldName(next_field_index, mod)) } };
|
||||
} else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
|
||||
.Packed => if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
|
||||
.{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) + @divExact(container_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8) }
|
||||
|
|
@ -5425,14 +5422,14 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
|
||||
.{ .field = extra.field_index }
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
|
||||
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
|
||||
.Packed => {
|
||||
const struct_obj = mod.typeToStruct(struct_ty).?;
|
||||
const struct_type = mod.typeToStruct(struct_ty).?;
|
||||
const int_info = struct_ty.intInfo(mod);
|
||||
|
||||
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
|
||||
|
||||
const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
|
||||
const bit_offset = mod.structPackedFieldBitOffset(struct_type, extra.field_index);
|
||||
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
|
||||
|
||||
const field_int_signedness = if (inst_ty.isAbiInt(mod))
|
||||
|
|
@ -5487,7 +5484,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
|
||||
.{ .field = extra.field_index }
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
|
||||
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
|
||||
|
||||
.union_type => |union_type| field_name: {
|
||||
const union_obj = ip.loadUnionType(union_type);
|
||||
|
|
@ -6820,7 +6817,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
}
|
||||
},
|
||||
.Struct => switch (inst_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
|
||||
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i_usize| {
|
||||
const field_i: u32 = @intCast(field_i_usize);
|
||||
if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
|
||||
const field_ty = inst_ty.structFieldType(field_i, mod);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
|
@ -6829,7 +6827,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
|
||||
.{ .field = field_i }
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) });
|
||||
.{ .identifier = ip.stringToSlice(inst_ty.legacyStructFieldName(field_i, mod)) });
|
||||
try a.assign(f, writer);
|
||||
try f.writeCValue(writer, element, .Other);
|
||||
try a.end(f, writer);
|
||||
|
|
|
|||
|
|
@ -283,14 +283,20 @@ pub const CType = extern union {
|
|||
@"align": Alignment,
|
||||
abi: Alignment,
|
||||
|
||||
pub fn init(alignment: u64, abi_alignment: u32) AlignAs {
|
||||
const @"align" = Alignment.fromByteUnits(alignment);
|
||||
const abi_align = Alignment.fromNonzeroByteUnits(abi_alignment);
|
||||
pub fn init(@"align": Alignment, abi_align: Alignment) AlignAs {
|
||||
assert(abi_align != .none);
|
||||
return .{
|
||||
.@"align" = if (@"align" != .none) @"align" else abi_align,
|
||||
.abi = abi_align,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn initByteUnits(alignment: u64, abi_alignment: u32) AlignAs {
|
||||
return init(
|
||||
Alignment.fromByteUnits(alignment),
|
||||
Alignment.fromNonzeroByteUnits(abi_alignment),
|
||||
);
|
||||
}
|
||||
pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
|
||||
const abi_align = ty.abiAlignment(mod);
|
||||
return init(abi_align, abi_align);
|
||||
|
|
@ -1360,6 +1366,7 @@ pub const CType = extern union {
|
|||
|
||||
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
|
||||
const mod = lookup.getModule();
|
||||
const ip = &mod.intern_pool;
|
||||
|
||||
self.* = undefined;
|
||||
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
|
||||
|
|
@ -1382,12 +1389,12 @@ pub const CType = extern union {
|
|||
.array => switch (kind) {
|
||||
.forward, .complete, .global => {
|
||||
const abi_size = ty.abiSize(mod);
|
||||
const abi_align = ty.abiAlignment(mod);
|
||||
const abi_align = ty.abiAlignment(mod).toByteUnits(0);
|
||||
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
|
||||
.len = @divExact(abi_size, abi_align),
|
||||
.elem_type = tagFromIntInfo(.{
|
||||
.signedness = .unsigned,
|
||||
.bits = @as(u16, @intCast(abi_align * 8)),
|
||||
.bits = @intCast(abi_align * 8),
|
||||
}).toIndex(),
|
||||
} } };
|
||||
self.value = .{ .cty = initPayload(&self.storage.seq) };
|
||||
|
|
@ -1488,10 +1495,10 @@ pub const CType = extern union {
|
|||
},
|
||||
|
||||
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
|
||||
if (mod.typeToStruct(ty)) |struct_obj| {
|
||||
try self.initType(struct_obj.backing_int_ty, kind, lookup);
|
||||
if (mod.typeToPackedStruct(ty)) |packed_struct| {
|
||||
try self.initType(packed_struct.backingIntType(ip).toType(), kind, lookup);
|
||||
} else {
|
||||
const bits = @as(u16, @intCast(ty.bitSize(mod)));
|
||||
const bits: u16 = @intCast(ty.bitSize(mod));
|
||||
const int_ty = try mod.intType(.unsigned, bits);
|
||||
try self.initType(int_ty, kind, lookup);
|
||||
}
|
||||
|
|
@ -1722,7 +1729,6 @@ pub const CType = extern union {
|
|||
|
||||
.Fn => {
|
||||
const info = mod.typeToFunc(ty).?;
|
||||
const ip = &mod.intern_pool;
|
||||
if (!info.is_generic) {
|
||||
if (lookup.isMutable()) {
|
||||
const param_kind: Kind = switch (kind) {
|
||||
|
|
@ -1947,7 +1953,8 @@ pub const CType = extern union {
|
|||
|
||||
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
|
||||
var c_field_i: usize = 0;
|
||||
for (0..fields_len) |field_i| {
|
||||
for (0..fields_len) |field_i_usize| {
|
||||
const field_i: u32 = @intCast(field_i_usize);
|
||||
const field_ty = ty.structFieldType(field_i, mod);
|
||||
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
|
||||
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
|
@ -1958,7 +1965,7 @@ pub const CType = extern union {
|
|||
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
|
||||
else
|
||||
arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
|
||||
.Struct => ty.structFieldName(field_i, mod),
|
||||
.Struct => ty.legacyStructFieldName(field_i, mod),
|
||||
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
|
||||
else => unreachable,
|
||||
})),
|
||||
|
|
@ -2091,7 +2098,8 @@ pub const CType = extern union {
|
|||
.Struct => ty.structFieldCount(mod),
|
||||
.Union => mod.typeToUnion(ty).?.field_names.len,
|
||||
else => unreachable,
|
||||
}) |field_i| {
|
||||
}) |field_i_usize| {
|
||||
const field_i: u32 = @intCast(field_i_usize);
|
||||
const field_ty = ty.structFieldType(field_i, mod);
|
||||
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
|
||||
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
|
@ -2110,7 +2118,7 @@ pub const CType = extern union {
|
|||
std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable
|
||||
else
|
||||
ip.stringToSlice(switch (zig_ty_tag) {
|
||||
.Struct => ty.structFieldName(field_i, mod),
|
||||
.Struct => ty.legacyStructFieldName(field_i, mod),
|
||||
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
|
||||
else => unreachable,
|
||||
}),
|
||||
|
|
@ -2219,7 +2227,8 @@ pub const CType = extern union {
|
|||
.Struct => ty.structFieldCount(mod),
|
||||
.Union => mod.typeToUnion(ty).?.field_names.len,
|
||||
else => unreachable,
|
||||
}) |field_i| {
|
||||
}) |field_i_usize| {
|
||||
const field_i: u32 = @intCast(field_i_usize);
|
||||
const field_ty = ty.structFieldType(field_i, mod);
|
||||
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
|
||||
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
|
|
@ -2234,7 +2243,7 @@ pub const CType = extern union {
|
|||
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
|
||||
else
|
||||
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
|
||||
.Struct => ty.structFieldName(field_i, mod),
|
||||
.Struct => ty.legacyStructFieldName(field_i, mod),
|
||||
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
|
||||
else => unreachable,
|
||||
}));
|
||||
|
|
|
|||
File diff suppressed because it is too large
@ -792,24 +792,28 @@ pub const DeclGen = struct {
|
|||
},
|
||||
.vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
|
||||
.struct_type => {
|
||||
const struct_ty = mod.typeToStruct(ty).?;
|
||||
if (struct_ty.layout == .Packed) {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
if (struct_type.layout == .Packed) {
|
||||
return dg.todo("packed struct constants", .{});
|
||||
}
|
||||
|
||||
// TODO iterate with runtime order instead so that struct field
|
||||
// reordering can be enabled for this backend.
|
||||
const struct_begin = self.size;
|
||||
for (struct_ty.fields.values(), 0..) |field, i| {
|
||||
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
|
||||
for (struct_type.field_types.get(ip), 0..) |field_ty, i_usize| {
|
||||
const i: u32 = @intCast(i_usize);
|
||||
if (struct_type.fieldIsComptime(ip, i)) continue;
|
||||
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
|
||||
|
||||
const field_val = switch (aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.ty = field.ty.toIntern(),
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes[i] },
|
||||
} }),
|
||||
.elems => |elems| elems[i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
try self.lower(field.ty, field_val.toValue());
|
||||
try self.lower(field_ty.toType(), field_val.toValue());
|
||||
|
||||
// Add padding if required.
|
||||
// TODO: Add to type generation as well?
|
||||
|
|
@ -838,7 +842,7 @@ pub const DeclGen = struct {
|
|||
const active_field_ty = union_obj.field_types.get(ip)[active_field].toType();
|
||||
|
||||
const has_tag = layout.tag_size != 0;
|
||||
const tag_first = layout.tag_align >= layout.payload_align;
|
||||
const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
|
||||
|
||||
if (has_tag and tag_first) {
|
||||
try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
|
||||
|
|
@ -1094,7 +1098,7 @@ pub const DeclGen = struct {
|
|||
val,
|
||||
.UniformConstant,
|
||||
false,
|
||||
alignment,
|
||||
@intCast(alignment.toByteUnits(0)),
|
||||
);
|
||||
log.debug("indirect constant: index = {}", .{@intFromEnum(spv_decl_index)});
|
||||
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
|
||||
|
|
@ -1180,7 +1184,7 @@ pub const DeclGen = struct {
|
|||
var member_names = std.BoundedArray(CacheString, 4){};
|
||||
|
||||
const has_tag = layout.tag_size != 0;
|
||||
const tag_first = layout.tag_align >= layout.payload_align;
|
||||
const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
|
||||
const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
|
||||
|
||||
if (has_tag and tag_first) {
|
||||
|
|
@ -1333,7 +1337,7 @@ pub const DeclGen = struct {
|
|||
} });
|
||||
},
|
||||
.Struct => {
|
||||
const struct_ty = switch (ip.indexToKey(ty.toIntern())) {
|
||||
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
|
||||
.anon_struct_type => |tuple| {
|
||||
const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
|
||||
defer self.gpa.free(member_types);
|
||||
|
|
@ -1350,13 +1354,12 @@ pub const DeclGen = struct {
|
|||
.member_types = member_types[0..member_index],
|
||||
} });
|
||||
},
|
||||
.struct_type => |struct_ty| struct_ty,
|
||||
.struct_type => |struct_type| struct_type,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const struct_obj = mod.structPtrUnwrap(struct_ty.index).?;
|
||||
if (struct_obj.layout == .Packed) {
|
||||
return try self.resolveType(struct_obj.backing_int_ty, .direct);
|
||||
if (struct_type.layout == .Packed) {
|
||||
return try self.resolveType(struct_type.backingIntType(ip).toType(), .direct);
|
||||
}
|
||||
|
||||
var member_types = std.ArrayList(CacheRef).init(self.gpa);
|
||||
|
|
@ -1365,16 +1368,15 @@ pub const DeclGen = struct {
|
|||
var member_names = std.ArrayList(CacheString).init(self.gpa);
|
||||
defer member_names.deinit();
|
||||
|
||||
var it = struct_obj.runtimeFieldIterator(mod);
|
||||
while (it.next()) |field_and_index| {
|
||||
const field = field_and_index.field;
|
||||
const index = field_and_index.index;
|
||||
const field_name = ip.stringToSlice(struct_obj.fields.keys()[index]);
|
||||
try member_types.append(try self.resolveType(field.ty, .indirect));
|
||||
var it = struct_type.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_index| {
|
||||
const field_ty = struct_type.field_types.get(ip)[field_index];
|
||||
const field_name = ip.stringToSlice(struct_type.field_names.get(ip)[field_index]);
|
||||
try member_types.append(try self.resolveType(field_ty.toType(), .indirect));
|
||||
try member_names.append(try self.spv.resolveString(field_name));
|
||||
}
|
||||
|
||||
const name = ip.stringToSlice(try struct_obj.getFullyQualifiedName(self.module));
|
||||
const name = ip.stringToSlice(try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod));
|
||||
|
||||
return try self.spv.resolve(.{ .struct_type = .{
|
||||
.name = try self.spv.resolveString(name),
|
||||
|
|
@ -1500,7 +1502,7 @@ pub const DeclGen = struct {
|
|||
const error_align = Type.anyerror.abiAlignment(mod);
|
||||
const payload_align = payload_ty.abiAlignment(mod);
|
||||
|
||||
const error_first = error_align > payload_align;
|
||||
const error_first = error_align.compare(.gt, payload_align);
|
||||
return .{
|
||||
.payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
|
||||
.error_first = error_first,
|
||||
|
|
@ -1662,7 +1664,7 @@ pub const DeclGen = struct {
|
|||
init_val,
|
||||
actual_storage_class,
|
||||
final_storage_class == .Generic,
|
||||
@as(u32, @intCast(decl.alignment.toByteUnits(0))),
|
||||
@intCast(decl.alignment.toByteUnits(0)),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -2603,7 +2605,7 @@ pub const DeclGen = struct {
|
|||
if (layout.payload_size == 0) return union_handle;
|
||||
|
||||
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
|
||||
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
|
||||
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
|
||||
return try self.extractField(tag_ty, union_handle, tag_index);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -1118,7 +1118,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
 },
 };

-const required_alignment = tv.ty.abiAlignment(mod);
+const required_alignment: u32 = @intCast(tv.ty.abiAlignment(mod).toByteUnits(0));
 const atom = self.getAtomPtr(atom_index);
 atom.size = @as(u32, @intCast(code.len));
 atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@@ -1196,7 +1196,7 @@ fn updateLazySymbolAtom(
 const gpa = self.base.allocator;
 const mod = self.base.options.module.?;

-var required_alignment: u32 = undefined;
+var required_alignment: InternPool.Alignment = .none;
 var code_buffer = std.ArrayList(u8).init(gpa);
 defer code_buffer.deinit();

@@ -1240,7 +1240,7 @@ fn updateLazySymbolAtom(
 symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
 symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };

-const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
 errdefer self.freeAtom(atom_index);

 log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1322,7 +1322,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
 const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));

 log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
-const required_alignment = decl.getAlignment(mod);
+const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));

 const decl_metadata = self.decls.get(decl_index).?;
 const atom_index = decl_metadata.atom;
|
|||
|
|
@ -341,37 +341,51 @@ pub const DeclState = struct {
|
|||
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
|
||||
}
|
||||
},
|
||||
.struct_type => |struct_type| s: {
|
||||
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
|
||||
.struct_type => |struct_type| {
|
||||
// DW.AT.name, DW.FORM.string
|
||||
try ty.print(dbg_info_buffer.writer(), mod);
|
||||
try dbg_info_buffer.append(0);
|
||||
|
||||
if (struct_obj.layout == .Packed) {
|
||||
if (struct_type.layout == .Packed) {
|
||||
log.debug("TODO implement .debug_info for packed structs", .{});
|
||||
break :blk;
|
||||
}
|
||||
|
||||
for (
|
||||
struct_obj.fields.keys(),
|
||||
struct_obj.fields.values(),
|
||||
0..,
|
||||
) |field_name_ip, field, field_index| {
|
||||
if (!field.ty.hasRuntimeBits(mod)) continue;
|
||||
const field_name = ip.stringToSlice(field_name_ip);
|
||||
// DW.AT.member
|
||||
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
|
||||
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_member));
|
||||
// DW.AT.name, DW.FORM.string
|
||||
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
|
||||
dbg_info_buffer.appendAssumeCapacity(0);
|
||||
// DW.AT.type, DW.FORM.ref4
|
||||
var index = dbg_info_buffer.items.len;
|
||||
try dbg_info_buffer.resize(index + 4);
|
||||
try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
|
||||
// DW.AT.data_member_location, DW.FORM.udata
|
||||
const field_off = ty.structFieldOffset(field_index, mod);
|
||||
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
|
||||
if (struct_type.isTuple(ip)) {
|
||||
for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
|
||||
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
|
||||
// DW.AT.member
|
||||
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_member));
|
||||
// DW.AT.name, DW.FORM.string
|
||||
try dbg_info_buffer.writer().print("{d}\x00", .{field_index});
|
||||
// DW.AT.type, DW.FORM.ref4
|
||||
var index = dbg_info_buffer.items.len;
|
||||
try dbg_info_buffer.resize(index + 4);
|
||||
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index)));
|
||||
// DW.AT.data_member_location, DW.FORM.udata
|
||||
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
|
||||
}
|
||||
} else {
|
||||
for (
|
||||
struct_type.field_names.get(ip),
|
||||
struct_type.field_types.get(ip),
|
||||
struct_type.offsets.get(ip),
|
||||
) |field_name_ip, field_ty, field_off| {
|
||||
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
|
||||
const field_name = ip.stringToSlice(field_name_ip);
|
||||
// DW.AT.member
|
||||
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
|
||||
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_member));
|
||||
// DW.AT.name, DW.FORM.string
|
||||
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
|
||||
dbg_info_buffer.appendAssumeCapacity(0);
|
||||
// DW.AT.type, DW.FORM.ref4
|
||||
var index = dbg_info_buffer.items.len;
|
||||
try dbg_info_buffer.resize(index + 4);
|
||||
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
|
||||
// DW.AT.data_member_location, DW.FORM.udata
|
||||
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
|
||||
}
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
|
|
@ -416,8 +430,8 @@ pub const DeclState = struct {
|
|||
.Union => {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
const layout = mod.getUnionLayout(union_obj);
|
||||
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
|
||||
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
|
||||
const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
|
||||
const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
|
||||
// TODO this is temporary to match current state of unions in Zig - we don't yet have
|
||||
// safety checks implemented meaning the implicit tag is not yet stored and generated
|
||||
// for untagged unions.
|
||||
|
|
@ -496,11 +510,11 @@ pub const DeclState = struct {
|
|||
.ErrorUnion => {
|
||||
const error_ty = ty.errorUnionSet(mod);
|
||||
const payload_ty = ty.errorUnionPayload(mod);
|
||||
const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod);
|
||||
const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod);
|
||||
const error_align = Type.anyerror.abiAlignment(mod);
|
||||
const abi_size = ty.abiSize(mod);
|
||||
const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
|
||||
const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);
|
||||
const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0;
|
||||
const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod);
|
||||
|
||||
// DW.AT.structure_type
|
||||
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_type));
|
||||
|
|
|
|||
|
|
@ -409,7 +409,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
const image_base = self.calcImageBase();
|
||||
|
||||
if (self.phdr_table_index == null) {
|
||||
self.phdr_table_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_table_index = @intCast(self.phdrs.items.len);
|
||||
const p_align: u16 = switch (self.ptr_width) {
|
||||
.p32 => @alignOf(elf.Elf32_Phdr),
|
||||
.p64 => @alignOf(elf.Elf64_Phdr),
|
||||
|
|
@ -428,7 +428,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_table_load_index == null) {
|
||||
self.phdr_table_load_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_table_load_index = @intCast(self.phdrs.items.len);
|
||||
// TODO Same as for GOT
|
||||
try self.phdrs.append(gpa, .{
|
||||
.p_type = elf.PT_LOAD,
|
||||
|
|
@ -444,7 +444,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_load_re_index == null) {
|
||||
self.phdr_load_re_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_load_re_index = @intCast(self.phdrs.items.len);
|
||||
const file_size = self.base.options.program_code_size_hint;
|
||||
const p_align = self.page_size;
|
||||
const off = self.findFreeSpace(file_size, p_align);
|
||||
|
|
@ -465,7 +465,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_got_index == null) {
|
||||
self.phdr_got_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_got_index = @intCast(self.phdrs.items.len);
|
||||
const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
|
||||
// We really only need ptr alignment but since we are using PROGBITS, linux requires
|
||||
// page align.
|
||||
|
|
@ -490,7 +490,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_load_ro_index == null) {
|
||||
self.phdr_load_ro_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_load_ro_index = @intCast(self.phdrs.items.len);
|
||||
// TODO Find a hint about how much data need to be in rodata ?
|
||||
const file_size = 1024;
|
||||
// Same reason as for GOT
|
||||
|
|
@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_load_rw_index == null) {
|
||||
self.phdr_load_rw_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_load_rw_index = @intCast(self.phdrs.items.len);
|
||||
// TODO Find a hint about how much data need to be in data ?
|
||||
const file_size = 1024;
|
||||
// Same reason as for GOT
|
||||
|
|
@ -536,7 +536,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.phdr_load_zerofill_index == null) {
|
||||
self.phdr_load_zerofill_index = @as(u16, @intCast(self.phdrs.items.len));
|
||||
self.phdr_load_zerofill_index = @intCast(self.phdrs.items.len);
|
||||
const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
|
||||
const off = self.phdrs.items[self.phdr_load_rw_index.?].p_offset;
|
||||
log.debug("found PT_LOAD zerofill free space 0x{x} to 0x{x}", .{ off, off });
|
||||
|
|
@ -556,7 +556,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.shstrtab_section_index == null) {
|
||||
self.shstrtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.shstrtab_section_index = @intCast(self.shdrs.items.len);
|
||||
assert(self.shstrtab.buffer.items.len == 0);
|
||||
try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
|
||||
const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
|
||||
|
|
@ -578,7 +578,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.strtab_section_index == null) {
|
||||
self.strtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.strtab_section_index = @intCast(self.shdrs.items.len);
|
||||
assert(self.strtab.buffer.items.len == 0);
|
||||
try self.strtab.buffer.append(gpa, 0); // need a 0 at position 0
|
||||
const off = self.findFreeSpace(self.strtab.buffer.items.len, 1);
|
||||
|
|
@ -600,7 +600,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.text_section_index == null) {
|
||||
self.text_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.text_section_index = @intCast(self.shdrs.items.len);
|
||||
const phdr = &self.phdrs.items[self.phdr_load_re_index.?];
|
||||
try self.shdrs.append(gpa, .{
|
||||
.sh_name = try self.shstrtab.insert(gpa, ".text"),
|
||||
|
|
@ -620,7 +620,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.got_section_index == null) {
|
||||
self.got_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.got_section_index = @intCast(self.shdrs.items.len);
|
||||
const phdr = &self.phdrs.items[self.phdr_got_index.?];
|
||||
try self.shdrs.append(gpa, .{
|
||||
.sh_name = try self.shstrtab.insert(gpa, ".got"),
|
||||
|
|
@ -639,7 +639,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.rodata_section_index == null) {
|
||||
self.rodata_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.rodata_section_index = @intCast(self.shdrs.items.len);
|
||||
const phdr = &self.phdrs.items[self.phdr_load_ro_index.?];
|
||||
try self.shdrs.append(gpa, .{
|
||||
.sh_name = try self.shstrtab.insert(gpa, ".rodata"),
|
||||
|
|
@ -659,7 +659,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.data_section_index == null) {
|
||||
self.data_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.data_section_index = @intCast(self.shdrs.items.len);
|
||||
const phdr = &self.phdrs.items[self.phdr_load_rw_index.?];
|
||||
try self.shdrs.append(gpa, .{
|
||||
.sh_name = try self.shstrtab.insert(gpa, ".data"),
|
||||
|
|
@ -679,7 +679,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.bss_section_index == null) {
|
||||
self.bss_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.bss_section_index = @intCast(self.shdrs.items.len);
|
||||
const phdr = &self.phdrs.items[self.phdr_load_zerofill_index.?];
|
||||
try self.shdrs.append(gpa, .{
|
||||
.sh_name = try self.shstrtab.insert(gpa, ".bss"),
|
||||
|
|
@ -699,7 +699,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.symtab_section_index == null) {
|
||||
self.symtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.symtab_section_index = @intCast(self.shdrs.items.len);
|
||||
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
|
||||
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
|
||||
const file_size = self.base.options.symbol_count_hint * each_size;
|
||||
|
|
@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
.sh_size = file_size,
|
||||
// The section header index of the associated string table.
|
||||
.sh_link = self.strtab_section_index.?,
|
||||
.sh_info = @as(u32, @intCast(self.symbols.items.len)),
|
||||
.sh_info = @intCast(self.symbols.items.len),
|
||||
.sh_addralign = min_align,
|
||||
.sh_entsize = each_size,
|
||||
});
|
||||
|
|
@ -723,7 +723,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
|
||||
if (self.dwarf) |*dw| {
|
||||
if (self.debug_str_section_index == null) {
|
||||
self.debug_str_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.debug_str_section_index = @intCast(self.shdrs.items.len);
|
||||
assert(dw.strtab.buffer.items.len == 0);
|
||||
try dw.strtab.buffer.append(gpa, 0);
|
||||
try self.shdrs.append(gpa, .{
|
||||
|
|
@ -743,7 +743,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.debug_info_section_index == null) {
|
||||
self.debug_info_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.debug_info_section_index = @intCast(self.shdrs.items.len);
|
||||
const file_size_hint = 200;
|
||||
const p_align = 1;
|
||||
const off = self.findFreeSpace(file_size_hint, p_align);
|
||||
|
|
@ -768,7 +768,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.debug_abbrev_section_index == null) {
|
||||
self.debug_abbrev_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.debug_abbrev_section_index = @intCast(self.shdrs.items.len);
|
||||
const file_size_hint = 128;
|
||||
const p_align = 1;
|
||||
const off = self.findFreeSpace(file_size_hint, p_align);
|
||||
|
|
@ -793,7 +793,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.debug_aranges_section_index == null) {
|
||||
self.debug_aranges_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.debug_aranges_section_index = @intCast(self.shdrs.items.len);
|
||||
const file_size_hint = 160;
|
||||
const p_align = 16;
|
||||
const off = self.findFreeSpace(file_size_hint, p_align);
|
||||
|
|
@ -818,7 +818,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
|
|||
}
|
||||
|
||||
if (self.debug_line_section_index == null) {
|
||||
self.debug_line_section_index = @as(u16, @intCast(self.shdrs.items.len));
|
||||
self.debug_line_section_index = @intCast(self.shdrs.items.len);
|
||||
const file_size_hint = 250;
|
||||
const p_align = 1;
|
||||
const off = self.findFreeSpace(file_size_hint, p_align);
|
||||
|
|
@ -2666,12 +2666,12 @@ fn updateDeclCode(
|
|||
|
||||
const old_size = atom_ptr.size;
|
||||
const old_vaddr = atom_ptr.value;
|
||||
atom_ptr.alignment = math.log2_int(u64, required_alignment);
|
||||
atom_ptr.alignment = required_alignment;
|
||||
atom_ptr.size = code.len;
|
||||
|
||||
if (old_size > 0 and self.base.child_pid == null) {
|
||||
const capacity = atom_ptr.capacity(self);
|
||||
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
|
||||
const need_realloc = code.len > capacity or !required_alignment.check(sym.value);
|
||||
if (need_realloc) {
|
||||
try atom_ptr.grow(self);
|
||||
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom_ptr.value });
|
||||
|
|
@ -2869,7 +2869,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
|
|||
const mod = self.base.options.module.?;
|
||||
const zig_module = self.file(self.zig_module_index.?).?.zig_module;
|
||||
|
||||
var required_alignment: u32 = undefined;
|
||||
var required_alignment: InternPool.Alignment = .none;
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
|
|
@ -2918,7 +2918,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
|
|||
const atom_ptr = local_sym.atom(self).?;
|
||||
atom_ptr.alive = true;
|
||||
atom_ptr.name_offset = name_str_index;
|
||||
atom_ptr.alignment = math.log2_int(u64, required_alignment);
|
||||
atom_ptr.alignment = required_alignment;
|
||||
atom_ptr.size = code.len;
|
||||
|
||||
try atom_ptr.allocate(self);
|
||||
|
|
@ -2995,7 +2995,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
|
|||
const atom_ptr = local_sym.atom(self).?;
|
||||
atom_ptr.alive = true;
|
||||
atom_ptr.name_offset = name_str_index;
|
||||
atom_ptr.alignment = math.log2_int(u64, required_alignment);
|
||||
atom_ptr.alignment = required_alignment;
|
||||
atom_ptr.size = code.len;
|
||||
|
||||
try atom_ptr.allocate(self);
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ file_index: File.Index = 0,
size: u64 = 0,

/// Alignment of this atom as a power of two.
alignment: u8 = 0,
alignment: Alignment = .@"1",

/// Index of the input section.
input_section_index: Index = 0,

@ -42,6 +42,8 @@ fde_end: u32 = 0,
prev_index: Index = 0,
next_index: Index = 0,

pub const Alignment = @import("../../InternPool.zig").Alignment;

pub fn name(self: Atom, elf_file: *Elf) []const u8 {
return elf_file.strtab.getAssumeExists(self.name_offset);
}
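The helper calls appearing throughout this change (forward, backward, check, toLog2Units, toByteUnits, fromNonzeroByteUnits, max) suggest a log2-encoded enum. Below is a minimal stand-alone model of that encoding, an assumption for illustration only; the real InternPool.Alignment in src/InternPool.zig is richer and its details may differ.

const std = @import("std");

/// Simplified stand-in for InternPool.Alignment: the tag value is the log2 of
/// the alignment in bytes, with a .none sentinel for "no explicit alignment".
const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    none = std.math.maxInt(u6),
    _,

    fn toByteUnits(a: Alignment, default: u64) u64 {
        return if (a == .none) default else @as(u64, 1) << @intFromEnum(a);
    }

    fn toByteUnitsOptional(a: Alignment) ?u64 {
        return if (a == .none) null else @as(u64, 1) << @intFromEnum(a);
    }

    fn toLog2Units(a: Alignment) u6 {
        std.debug.assert(a != .none);
        return @intFromEnum(a);
    }

    fn fromLog2Units(log2_units: u6) Alignment {
        return @enumFromInt(log2_units);
    }

    fn fromNonzeroByteUnits(bytes: u64) Alignment {
        return @enumFromInt(std.math.log2_int(u64, bytes));
    }

    fn forward(a: Alignment, addr: u64) u64 {
        return std.mem.alignForward(u64, addr, a.toByteUnits(1));
    }

    fn backward(a: Alignment, addr: u64) u64 {
        return std.mem.alignBackward(u64, addr, a.toByteUnits(1));
    }

    fn check(a: Alignment, addr: u64) bool {
        return std.mem.isAlignedGeneric(u64, addr, a.toByteUnits(1));
    }

    /// Larger of two alignments (ignores the .none sentinel for brevity).
    fn max(a: Alignment, b: Alignment) Alignment {
        return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
    }
};

test "log2-encoded alignment helpers" {
    const a = Alignment.fromNonzeroByteUnits(8);
    try std.testing.expectEqual(Alignment.@"8", a);
    try std.testing.expectEqual(@as(u64, 16), a.forward(9));
    try std.testing.expectEqual(@as(u64, 8), a.backward(9));
    try std.testing.expect(a.check(24));
    try std.testing.expect(!a.check(12));
    try std.testing.expectEqual(@as(u6, 3), a.toLog2Units());
}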
|
||||
|
|
@ -112,7 +114,6 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
|
|||
const free_list = &meta.free_list;
|
||||
const last_atom_index = &meta.last_atom_index;
|
||||
const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
|
||||
const alignment = try std.math.powi(u64, 2, self.alignment);
|
||||
|
||||
// We use these to indicate our intention to update metadata, placing the new atom,
|
||||
// and possibly removing a free list node.
|
||||
|
|
@ -136,7 +137,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
|
|||
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
|
||||
const capacity_end_vaddr = big_atom.value + cap;
|
||||
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
|
||||
const new_start_vaddr = std.mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
|
||||
const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
|
||||
if (new_start_vaddr < ideal_capacity_end_vaddr) {
|
||||
// Additional bookkeeping here to notice if this free list node
|
||||
// should be deleted because the block that it points to has grown to take up
|
||||
|
|
@ -163,7 +164,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
|
|||
} else if (elf_file.atom(last_atom_index.*)) |last| {
|
||||
const ideal_capacity = Elf.padToIdeal(last.size);
|
||||
const ideal_capacity_end_vaddr = last.value + ideal_capacity;
|
||||
const new_start_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
|
||||
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
|
||||
// Set up the metadata to be updated, after errors are no longer possible.
|
||||
atom_placement = last.atom_index;
|
||||
break :blk new_start_vaddr;
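The two placement strategies above reduce to one backward and one forward alignment step; a small worked example with assumed addresses:

const std = @import("std");

test "atom placement mirrors the backward/forward alignment above" {
    // Assumed numbers, purely illustrative.
    const alignment: u64 = 16;

    // Reusing a free block: back off from the end of its capacity.
    const capacity_end_vaddr: u64 = 0x1037;
    const new_atom_ideal_capacity: u64 = 0x20;
    const reuse_vaddr = std.mem.alignBackward(u64, capacity_end_vaddr - new_atom_ideal_capacity, alignment);
    try std.testing.expectEqual(@as(u64, 0x1010), reuse_vaddr);

    // Appending after the last atom: round its ideal end upward.
    const ideal_capacity_end_vaddr: u64 = 0x2005;
    const append_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
    try std.testing.expectEqual(@as(u64, 0x2010), append_vaddr);
}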
|
||||
|
|
@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
|
|||
elf_file.debug_aranges_section_dirty = true;
|
||||
}
|
||||
}
|
||||
shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
|
||||
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
|
||||
|
||||
// This function can also reallocate an atom.
|
||||
// In this case we need to "unplug" it from its previous location before
|
||||
|
|
@ -224,10 +225,8 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
|
|||
}
|
||||
|
||||
pub fn grow(self: *Atom, elf_file: *Elf) !void {
|
||||
const alignment = try std.math.powi(u64, 2, self.alignment);
|
||||
const align_ok = std.mem.alignBackward(u64, self.value, alignment) == self.value;
|
||||
const need_realloc = !align_ok or self.size > self.capacity(elf_file);
|
||||
if (need_realloc) try self.allocate(elf_file);
|
||||
if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
|
||||
try self.allocate(elf_file);
|
||||
}
|
||||
|
||||
pub fn free(self: *Atom, elf_file: *Elf) void {
|
||||
|
|
|
|||
|
|
@ -181,10 +181,10 @@ fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8,
|
|||
const data = try self.shdrContents(shndx);
|
||||
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
|
||||
atom.size = chdr.ch_size;
|
||||
atom.alignment = math.log2_int(u64, chdr.ch_addralign);
|
||||
atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign);
|
||||
} else {
|
||||
atom.size = shdr.sh_size;
|
||||
atom.alignment = math.log2_int(u64, shdr.sh_addralign);
|
||||
atom.alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -571,7 +571,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
|
|||
atom.file = self.index;
|
||||
atom.size = this_sym.st_size;
|
||||
const alignment = this_sym.st_value;
|
||||
atom.alignment = math.log2_int(u64, alignment);
|
||||
atom.alignment = Alignment.fromNonzeroByteUnits(alignment);
|
||||
|
||||
var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
|
||||
if (is_tls) sh_flags |= elf.SHF_TLS;
|
||||
|
|
@ -870,3 +870,4 @@ const Fde = eh_frame.Fde;
|
|||
const File = @import("file.zig").File;
|
||||
const StringTable = @import("../strtab.zig").StringTable;
|
||||
const Symbol = @import("Symbol.zig");
|
||||
const Alignment = Atom.Alignment;
|
||||
|
|
|
|||
|
|
@ -1425,7 +1425,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
|
|||
|
||||
const CreateAtomOpts = struct {
|
||||
size: u64 = 0,
|
||||
alignment: u32 = 0,
|
||||
alignment: Alignment = .@"1",
|
||||
};
|
||||
|
||||
pub fn createAtom(self: *MachO, sym_index: u32, opts: CreateAtomOpts) !Atom.Index {
|
||||
|
|
@ -1473,7 +1473,7 @@ pub fn createTentativeDefAtoms(self: *MachO) !void {
|
|||
|
||||
const atom_index = try self.createAtom(global.sym_index, .{
|
||||
.size = size,
|
||||
.alignment = alignment,
|
||||
.alignment = @enumFromInt(alignment),
|
||||
});
|
||||
const atom = self.getAtomPtr(atom_index);
|
||||
atom.file = global.file;
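The options struct gives createAtom a defaulted alignment, and call sites that still hold a raw log2 value convert with @enumFromInt. A tiny self-contained version of that pattern; the enum here is a stand-in, not the real Atom.Alignment:

const std = @import("std");

const Alignment = enum(u6) { @"1" = 0, @"8" = 3, _ };
const CreateAtomOpts = struct {
    size: u64 = 0,
    alignment: Alignment = .@"1",
};

test "defaulted options and log2-to-enum conversion" {
    const defaults: CreateAtomOpts = .{};
    try std.testing.expectEqual(Alignment.@"1", defaults.alignment);

    const log2_align: u6 = 3; // e.g. read from a section header
    const opts: CreateAtomOpts = .{ .size = 8, .alignment = @enumFromInt(log2_align) };
    try std.testing.expectEqual(Alignment.@"8", opts.alignment);
}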
|
||||
|
|
@ -1493,7 +1493,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
|
|||
const sym_index = try self.allocateSymbol();
|
||||
const atom_index = try self.createAtom(sym_index, .{
|
||||
.size = @sizeOf(u64),
|
||||
.alignment = 3,
|
||||
.alignment = .@"8",
|
||||
});
|
||||
try self.atom_by_index_table.putNoClobber(self.base.allocator, sym_index, atom_index);
|
||||
|
||||
|
|
@ -1510,7 +1510,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
|
|||
switch (self.mode) {
|
||||
.zld => self.addAtomToSection(atom_index),
|
||||
.incremental => {
|
||||
sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
|
||||
sym.n_value = try self.allocateAtom(atom_index, atom.size, .@"8");
|
||||
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
|
||||
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
|
||||
try self.writeAtom(atom_index, &buffer);
|
||||
|
|
@ -1521,7 +1521,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
|
|||
fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: SymbolWithLoc) !Atom.Index {
|
||||
const gpa = self.base.allocator;
|
||||
const size = 3 * @sizeOf(u64);
|
||||
const required_alignment: u32 = 1;
|
||||
const required_alignment: Alignment = .@"1";
|
||||
const sym_index = try self.allocateSymbol();
|
||||
const atom_index = try self.createAtom(sym_index, .{});
|
||||
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
|
||||
|
|
@ -2030,10 +2030,10 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
|
|||
// capacity, insert a free list node for it.
|
||||
}
|
||||
|
||||
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
|
||||
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbol(self);
|
||||
const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
|
||||
const align_ok = alignment.check(sym.n_value);
|
||||
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
|
||||
if (!need_realloc) return sym.n_value;
|
||||
return self.allocateAtom(atom_index, new_atom_size, alignment);
|
||||
|
|
@ -2350,7 +2350,7 @@ fn updateLazySymbolAtom(
|
|||
const gpa = self.base.allocator;
|
||||
const mod = self.base.options.module.?;
|
||||
|
||||
var required_alignment: u32 = undefined;
|
||||
var required_alignment: Alignment = .none;
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
|
|
@ -2617,7 +2617,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
|
|||
sym.n_desc = 0;
|
||||
|
||||
const capacity = atom.capacity(self);
|
||||
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
|
||||
const need_realloc = code_len > capacity or !required_alignment.check(sym.n_value);
|
||||
|
||||
if (need_realloc) {
|
||||
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
|
||||
|
|
@ -3204,7 +3204,7 @@ pub fn addAtomToSection(self: *MachO, atom_index: Atom.Index) void {
|
|||
self.sections.set(sym.n_sect - 1, section);
|
||||
}
|
||||
|
||||
fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
|
||||
fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
|
|
@ -3247,7 +3247,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
|
|||
const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
|
||||
const capacity_end_vaddr = sym.n_value + capacity;
|
||||
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
|
||||
const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
|
||||
const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned);
|
||||
if (new_start_vaddr < ideal_capacity_end_vaddr) {
|
||||
// Additional bookkeeping here to notice if this free list node
|
||||
// should be deleted because the atom that it points to has grown to take up
|
||||
|
|
@ -3276,11 +3276,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
|
|||
const last_symbol = last.getSymbol(self);
|
||||
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
|
||||
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
|
||||
const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
|
||||
const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr);
|
||||
atom_placement = last_index;
|
||||
break :blk new_start_vaddr;
|
||||
} else {
|
||||
break :blk mem.alignForward(u64, segment.vmaddr, alignment);
|
||||
break :blk alignment.forward(segment.vmaddr);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -3295,10 +3295,8 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
|
|||
self.segment_table_dirty = true;
|
||||
}
|
||||
|
||||
const align_pow = @as(u32, @intCast(math.log2(alignment)));
|
||||
if (header.@"align" < align_pow) {
|
||||
header.@"align" = align_pow;
|
||||
}
|
||||
assert(alignment != .none);
|
||||
header.@"align" = @min(header.@"align", @intFromEnum(alignment));
|
||||
self.getAtomPtr(atom_index).size = new_atom_size;
|
||||
|
||||
if (atom.prev_index) |prev_index| {
|
||||
|
|
@ -3338,7 +3336,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u
|
|||
|
||||
pub fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
|
||||
for (self.segments.items, 0..) |seg, i| {
|
||||
const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
|
||||
const indexes = self.getSectionIndexes(@intCast(i));
|
||||
var out_seg = seg;
|
||||
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
|
||||
out_seg.nsects = 0;
|
||||
|
|
@ -5526,6 +5524,7 @@ const Trie = @import("MachO/Trie.zig");
|
|||
const Type = @import("../type.zig").Type;
|
||||
const TypedValue = @import("../TypedValue.zig");
|
||||
const Value = @import("../value.zig").Value;
|
||||
const Alignment = Atom.Alignment;
|
||||
|
||||
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
|
||||
pub const Bind = @import("MachO/dyld_info/bind.zig").Bind(*const MachO, SymbolWithLoc);
|
||||
|
|
|
|||
|
|
@ -28,13 +28,15 @@ size: u64 = 0,
|
|||
|
||||
/// Alignment of this atom as a power of 2.
|
||||
/// For instance, aligmment of 0 should be read as 2^0 = 1 byte aligned.
|
||||
alignment: u32 = 0,
|
||||
alignment: Alignment = .@"1",
|
||||
|
||||
/// Points to the previous and next neighbours
|
||||
/// TODO use the same trick as with symbols: reserve index 0 as null atom
|
||||
next_index: ?Index = null,
|
||||
prev_index: ?Index = null,
|
||||
|
||||
pub const Alignment = @import("../../InternPool.zig").Alignment;
|
||||
|
||||
pub const Index = u32;
|
||||
|
||||
pub const Binding = struct {
|
||||
|
|
|
|||
|
|
@ -382,7 +382,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
|
|||
const out_sect_id = (try Atom.getOutputSection(macho_file, sect)) orelse continue;
|
||||
if (sect.size == 0) continue;
|
||||
|
||||
const sect_id = @as(u8, @intCast(id));
|
||||
const sect_id: u8 = @intCast(id);
|
||||
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
|
||||
const atom_index = try self.createAtomFromSubsection(
|
||||
macho_file,
|
||||
|
|
@ -391,7 +391,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
|
|||
sym_index,
|
||||
1,
|
||||
sect.size,
|
||||
sect.@"align",
|
||||
Alignment.fromLog2Units(sect.@"align"),
|
||||
out_sect_id,
|
||||
);
|
||||
macho_file.addAtomToSection(atom_index);
|
||||
|
|
@ -470,7 +470,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
|
|||
sym_index,
|
||||
1,
|
||||
atom_size,
|
||||
sect.@"align",
|
||||
Alignment.fromLog2Units(sect.@"align"),
|
||||
out_sect_id,
|
||||
);
|
||||
if (!sect.isZerofill()) {
|
||||
|
|
@ -494,10 +494,10 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
|
|||
else
|
||||
sect.addr + sect.size - addr;
|
||||
|
||||
const atom_align = if (addr > 0)
|
||||
const atom_align = Alignment.fromLog2Units(if (addr > 0)
|
||||
@min(@ctz(addr), sect.@"align")
|
||||
else
|
||||
sect.@"align";
|
||||
sect.@"align");
|
||||
|
||||
const atom_index = try self.createAtomFromSubsection(
|
||||
macho_file,
|
||||
|
|
@ -532,7 +532,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
|
|||
sect_start_index,
|
||||
sect_loc.len,
|
||||
sect.size,
|
||||
sect.@"align",
|
||||
Alignment.fromLog2Units(sect.@"align"),
|
||||
out_sect_id,
|
||||
);
|
||||
if (!sect.isZerofill()) {
|
||||
|
|
@ -551,11 +551,14 @@ fn createAtomFromSubsection(
|
|||
inner_sym_index: u32,
|
||||
inner_nsyms_trailing: u32,
|
||||
size: u64,
|
||||
alignment: u32,
|
||||
alignment: Alignment,
|
||||
out_sect_id: u8,
|
||||
) !Atom.Index {
|
||||
const gpa = macho_file.base.allocator;
|
||||
const atom_index = try macho_file.createAtom(sym_index, .{ .size = size, .alignment = alignment });
|
||||
const atom_index = try macho_file.createAtom(sym_index, .{
|
||||
.size = size,
|
||||
.alignment = alignment,
|
||||
});
|
||||
const atom = macho_file.getAtomPtr(atom_index);
|
||||
atom.inner_sym_index = inner_sym_index;
|
||||
atom.inner_nsyms_trailing = inner_nsyms_trailing;
|
||||
|
|
@ -1115,3 +1118,4 @@ const MachO = @import("../MachO.zig");
|
|||
const Platform = @import("load_commands.zig").Platform;
|
||||
const SymbolWithLoc = MachO.SymbolWithLoc;
|
||||
const UnwindInfo = @import("UnwindInfo.zig");
|
||||
const Alignment = Atom.Alignment;
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
|
|||
|
||||
while (true) {
|
||||
const atom = macho_file.getAtom(group_end);
|
||||
offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment));
|
||||
offset = atom.alignment.forward(offset);
|
||||
|
||||
const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
|
||||
sym.n_value = offset;
|
||||
|
|
@ -112,7 +112,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
|
|||
|
||||
macho_file.logAtom(group_end, log);
|
||||
|
||||
header.@"align" = @max(header.@"align", atom.alignment);
|
||||
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
|
||||
|
||||
allocated.putAssumeCapacityNoClobber(group_end, {});
|
||||
|
||||
|
|
@ -196,7 +196,7 @@ fn allocateThunk(
|
|||
|
||||
macho_file.logAtom(atom_index, log);
|
||||
|
||||
header.@"align" = @max(header.@"align", atom.alignment);
|
||||
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
|
||||
|
||||
if (end_atom_index == atom_index) break;
|
||||
|
||||
|
|
@ -326,7 +326,10 @@ fn isReachable(
|
|||
|
||||
fn createThunkAtom(macho_file: *MachO) !Atom.Index {
|
||||
const sym_index = try macho_file.allocateSymbol();
|
||||
const atom_index = try macho_file.createAtom(sym_index, .{ .size = @sizeOf(u32) * 3, .alignment = 2 });
|
||||
const atom_index = try macho_file.createAtom(sym_index, .{
|
||||
.size = @sizeOf(u32) * 3,
|
||||
.alignment = .@"4",
|
||||
});
|
||||
const sym = macho_file.getSymbolPtr(.{ .sym_index = sym_index });
|
||||
sym.n_type = macho.N_SECT;
|
||||
sym.n_sect = macho_file.text_section_index.? + 1;
|
||||
|
|
|
|||
|
|
@ -985,19 +985,16 @@ fn calcSectionSizes(macho_file: *MachO) !void {
|
|||
|
||||
while (true) {
|
||||
const atom = macho_file.getAtom(atom_index);
|
||||
const atom_alignment = try math.powi(u32, 2, atom.alignment);
|
||||
const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
|
||||
const atom_offset = atom.alignment.forward(header.size);
|
||||
const padding = atom_offset - header.size;
|
||||
|
||||
const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
|
||||
sym.n_value = atom_offset;
|
||||
|
||||
header.size += padding + atom.size;
|
||||
header.@"align" = @max(header.@"align", atom.alignment);
|
||||
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
|
||||
|
||||
if (atom.next_index) |next_index| {
|
||||
atom_index = next_index;
|
||||
} else break;
|
||||
atom_index = atom.next_index orelse break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1106,7 +1106,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
|
|||
const gpa = self.base.allocator;
|
||||
const mod = self.base.options.module.?;
|
||||
|
||||
var required_alignment: u32 = undefined;
|
||||
var required_alignment: InternPool.Alignment = .none;
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
|
|
|
|||
|
|
@ -187,8 +187,10 @@ debug_pubtypes_atom: ?Atom.Index = null,
|
|||
/// rather than by the linker.
|
||||
synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{},
|
||||
|
||||
pub const Alignment = types.Alignment;
|
||||
|
||||
pub const Segment = struct {
|
||||
alignment: u32,
|
||||
alignment: Alignment,
|
||||
size: u32,
|
||||
offset: u32,
|
||||
flags: u32,
|
||||
|
|
@ -1490,7 +1492,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
|
|||
try atom.code.appendSlice(wasm.base.allocator, code);
|
||||
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
|
||||
|
||||
atom.size = @as(u32, @intCast(code.len));
|
||||
atom.size = @intCast(code.len);
|
||||
if (code.len == 0) return;
|
||||
atom.alignment = decl.getAlignment(mod);
|
||||
}
|
||||
|
|
@ -2050,7 +2052,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
|
|||
};
|
||||
|
||||
const segment: *Segment = &wasm.segments.items[final_index];
|
||||
segment.alignment = @max(segment.alignment, atom.alignment);
|
||||
segment.alignment = segment.alignment.max(atom.alignment);
|
||||
|
||||
try wasm.appendAtomAtIndex(final_index, atom_index);
|
||||
}
|
||||
|
|
@ -2121,7 +2123,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
|
|||
}
|
||||
}
|
||||
}
|
||||
offset = std.mem.alignForward(u32, offset, atom.alignment);
|
||||
offset = @intCast(atom.alignment.forward(offset));
|
||||
atom.offset = offset;
|
||||
log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
|
||||
symbol_loc.getName(wasm),
|
||||
|
|
@ -2132,7 +2134,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
|
|||
offset += atom.size;
|
||||
atom_index = atom.prev orelse break;
|
||||
}
|
||||
segment.size = std.mem.alignForward(u32, offset, segment.alignment);
|
||||
segment.size = @intCast(segment.alignment.forward(offset));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2351,7 +2353,7 @@ fn createSyntheticFunction(
|
|||
.offset = 0,
|
||||
.sym_index = loc.index,
|
||||
.file = null,
|
||||
.alignment = 1,
|
||||
.alignment = .@"1",
|
||||
.next = null,
|
||||
.prev = null,
|
||||
.code = function_body.moveToUnmanaged(),
|
||||
|
|
@ -2382,11 +2384,11 @@ pub fn createFunction(
|
|||
const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
|
||||
const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
|
||||
atom.* = .{
|
||||
.size = @as(u32, @intCast(function_body.items.len)),
|
||||
.size = @intCast(function_body.items.len),
|
||||
.offset = 0,
|
||||
.sym_index = loc.index,
|
||||
.file = null,
|
||||
.alignment = 1,
|
||||
.alignment = .@"1",
|
||||
.next = null,
|
||||
.prev = null,
|
||||
.code = function_body.moveToUnmanaged(),
|
||||
|
|
@ -2734,8 +2736,8 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
const page_size = std.wasm.page_size; // 64kb
|
||||
// Use the user-provided stack size or else we use 1MB by default
|
||||
const stack_size = wasm.base.options.stack_size_override orelse page_size * 16;
|
||||
const stack_alignment = 16; // wasm's stack alignment as specified by tool-convention
|
||||
const heap_alignment = 16; // wasm's heap alignment as specified by tool-convention
|
||||
const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention
|
||||
const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention
|
||||
|
||||
// Always place the stack at the start by default
|
||||
// unless the user specified the global-base flag
|
||||
|
|
@ -2748,7 +2750,7 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
const is_obj = wasm.base.options.output_mode == .Obj;
|
||||
|
||||
if (place_stack_first and !is_obj) {
|
||||
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
|
||||
memory_ptr = stack_alignment.forward(memory_ptr);
|
||||
memory_ptr += stack_size;
|
||||
// We always put the stack pointer global at index 0
|
||||
wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
|
||||
|
|
@ -2758,7 +2760,7 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
var data_seg_it = wasm.data_segments.iterator();
|
||||
while (data_seg_it.next()) |entry| {
|
||||
const segment = &wasm.segments.items[entry.value_ptr.*];
|
||||
memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
|
||||
memory_ptr = segment.alignment.forward(memory_ptr);
|
||||
|
||||
// set TLS-related symbols
|
||||
if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
|
||||
|
|
@ -2768,7 +2770,7 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
}
|
||||
if (wasm.findGlobalSymbol("__tls_align")) |loc| {
|
||||
const sym = loc.getSymbol(wasm);
|
||||
wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment);
|
||||
wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
|
||||
}
|
||||
if (wasm.findGlobalSymbol("__tls_base")) |loc| {
|
||||
const sym = loc.getSymbol(wasm);
|
||||
|
|
@ -2795,7 +2797,7 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
}
|
||||
|
||||
if (!place_stack_first and !is_obj) {
|
||||
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
|
||||
memory_ptr = stack_alignment.forward(memory_ptr);
|
||||
memory_ptr += stack_size;
|
||||
wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
|
||||
}
|
||||
|
|
@ -2804,7 +2806,7 @@ fn setupMemory(wasm: *Wasm) !void {
|
|||
// We must set its virtual address so it can be used in relocations.
|
||||
if (wasm.findGlobalSymbol("__heap_base")) |loc| {
|
||||
const symbol = loc.getSymbol(wasm);
|
||||
symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment)));
|
||||
symbol.virtual_address = @intCast(heap_alignment.forward(memory_ptr));
|
||||
}
|
||||
|
||||
// Setup the max amount of pages
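The layout walk above is: place the stack (16-byte aligned), then each data segment at its alignment, then round the end of memory to a whole page. A toy version with assumed sizes and a fixed 16-byte segment alignment:

const std = @import("std");

fn layOutMemory(stack_size: u64, data_segment_sizes: []const u64) u64 {
    var memory_ptr: u64 = 0;
    // Stack first, 16-byte aligned.
    memory_ptr = std.mem.alignForward(u64, memory_ptr, 16);
    memory_ptr += stack_size;
    // Then each data segment at its (here fixed) alignment.
    for (data_segment_sizes) |size| {
        memory_ptr = std.mem.alignForward(u64, memory_ptr, 16);
        memory_ptr += size;
    }
    // Round the end of memory up to a whole 64 KiB wasm page.
    return std.mem.alignForward(u64, memory_ptr, std.wasm.page_size);
}

test "stack, data segments, then page-align the end of memory" {
    const sizes = [_]u64{ 100, 4096 };
    const total = layOutMemory(1 * 1024 * 1024, &sizes);
    try std.testing.expect(total % std.wasm.page_size == 0);
    try std.testing.expect(total >= 1 * 1024 * 1024 + 100 + 4096);
}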
|
||||
|
|
@ -2879,7 +2881,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
|
|||
flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
|
||||
}
|
||||
try wasm.segments.append(wasm.base.allocator, .{
|
||||
.alignment = 1,
|
||||
.alignment = .@"1",
|
||||
.size = 0,
|
||||
.offset = 0,
|
||||
.flags = flags,
|
||||
|
|
@ -2954,7 +2956,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
|
|||
/// Appends a new segment with default field values
|
||||
fn appendDummySegment(wasm: *Wasm) !void {
|
||||
try wasm.segments.append(wasm.base.allocator, .{
|
||||
.alignment = 1,
|
||||
.alignment = .@"1",
|
||||
.size = 0,
|
||||
.offset = 0,
|
||||
.flags = 0,
|
||||
|
|
@ -3011,7 +3013,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
|
|||
// the pointers into the list using addends which are appended to the relocation.
|
||||
const names_atom_index = try wasm.createAtom();
|
||||
const names_atom = wasm.getAtomPtr(names_atom_index);
|
||||
names_atom.alignment = 1;
|
||||
names_atom.alignment = .@"1";
|
||||
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
|
||||
const names_symbol = &wasm.symbols.items[names_atom.sym_index];
|
||||
names_symbol.* = .{
|
||||
|
|
@ -3085,7 +3087,7 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
|
|||
.flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
|
||||
};
|
||||
|
||||
atom.alignment = 1; // debug sections are always 1-byte-aligned
|
||||
atom.alignment = .@"1"; // debug sections are always 1-byte-aligned
|
||||
return atom_index;
|
||||
}
|
||||
|
||||
|
|
@ -4724,12 +4726,12 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
|
|||
for (wasm.segment_info.values()) |segment_info| {
|
||||
log.debug("Emit segment: {s} align({d}) flags({b})", .{
|
||||
segment_info.name,
|
||||
@ctz(segment_info.alignment),
|
||||
segment_info.alignment,
|
||||
segment_info.flags,
|
||||
});
|
||||
try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len)));
|
||||
try writer.writeAll(segment_info.name);
|
||||
try leb.writeULEB128(writer, @ctz(segment_info.alignment));
|
||||
try leb.writeULEB128(writer, segment_info.alignment.toLog2Units());
|
||||
try leb.writeULEB128(writer, segment_info.flags);
|
||||
}
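Segment alignment is now emitted as log2 units through the same ULEB128 path; a small round-trip check of that encoding (values here are arbitrary):

const std = @import("std");
const leb = std.leb;

test "ULEB128 round-trip of a log2 alignment value" {
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // Write the log2 units (e.g. 4 for 16-byte alignment) ...
    try leb.writeULEB128(fbs.writer(), @as(u32, 4));
    // ... and read them back.
    fbs.pos = 0;
    try std.testing.expectEqual(@as(u32, 4), try leb.readULEB128(u32, fbs.reader()));
}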
|
||||
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ relocs: std.ArrayListUnmanaged(types.Relocation) = .{},
|
|||
/// Contains the binary data of an atom, which can be non-relocated
|
||||
code: std.ArrayListUnmanaged(u8) = .{},
|
||||
/// For code this is 1, for data this is set to the highest value of all segments
|
||||
alignment: u32,
|
||||
alignment: Wasm.Alignment,
|
||||
/// Offset into the section where the atom lives, this already accounts
|
||||
/// for alignment.
|
||||
offset: u32,
|
||||
|
|
@ -43,7 +43,7 @@ pub const Index = u32;
|
|||
|
||||
/// Represents a default empty wasm `Atom`
|
||||
pub const empty: Atom = .{
|
||||
.alignment = 1,
|
||||
.alignment = .@"1",
|
||||
.file = null,
|
||||
.next = null,
|
||||
.offset = 0,
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ const types = @import("types.zig");
|
|||
const std = @import("std");
|
||||
const Wasm = @import("../Wasm.zig");
|
||||
const Symbol = @import("Symbol.zig");
|
||||
const Alignment = types.Alignment;
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
const leb = std.leb;
|
||||
|
|
@ -88,12 +89,9 @@ const RelocatableData = struct {
|
|||
/// meta data of the given object file.
|
||||
/// NOTE: Alignment is encoded as a power of 2, so we shift the symbol's
|
||||
/// alignment to retrieve the natural alignment.
|
||||
pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) u32 {
|
||||
if (relocatable_data.type != .data) return 1;
|
||||
const data_alignment = object.segment_info[relocatable_data.index].alignment;
|
||||
if (data_alignment == 0) return 1;
|
||||
// Decode from power of 2 to natural alignment
|
||||
return @as(u32, 1) << @as(u5, @intCast(data_alignment));
|
||||
pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) Alignment {
|
||||
if (relocatable_data.type != .data) return .@"1";
|
||||
return object.segment_info[relocatable_data.index].alignment;
|
||||
}
|
||||
|
||||
/// Returns the symbol kind that corresponds to the relocatable section
|
||||
|
|
@ -671,7 +669,7 @@ fn Parser(comptime ReaderType: type) type {
|
|||
try reader.readNoEof(name);
|
||||
segment.* = .{
|
||||
.name = name,
|
||||
.alignment = try leb.readULEB128(u32, reader),
|
||||
.alignment = @enumFromInt(try leb.readULEB128(u32, reader)),
|
||||
.flags = try leb.readULEB128(u32, reader),
|
||||
};
|
||||
log.debug("Found segment: {s} align({d}) flags({b})", .{
|
||||
|
|
@ -919,7 +917,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
|
|||
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
|
||||
};
|
||||
|
||||
const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len));
|
||||
const atom_index: Atom.Index = @intCast(wasm_bin.managed_atoms.items.len);
|
||||
const atom = try wasm_bin.managed_atoms.addOne(gpa);
|
||||
atom.* = Atom.empty;
|
||||
atom.file = object_index;
|
||||
|
|
@ -984,7 +982,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
|
|||
|
||||
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
|
||||
if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned
|
||||
segment.alignment = @max(segment.alignment, atom.alignment);
|
||||
segment.alignment = segment.alignment.max(atom.alignment);
|
||||
}
|
||||
|
||||
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
|
||||
|
|
|
|||
|
|
@ -109,11 +109,13 @@ pub const SubsectionType = enum(u8) {
|
|||
WASM_SYMBOL_TABLE = 8,
|
||||
};
|
||||
|
||||
pub const Alignment = @import("../../InternPool.zig").Alignment;
|
||||
|
||||
pub const Segment = struct {
|
||||
/// Segment's name, encoded as UTF-8 bytes.
|
||||
name: []const u8,
|
||||
/// The required alignment of the segment, encoded as a power of 2
|
||||
alignment: u32,
|
||||
alignment: Alignment,
|
||||
/// Bitfield containing flags for a segment
|
||||
flags: u32,
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
const std = @import("std");
|
||||
const Type = @import("type.zig").Type;
|
||||
const AddressSpace = std.builtin.AddressSpace;
|
||||
const Alignment = @import("InternPool.zig").Alignment;
|
||||
|
||||
pub const ArchOsAbi = struct {
|
||||
arch: std.Target.Cpu.Arch,
|
||||
|
|
@ -595,13 +596,13 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
|
|||
}
|
||||
|
||||
/// This function returns 1 if function alignment is not observable or settable.
|
||||
pub fn defaultFunctionAlignment(target: std.Target) u32 {
|
||||
pub fn defaultFunctionAlignment(target: std.Target) Alignment {
|
||||
return switch (target.cpu.arch) {
|
||||
.arm, .armeb => 4,
|
||||
.aarch64, .aarch64_32, .aarch64_be => 4,
|
||||
.sparc, .sparcel, .sparc64 => 4,
|
||||
.riscv64 => 2,
|
||||
else => 1,
|
||||
.arm, .armeb => .@"4",
|
||||
.aarch64, .aarch64_32, .aarch64_be => .@"4",
|
||||
.sparc, .sparcel, .sparc64 => .@"4",
|
||||
.riscv64 => .@"2",
|
||||
else => .@"1",
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
src/type.zig (665 changed lines): file diff suppressed because it is too large

src/value.zig (246 changed lines)
|
|
@ -462,7 +462,7 @@ pub const Value = struct {
|
|||
if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType());
|
||||
const x = switch (int.storage) {
|
||||
else => unreachable,
|
||||
.lazy_align => ty.toType().abiAlignment(mod),
|
||||
.lazy_align => ty.toType().abiAlignment(mod).toByteUnits(0),
|
||||
.lazy_size => ty.toType().abiSize(mod),
|
||||
};
|
||||
return BigIntMutable.init(&space.limbs, x).toConst();
|
||||
|
|
@ -523,9 +523,9 @@ pub const Value = struct {
|
|||
.u64 => |x| x,
|
||||
.i64 => |x| std.math.cast(u64, x),
|
||||
.lazy_align => |ty| if (opt_sema) |sema|
|
||||
(try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar
|
||||
(try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
|
||||
else
|
||||
ty.toType().abiAlignment(mod),
|
||||
ty.toType().abiAlignment(mod).toByteUnits(0),
|
||||
.lazy_size => |ty| if (opt_sema) |sema|
|
||||
(try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar
|
||||
else
|
||||
|
|
@ -569,9 +569,9 @@ pub const Value = struct {
|
|||
.int => |int| switch (int.storage) {
|
||||
.big_int => |big_int| big_int.to(i64) catch unreachable,
|
||||
.i64 => |x| x,
|
||||
.u64 => |x| @as(i64, @intCast(x)),
|
||||
.lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))),
|
||||
.lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))),
|
||||
.u64 => |x| @intCast(x),
|
||||
.lazy_align => |ty| @intCast(ty.toType().abiAlignment(mod).toByteUnits(0)),
|
||||
.lazy_size => |ty| @intCast(ty.toType().abiSize(mod)),
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
|
|
@ -612,10 +612,11 @@ pub const Value = struct {
|
|||
const target = mod.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
if (val.isUndef(mod)) {
|
||||
const size = @as(usize, @intCast(ty.abiSize(mod)));
|
||||
const size: usize = @intCast(ty.abiSize(mod));
|
||||
@memset(buffer[0..size], 0xaa);
|
||||
return;
|
||||
}
|
||||
const ip = &mod.intern_pool;
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Void => {},
|
||||
.Bool => {
|
||||
|
|
@ -656,40 +657,44 @@ pub const Value = struct {
|
|||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Auto => return error.IllDefinedMemoryLayout,
|
||||
.Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
|
||||
const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
|
||||
const field_val = switch (val.ip_index) {
|
||||
.none => switch (val.tag()) {
|
||||
.bytes => {
|
||||
buffer[off] = val.castTag(.bytes).?.data[i];
|
||||
continue;
|
||||
.Struct => {
|
||||
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
|
||||
switch (struct_type.layout) {
|
||||
.Auto => return error.IllDefinedMemoryLayout,
|
||||
.Extern => for (0..struct_type.field_types.len) |i| {
|
||||
const off: usize = @intCast(ty.structFieldOffset(i, mod));
|
||||
const field_val = switch (val.ip_index) {
|
||||
.none => switch (val.tag()) {
|
||||
.bytes => {
|
||||
buffer[off] = val.castTag(.bytes).?.data[i];
|
||||
continue;
|
||||
},
|
||||
.aggregate => val.castTag(.aggregate).?.data[i],
|
||||
.repeated => val.castTag(.repeated).?.data,
|
||||
else => unreachable,
|
||||
},
|
||||
.aggregate => val.castTag(.aggregate).?.data[i],
|
||||
.repeated => val.castTag(.repeated).?.data,
|
||||
else => unreachable,
|
||||
},
|
||||
else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| {
|
||||
buffer[off] = bytes[i];
|
||||
continue;
|
||||
},
|
||||
.elems => |elems| elems[i],
|
||||
.repeated_elem => |elem| elem,
|
||||
}.toValue(),
|
||||
};
|
||||
try writeToMemory(field_val, field.ty, mod, buffer[off..]);
|
||||
},
|
||||
.Packed => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
|
||||
},
|
||||
else => switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| {
|
||||
buffer[off] = bytes[i];
|
||||
continue;
|
||||
},
|
||||
.elems => |elems| elems[i],
|
||||
.repeated_elem => |elem| elem,
|
||||
}.toValue(),
|
||||
};
|
||||
const field_ty = struct_type.field_types.get(ip)[i].toType();
|
||||
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
|
||||
},
|
||||
.Packed => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
|
||||
},
|
||||
}
|
||||
},
|
||||
.ErrorSet => {
|
||||
// TODO revisit this when we have the concept of the error tag type
|
||||
const Int = u16;
|
||||
const name = switch (mod.intern_pool.indexToKey(val.toIntern())) {
|
||||
const name = switch (ip.indexToKey(val.toIntern())) {
|
||||
.err => |err| err.name,
|
||||
.error_union => |error_union| error_union.val.err_name,
|
||||
else => unreachable,
|
||||
|
|
@ -790,24 +795,24 @@ pub const Value = struct {
|
|||
bits += elem_bit_size;
|
||||
}
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => unreachable, // Handled in non-packed writeToMemory
|
||||
.Packed => {
|
||||
var bits: u16 = 0;
|
||||
const fields = ty.structFields(mod).values();
|
||||
const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
|
||||
for (fields, 0..) |field, i| {
|
||||
const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
|
||||
const field_val = switch (storage) {
|
||||
.bytes => unreachable,
|
||||
.elems => |elems| elems[i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
|
||||
bits += field_bits;
|
||||
}
|
||||
},
|
||||
.Struct => {
|
||||
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
|
||||
// Sema is supposed to have emitted a compile error already in the case of Auto,
|
||||
// and Extern is handled in non-packed writeToMemory.
|
||||
assert(struct_type.layout == .Packed);
|
||||
var bits: u16 = 0;
|
||||
const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
|
||||
for (0..struct_type.field_types.len) |i| {
|
||||
const field_ty = struct_type.field_types.get(ip)[i].toType();
|
||||
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
|
||||
const field_val = switch (storage) {
|
||||
.bytes => unreachable,
|
||||
.elems => |elems| elems[i],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
try field_val.toValue().writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
|
||||
bits += field_bits;
|
||||
}
|
||||
},
|
||||
.Union => {
|
||||
const union_obj = mod.typeToUnion(ty).?;
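The packed branch writes each field at an increasing bit offset. The same layout seen from the language side, with an assumed example type:

const std = @import("std");

const P = packed struct { a: u3, b: u5, c: u8 };

test "packed struct fields occupy consecutive bits, low bits first" {
    const p: P = .{ .a = 0b101, .b = 0b10010, .c = 0xAB };
    const bits: u16 = @bitCast(p);
    // a occupies bits 0..2, b bits 3..7, c bits 8..15.
    try std.testing.expectEqual(@as(u3, 0b101), @as(u3, @truncate(bits)));
    try std.testing.expectEqual(@as(u5, 0b10010), @as(u5, @truncate(bits >> 3)));
    try std.testing.expectEqual(@as(u8, 0xAB), @as(u8, @truncate(bits >> 8)));
}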
|
||||
|
|
@ -852,6 +857,7 @@ pub const Value = struct {
|
|||
buffer: []const u8,
|
||||
arena: Allocator,
|
||||
) Allocator.Error!Value {
|
||||
const ip = &mod.intern_pool;
|
||||
const target = mod.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
|
|
@ -926,25 +932,29 @@ pub const Value = struct {
|
|||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => {
|
||||
const fields = ty.structFields(mod).values();
|
||||
const field_vals = try arena.alloc(InternPool.Index, fields.len);
|
||||
for (field_vals, fields, 0..) |*field_val, field, i| {
|
||||
const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
|
||||
const sz = @as(usize, @intCast(field.ty.abiSize(mod)));
|
||||
field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod);
|
||||
}
|
||||
return (try mod.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = field_vals },
|
||||
} })).toValue();
|
||||
},
|
||||
.Packed => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
|
||||
},
|
||||
.Struct => {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
switch (struct_type.layout) {
|
||||
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => {
|
||||
const field_types = struct_type.field_types;
|
||||
const field_vals = try arena.alloc(InternPool.Index, field_types.len);
|
||||
for (field_vals, 0..) |*field_val, i| {
|
||||
const field_ty = field_types.get(ip)[i].toType();
|
||||
const off: usize = @intCast(ty.structFieldOffset(i, mod));
|
||||
const sz: usize = @intCast(field_ty.abiSize(mod));
|
||||
field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod);
|
||||
}
|
||||
return (try mod.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = field_vals },
|
||||
} })).toValue();
|
||||
},
|
||||
.Packed => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
|
||||
},
|
||||
}
|
||||
},
|
||||
.ErrorSet => {
|
||||
// TODO revisit this when we have the concept of the error tag type
|
||||
|
|
@ -992,6 +1002,7 @@ pub const Value = struct {
|
|||
bit_offset: usize,
|
||||
arena: Allocator,
|
||||
) Allocator.Error!Value {
|
||||
const ip = &mod.intern_pool;
|
||||
const target = mod.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
|
|
@ -1070,23 +1081,22 @@ pub const Value = struct {
|
|||
.storage = .{ .elems = elems },
|
||||
} })).toValue();
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => unreachable, // Handled by non-packed readFromMemory
|
||||
.Packed => {
|
||||
var bits: u16 = 0;
|
||||
const fields = ty.structFields(mod).values();
|
||||
const field_vals = try arena.alloc(InternPool.Index, fields.len);
|
||||
for (fields, 0..) |field, i| {
|
||||
const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
|
||||
field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod);
|
||||
bits += field_bits;
|
||||
}
|
||||
return (try mod.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = field_vals },
|
||||
} })).toValue();
|
||||
},
|
||||
.Struct => {
|
||||
// Sema is supposed to have emitted a compile error already for Auto layout structs,
|
||||
// and Extern is handled by non-packed readFromMemory.
|
||||
const struct_type = mod.typeToPackedStruct(ty).?;
|
||||
var bits: u16 = 0;
|
||||
const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
|
||||
for (field_vals, 0..) |*field_val, i| {
|
||||
const field_ty = struct_type.field_types.get(ip)[i].toType();
|
||||
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
|
||||
field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod);
|
||||
bits += field_bits;
|
||||
}
|
||||
return (try mod.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = field_vals },
|
||||
} })).toValue();
|
||||
},
|
||||
.Pointer => {
|
||||
assert(!ty.isSlice(mod)); // No well defined layout.
|
||||
|
|
@ -1105,18 +1115,18 @@ pub const Value = struct {
|
|||
pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
|
||||
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
|
||||
.int => |int| switch (int.storage) {
|
||||
.big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))),
|
||||
.big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)),
|
||||
inline .u64, .i64 => |x| {
|
||||
if (T == f80) {
|
||||
@panic("TODO we can't lower this properly on non-x86 llvm backend yet");
|
||||
}
|
||||
return @as(T, @floatFromInt(x));
|
||||
return @floatFromInt(x);
|
||||
},
|
||||
.lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))),
|
||||
.lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))),
|
||||
.lazy_align => |ty| @floatFromInt(ty.toType().abiAlignment(mod).toByteUnits(0)),
|
||||
.lazy_size => |ty| @floatFromInt(ty.toType().abiSize(mod)),
|
||||
},
|
||||
.float => |float| switch (float.storage) {
|
||||
inline else => |x| @as(T, @floatCast(x)),
|
||||
inline else => |x| @floatCast(x),
|
||||
},
|
||||
else => unreachable,
|
||||
};
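Many of the @as removals in this hunk rely on result-location typing: when the destination type is known, @floatFromInt and @floatCast infer it. A minimal check:

const std = @import("std");

test "result types let @floatFromInt and @floatCast drop the explicit @as" {
    const x: u64 = 3;
    const a: f64 = @floatFromInt(x); // result type comes from the declaration
    const b: f32 = @floatCast(a);
    try std.testing.expectEqual(@as(f32, 3.0), b);
}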
|
||||
|
|
@ -1255,7 +1265,8 @@ pub const Value = struct {
|
|||
.int => |int| switch (int.storage) {
|
||||
.big_int => |big_int| big_int.orderAgainstScalar(0),
|
||||
inline .u64, .i64 => |x| std.math.order(x, 0),
|
||||
.lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
|
||||
.lazy_align => .gt, // alignment is never 0
|
||||
.lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
|
||||
mod,
|
||||
false,
|
||||
if (opt_sema) |sema| .{ .sema = sema } else .eager,
|
||||
|
|
@ -1510,33 +1521,38 @@ pub const Value = struct {
|
|||
/// Asserts the value is a single-item pointer to an array, or an array,
|
||||
/// or an unknown-length pointer, and returns the element value at the index.
|
||||
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
|
||||
return (try val.maybeElemValue(mod, index)).?;
|
||||
}
|
||||
|
||||
/// Like `elemValue`, but returns `null` instead of asserting on failure.
|
||||
pub fn maybeElemValue(val: Value, mod: *Module, index: usize) Allocator.Error!?Value {
|
||||
return switch (val.ip_index) {
|
||||
.none => switch (val.tag()) {
|
||||
.bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]),
|
||||
.repeated => val.castTag(.repeated).?.data,
|
||||
.aggregate => val.castTag(.aggregate).?.data[index],
|
||||
.slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index),
|
||||
else => unreachable,
|
||||
.slice => val.castTag(.slice).?.data.ptr.maybeElemValue(mod, index),
|
||||
else => null,
|
||||
},
|
||||
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
|
||||
.undef => |ty| (try mod.intern(.{
|
||||
.undef = ty.toType().elemType2(mod).toIntern(),
|
||||
})).toValue(),
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
|
||||
.decl => |decl| mod.declPtr(decl).val.maybeElemValue(mod, index),
|
||||
.mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod))
|
||||
.toValue().elemValue(mod, index),
|
||||
.int, .eu_payload => unreachable,
|
||||
.opt_payload => |base| base.toValue().elemValue(mod, index),
|
||||
.comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
|
||||
.elem => |elem| elem.base.toValue().elemValue(mod, index + @as(usize, @intCast(elem.index))),
|
||||
.toValue().maybeElemValue(mod, index),
|
||||
.int, .eu_payload => null,
|
||||
.opt_payload => |base| base.toValue().maybeElemValue(mod, index),
|
||||
.comptime_field => |field_val| field_val.toValue().maybeElemValue(mod, index),
|
||||
.elem => |elem| elem.base.toValue().maybeElemValue(mod, index + @as(usize, @intCast(elem.index))),
|
||||
.field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
|
||||
const base_decl = mod.declPtr(decl_index);
|
||||
const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
|
||||
return field_val.elemValue(mod, index);
|
||||
} else unreachable,
|
||||
return field_val.maybeElemValue(mod, index);
|
||||
} else null,
|
||||
},
|
||||
.opt => |opt| opt.val.toValue().elemValue(mod, index),
|
||||
.opt => |opt| opt.val.toValue().maybeElemValue(mod, index),
|
||||
.aggregate => |aggregate| {
|
||||
const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
|
||||
if (index < len) return switch (aggregate.storage) {
|
||||
|
|
@ -1550,7 +1566,7 @@ pub const Value = struct {
|
|||
assert(index == len);
|
||||
return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue();
|
||||
},
|
||||
else => unreachable,
|
||||
else => null,
|
||||
},
|
||||
};
|
||||
}
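elemValue is now a thin asserting wrapper over maybeElemValue, which returns null where the old code hit unreachable. The same wrapper pattern in miniature, with assumed toy functions:

const std = @import("std");

fn maybeNth(items: []const u8, index: usize) ?u8 {
    if (index >= items.len) return null;
    return items[index];
}

// Asserting API as a thin shim over the optional-returning core.
fn nth(items: []const u8, index: usize) u8 {
    return maybeNth(items, index).?;
}

test "asserting wrapper over an optional-returning core" {
    const data = [_]u8{ 10, 20, 30 };
    try std.testing.expectEqual(@as(?u8, null), maybeNth(&data, 5));
    try std.testing.expectEqual(@as(u8, 20), nth(&data, 1));
}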
|
||||
|
|
@ -1875,9 +1891,9 @@ pub const Value = struct {
|
|||
},
|
||||
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
|
||||
.lazy_align => |ty| if (opt_sema) |sema| {
|
||||
return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
|
||||
return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
|
||||
} else {
|
||||
return floatFromIntInner(ty.toType().abiAlignment(mod), float_ty, mod);
|
||||
return floatFromIntInner(ty.toType().abiAlignment(mod).toByteUnits(0), float_ty, mod);
|
||||
},
|
||||
.lazy_size => |ty| if (opt_sema) |sema| {
|
||||
return floatFromIntInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
|
||||
|
|
@ -1892,11 +1908,11 @@ pub const Value = struct {
|
|||
fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
|
||||
const target = mod.getTarget();
|
||||
const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
|
||||
16 => .{ .f16 = @as(f16, @floatFromInt(x)) },
|
||||
32 => .{ .f32 = @as(f32, @floatFromInt(x)) },
|
||||
64 => .{ .f64 = @as(f64, @floatFromInt(x)) },
|
||||
80 => .{ .f80 = @as(f80, @floatFromInt(x)) },
|
||||
128 => .{ .f128 = @as(f128, @floatFromInt(x)) },
|
||||
16 => .{ .f16 = @floatFromInt(x) },
|
||||
32 => .{ .f32 = @floatFromInt(x) },
|
||||
64 => .{ .f64 = @floatFromInt(x) },
|
||||
80 => .{ .f80 = @floatFromInt(x) },
|
||||
128 => .{ .f128 = @floatFromInt(x) },
|
||||
else => unreachable,
|
||||
};
|
||||
return (try mod.intern(.{ .float = .{
|
||||
|
|
|
|||
|
|
@ -619,3 +619,58 @@ test "sub-aligned pointer field access" {
|
|||
.Little => try expect(x == 0x09080706),
|
||||
}
|
||||
}
|
||||
|
||||
test "alignment of zero-bit types is respected" {
|
||||
if (true) return error.SkipZigTest; // TODO
|
||||
|
||||
const S = struct { arr: [0]usize = .{} };
|
||||
|
||||
comptime assert(@alignOf(void) == 1);
|
||||
comptime assert(@alignOf(u0) == 1);
|
||||
comptime assert(@alignOf([0]usize) == @alignOf(usize));
|
||||
comptime assert(@alignOf(S) == @alignOf(usize));
|
||||
|
||||
var s: S = .{};
|
||||
var v32: void align(32) = {};
|
||||
var x32: u0 align(32) = 0;
|
||||
var s32: S align(32) = .{};
|
||||
|
||||
var zero: usize = 0;
|
||||
|
||||
try expect(@intFromPtr(&s) % @alignOf(usize) == 0);
|
||||
try expect(@intFromPtr(&s.arr) % @alignOf(usize) == 0);
|
||||
try expect(@intFromPtr(s.arr[zero..zero].ptr) % @alignOf(usize) == 0);
|
||||
try expect(@intFromPtr(&v32) % 32 == 0);
|
||||
try expect(@intFromPtr(&x32) % 32 == 0);
|
||||
try expect(@intFromPtr(&s32) % 32 == 0);
|
||||
try expect(@intFromPtr(&s32.arr) % 32 == 0);
|
||||
try expect(@intFromPtr(s32.arr[zero..zero].ptr) % 32 == 0);
|
||||
}
|
||||
|
||||
test "zero-bit fields in extern struct pad fields appropriately" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
|
||||
|
||||
const S = extern struct {
|
||||
x: u8,
|
||||
a: [0]u16 = .{},
|
||||
y: u8,
|
||||
};
|
||||
|
||||
// `a` should give `S` alignment 2, and pad the `x` field.
|
||||
comptime assert(@alignOf(S) == 2);
|
||||
comptime assert(@sizeOf(S) == 4);
|
||||
comptime assert(@offsetOf(S, "x") == 0);
|
||||
comptime assert(@offsetOf(S, "a") == 2);
|
||||
comptime assert(@offsetOf(S, "y") == 2);
|
||||
|
||||
var s: S = .{ .x = 100, .y = 200 };
|
||||
|
||||
try expect(@intFromPtr(&s) % 2 == 0);
|
||||
try expect(@intFromPtr(&s.y) - @intFromPtr(&s.x) == 2);
|
||||
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
|
||||
try expect(@fieldParentPtr(S, "a", &s.a) == &s);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,24 +18,13 @@ test "@alignOf(T) before referencing T" {
|
|||
}
|
||||
|
||||
test "comparison of @alignOf(T) against zero" {
|
||||
{
|
||||
const T = struct { x: u32 };
|
||||
try expect(!(@alignOf(T) == 0));
|
||||
try expect(@alignOf(T) != 0);
|
||||
try expect(!(@alignOf(T) < 0));
|
||||
try expect(!(@alignOf(T) <= 0));
|
||||
try expect(@alignOf(T) > 0);
|
||||
try expect(@alignOf(T) >= 0);
|
||||
}
|
||||
{
|
||||
const T = struct {};
|
||||
try expect(@alignOf(T) == 0);
|
||||
try expect(!(@alignOf(T) != 0));
|
||||
try expect(!(@alignOf(T) < 0));
|
||||
try expect(@alignOf(T) <= 0);
|
||||
try expect(!(@alignOf(T) > 0));
|
||||
try expect(@alignOf(T) >= 0);
|
||||
}
|
||||
const T = struct { x: u32 };
|
||||
try expect(!(@alignOf(T) == 0));
|
||||
try expect(@alignOf(T) != 0);
|
||||
try expect(!(@alignOf(T) < 0));
|
||||
try expect(!(@alignOf(T) <= 0));
|
||||
try expect(@alignOf(T) > 0);
|
||||
try expect(@alignOf(T) >= 0);
|
||||
}
|
||||
|
||||
test "correct alignment for elements and slices of aligned array" {
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ test "switch on empty tagged union" {
test "empty union" {
const U = union {};
try expect(@sizeOf(U) == 0);
try expect(@alignOf(U) == 0);
try expect(@alignOf(U) == 1);
}

test "empty extern union" {