compute LLVMTypes in ParamTypeIterator (#13592)
Follow-up to #13376; fixes a bug in the x86_64 C ABI.

Co-authored-by: Veikka Tuominen <git@vexu.eu>
commit 476b946802 (parent fca776f8f5)
3 changed files with 77 additions and 137 deletions
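The change in one sentence: instead of recording eightbyte bit widths in a [8]u16 buffer and then picking between .multiple_llvm_ints and .multiple_llvm_float, ParamTypeIterator now stores the LLVM field types themselves in a [8]*llvm.Type buffer while walking the System V classification, so aggregates that mix integer and floating-point eightbytes lower to the correct parameter types. A rough illustration of that mapping follows; this is a standalone sketch, not compiler code: lowerEightbyte and the string type names are hypothetical stand-ins for the real dg.context.intType(64), doubleType(), and x86FP80Type() calls seen in the diff.

const std = @import("std");

// System V eightbyte classes relevant to the new .multiple_llvm_types lowering
// (mirrors the classes referenced in this diff; other classes omitted).
const Class = enum { integer, sse, sseup, x87, x87up, none };

// Hypothetical helper: picks the LLVM type a class maps to, analogous to the
// real code's dg.context.intType(64) / doubleType() / x86FP80Type() calls.
fn lowerEightbyte(class: Class) ?[]const u8 {
    return switch (class) {
        .integer => "i64",
        .sse, .sseup => "double",
        .x87, .x87up => "x86_fp80",
        .none => null,
    };
}

test "C_C_D lowers to { i64, double }" {
    // extern struct { v1: i8, v2: i8, v3: f64 } classifies as { integer, sse },
    // so it is passed as two LLVM parameters: an i64 and a double.
    const classes = [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none };
    var buffer: [8][]const u8 = undefined;
    var len: usize = 0;
    for (classes) |class| {
        const ty = lowerEightbyte(class) orelse break;
        buffer[len] = ty;
        len += 1;
    }
    try std.testing.expectEqual(@as(usize, 2), len);
    try std.testing.expectEqualStrings("i64", buffer[0]);
    try std.testing.expectEqualStrings("double", buffer[1]);
}

The new test "C_C_D" added in the first hunk below checks the classification half of this; the lowering half is exercised by the re-enabled C ABI tests at the end of the diff.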
@@ -507,3 +507,48 @@ pub const RegisterClass = struct {
         break :blk set;
     };
 };
+
+const testing = std.testing;
+const Module = @import("../../Module.zig");
+const Value = @import("../../value.zig").Value;
+const builtin = @import("builtin");
+
+fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
+    return .{
+        .ty = Type.initTag(tag),
+        .default_val = Value.initTag(.unreachable_value),
+        .abi_align = 0,
+        .offset = offset,
+        .is_comptime = false,
+    };
+}
+
+test "C_C_D" {
+    var fields = Module.Struct.Fields{};
+    // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 };
+    try fields.ensureTotalCapacity(testing.allocator, 3);
+    defer fields.deinit(testing.allocator);
+    fields.putAssumeCapacity("v1", _field(.i8, 0));
+    fields.putAssumeCapacity("v2", _field(.i8, 1));
+    fields.putAssumeCapacity("v3", _field(.f64, 4));
+
+    var C_C_D_struct = Module.Struct{
+        .fields = fields,
+        .namespace = undefined,
+        .owner_decl = undefined,
+        .zir_index = undefined,
+        .layout = .Extern,
+        .status = .fully_resolved,
+        .known_non_opv = true,
+    };
+    var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct };
+
+    try testing.expectEqual(
+        [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
+        classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret),
+    );
+    try testing.expectEqual(
+        [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
+        classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg),
+    );
+}
@@ -1049,60 +1049,20 @@ pub const Object = struct {
             const aggregate = builder.buildInsertValue(partial, len_param, 1, "");
             try args.append(aggregate);
         },
-        .multiple_llvm_ints => {
+        .multiple_llvm_types => {
             assert(!it.byval_attr);
-            const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
+            const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
             const param_ty = fn_info.param_types[it.zig_index - 1];
             const param_llvm_ty = try dg.lowerType(param_ty);
             const param_alignment = param_ty.abiAlignment(target);
             const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target);
-            var field_types_buf: [8]*llvm.Type = undefined;
-            const field_types = field_types_buf[0..llvm_ints.len];
-            for (llvm_ints) |int_bits, i| {
-                field_types[i] = dg.context.intType(int_bits);
-            }
-            const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
-            const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
-            for (llvm_ints) |_, field_i_usize| {
+            const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+            const casted_ptr = builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
+            for (field_types) |_, field_i_usize| {
                 const field_i = @intCast(c_uint, field_i_usize);
                 const param = llvm_func.getParam(llvm_arg_i);
                 llvm_arg_i += 1;
-                const field_ptr = builder.buildStructGEP(ints_llvm_ty, casted_ptr, field_i, "");
-                const store_inst = builder.buildStore(param, field_ptr);
-                store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
-            }
-
-            const is_by_ref = isByRef(param_ty);
-            const loaded = if (is_by_ref) arg_ptr else l: {
-                const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
-                load_inst.setAlignment(param_alignment);
-                break :l load_inst;
-            };
-            try args.append(loaded);
-        },
-        .multiple_llvm_float => {
-            assert(!it.byval_attr);
-            const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
-            const param_ty = fn_info.param_types[it.zig_index - 1];
-            const param_llvm_ty = try dg.lowerType(param_ty);
-            const param_alignment = param_ty.abiAlignment(target);
-            const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target);
-            var field_types_buf: [8]*llvm.Type = undefined;
-            const field_types = field_types_buf[0..llvm_floats.len];
-            for (llvm_floats) |float_bits, i| {
-                switch (float_bits) {
-                    64 => field_types[i] = dg.context.doubleType(),
-                    80 => field_types[i] = dg.context.x86FP80Type(),
-                    else => {},
-                }
-            }
-            const floats_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
-            const casted_ptr = builder.buildBitCast(arg_ptr, floats_llvm_ty.pointerType(0), "");
-            for (llvm_floats) |_, field_i_usize| {
-                const field_i = @intCast(c_uint, field_i_usize);
-                const param = llvm_func.getParam(llvm_arg_i);
-                llvm_arg_i += 1;
-                const field_ptr = builder.buildStructGEP(floats_llvm_ty, casted_ptr, field_i, "");
+                const field_ptr = builder.buildStructGEP(llvm_ty, casted_ptr, field_i, "");
                 const store_inst = builder.buildStore(param, field_ptr);
                 store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
             }
@@ -2626,8 +2586,7 @@ pub const DeclGen = struct {
             // No attributes needed for these.
             .no_bits,
             .abi_sized_int,
-            .multiple_llvm_ints,
-            .multiple_llvm_float,
+            .multiple_llvm_types,
             .as_u16,
             .float_array,
             .i32_array,
@@ -3167,25 +3126,8 @@ pub const DeclGen = struct {
                 llvm_params.appendAssumeCapacity(ptr_llvm_ty);
                 llvm_params.appendAssumeCapacity(len_llvm_ty);
             },
-            .multiple_llvm_ints => {
-                const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
-                try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
-                for (llvm_ints) |int_bits| {
-                    const big_int_ty = dg.context.intType(int_bits);
-                    llvm_params.appendAssumeCapacity(big_int_ty);
-                }
-            },
-            .multiple_llvm_float => {
-                const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
-                try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
-                for (llvm_ints) |float_bits| {
-                    const float_ty = switch (float_bits) {
-                        64 => dg.context.doubleType(),
-                        80 => dg.context.x86FP80Type(),
-                        else => unreachable,
-                    };
-                    llvm_params.appendAssumeCapacity(float_ty);
-                }
+            .multiple_llvm_types => {
+                try llvm_params.appendSlice(it.llvm_types_buffer[0..it.llvm_types_len]);
             },
             .as_u16 => {
                 try llvm_params.append(dg.context.intType(16));
@@ -4824,10 +4766,10 @@ pub const FuncGen = struct {
                 llvm_args.appendAssumeCapacity(ptr);
                 llvm_args.appendAssumeCapacity(len);
             },
-            .multiple_llvm_ints => {
+            .multiple_llvm_types => {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.air.typeOf(arg);
-                const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
+                const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
                 const llvm_arg = try self.resolveInst(arg);
                 const is_by_ref = isByRef(param_ty);
                 const arg_ptr = if (is_by_ref) llvm_arg else p: {
@@ -4837,51 +4779,13 @@ pub const FuncGen = struct {
                     break :p p;
                 };

-                var field_types_buf: [8]*llvm.Type = undefined;
-                const field_types = field_types_buf[0..llvm_ints.len];
-                for (llvm_ints) |int_bits, i| {
-                    field_types[i] = self.dg.context.intType(int_bits);
-                }
-                const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
-                const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+                const llvm_ty = self.dg.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
+                const casted_ptr = self.builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
                 try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
-                for (llvm_ints) |_, i_usize| {
+                for (llvm_types) |field_ty, i_usize| {
                     const i = @intCast(c_uint, i_usize);
-                    const field_ptr = self.builder.buildStructGEP(ints_llvm_ty, casted_ptr, i, "");
-                    const load_inst = self.builder.buildLoad(field_types[i], field_ptr, "");
-                    load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
-                    llvm_args.appendAssumeCapacity(load_inst);
-                }
-            },
-            .multiple_llvm_float => {
-                const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
-                const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
-                const llvm_arg = try self.resolveInst(arg);
-                const is_by_ref = isByRef(param_ty);
-                const arg_ptr = if (is_by_ref) llvm_arg else p: {
-                    const p = self.buildAlloca(llvm_arg.typeOf(), null);
-                    const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(param_ty.abiAlignment(target));
-                    break :p p;
-                };
-
-                var field_types_buf: [8]*llvm.Type = undefined;
-                const field_types = field_types_buf[0..llvm_floats.len];
-                for (llvm_floats) |float_bits, i| {
-                    switch (float_bits) {
-                        64 => field_types[i] = self.dg.context.doubleType(),
-                        80 => field_types[i] = self.dg.context.x86FP80Type(),
-                        else => {},
-                    }
-                }
-                const floats_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
-                const casted_ptr = self.builder.buildBitCast(arg_ptr, floats_llvm_ty.pointerType(0), "");
-                try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
-                for (llvm_floats) |_, i_usize| {
-                    const i = @intCast(c_uint, i_usize);
-                    const field_ptr = self.builder.buildStructGEP(floats_llvm_ty, casted_ptr, i, "");
-                    const load_inst = self.builder.buildLoad(field_types[i], field_ptr, "");
+                    const field_ptr = self.builder.buildStructGEP(llvm_ty, casted_ptr, i, "");
+                    const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
                     load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
                     llvm_args.appendAssumeCapacity(load_inst);
                 }
@@ -10473,7 +10377,7 @@ const ParamTypeIterator = struct {
     llvm_index: u32,
     target: std.Target,
     llvm_types_len: u32,
-    llvm_types_buffer: [8]u16,
+    llvm_types_buffer: [8]*llvm.Type,
     byval_attr: bool,

     const Lowering = union(enum) {
@@ -10481,8 +10385,7 @@ const ParamTypeIterator = struct {
         byval,
         byref,
         abi_sized_int,
-        multiple_llvm_ints,
-        multiple_llvm_float,
+        multiple_llvm_types,
         slice,
         as_u16,
         float_array: u8,
@@ -10515,7 +10418,7 @@ const ParamTypeIterator = struct {
             it.zig_index += 1;
             return .no_bits;
         }
-
+        const dg = it.dg;
         switch (it.fn_info.cc) {
             .Unspecified, .Inline => {
                 it.zig_index += 1;
@@ -10584,28 +10487,28 @@ const ParamTypeIterator = struct {
             it.llvm_index += 1;
             return .byval;
         }
-        var llvm_types_buffer: [8]u16 = undefined;
+        var llvm_types_buffer: [8]*llvm.Type = undefined;
         var llvm_types_index: u32 = 0;
         for (classes) |class| {
             switch (class) {
                 .integer => {
-                    llvm_types_buffer[llvm_types_index] = 64;
+                    llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
                     llvm_types_index += 1;
                 },
                 .sse => {
-                    llvm_types_buffer[llvm_types_index] = 64;
+                    llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
                     llvm_types_index += 1;
                 },
                 .sseup => {
-                    llvm_types_buffer[llvm_types_index] = 64;
+                    llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
                     llvm_types_index += 1;
                 },
                 .x87 => {
-                    llvm_types_buffer[llvm_types_index] = 80;
+                    llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
                     llvm_types_index += 1;
                 },
                 .x87up => {
-                    llvm_types_buffer[llvm_types_index] = 80;
+                    llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
                     llvm_types_index += 1;
                 },
                 .complex_x87 => {
@@ -10625,7 +10528,7 @@ const ParamTypeIterator = struct {
             it.llvm_types_len = llvm_types_index;
             it.llvm_index += llvm_types_index;
             it.zig_index += 1;
-            return if (classes[0] == .integer) .multiple_llvm_ints else .multiple_llvm_float;
+            return .multiple_llvm_types;
         },
     },
     .wasm32 => {
@@ -10649,8 +10552,8 @@ const ParamTypeIterator = struct {
         .byval => return .byval,
         .integer => {
             it.llvm_types_len = 1;
-            it.llvm_types_buffer[0] = 64;
-            return .multiple_llvm_ints;
+            it.llvm_types_buffer[0] = dg.context.intType(64);
+            return .multiple_llvm_types;
         },
         .double_integer => return Lowering{ .i64_array = 2 },
     }
@@ -851,8 +851,6 @@ pub inline fn expectOk(c_err: c_int) !void {
 /// Tests for Double + Char struct
 const DC = extern struct { v1: f64, v2: u8 };
 test "DC: Zig passes to C" {
-    if (builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@@ -866,8 +864,6 @@ test "DC: Zig returns to C" {
     try expectOk(c_assert_ret_DC());
 }
 test "DC: C passes to Zig" {
-    if (builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@@ -900,8 +896,7 @@ pub export fn zig_ret_DC() DC {
 const CFF = extern struct { v1: u8, v2: f32, v3: f32 };

 test "CFF: Zig passes to C" {
-    if (builtin.target.cpu.arch.isX86() and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
+    if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -914,8 +909,7 @@ test "CFF: Zig returns to C" {
     try expectOk(c_assert_ret_CFF());
 }
 test "CFF: C passes to Zig" {
-    if (builtin.target.cpu.arch.isX86() and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
+    if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -950,8 +944,7 @@ pub export fn zig_ret_CFF() CFF {
 const PD = extern struct { v1: ?*anyopaque, v2: f64 };

 test "PD: Zig passes to C" {
-    if (builtin.target.cpu.arch.isX86() and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
+    if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -964,8 +957,7 @@ test "PD: Zig returns to C" {
     try expectOk(c_assert_ret_PD());
 }
 test "PD: C passes to Zig" {
-    if (builtin.target.cpu.arch.isX86() and builtin.target.os.tag != .windows)
-        return error.SkipZigTest;
+    if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
     if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;