stage2-wasm: pass behavior tests, disable ones that were not passing before

Pavel Verigo 2025-10-16 08:49:44 +02:00
parent 337762114f
commit c1f8fa0043
8 changed files with 215 additions and 48 deletions


@@ -1230,12 +1230,17 @@ pub fn generate(
fn generateInner(cg: *CodeGen, any_returns: bool) InnerError!Mir {
const zcu = cg.pt.zcu;
try cg.branches.append(cg.gpa, .{});
// clean up outer branch
defer {
var const_branch = cg.branches.pop().?;
const_branch.deinit(cg.gpa);
assert(cg.branches.items.len == 0); // missing branch merge
}
try cg.branches.append(cg.gpa, .{});
defer {
var outer_branch = cg.branches.pop().?;
outer_branch.deinit(cg.gpa);
assert(cg.branches.items.len == 0); // missing branch merge
}
// Generate MIR for function body
try cg.genBody(cg.air.getMainBody());
@@ -1384,7 +1389,7 @@ fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WV
return cg.lowerToStack(value);
} else {
switch (value) {
.nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0),
.nav_ref, .uav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0),
.dead => unreachable,
else => try cg.emitWValue(value),
}
@@ -2401,6 +2406,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr
} else if (abi_size > 16) {
assert(offset == 0);
try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
return;
},
else => if (abi_size > 8) {
return cg.fail("TODO: `store` for type `{f}` with abisize `{d}`", .{ ty.fmt(pt), abi_size });
@@ -2450,7 +2456,8 @@ fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const loaded = try cg.load(operand, ty, 0);
const ty_size = ty.abiSize(zcu);
if (ty.isAbiInt(zcu) and ty_size * 8 > ty.bitSize(zcu)) {
const int_elem_ty = try pt.intType(.unsigned, @intCast(ty_size * 8));
const int_info = ty.intInfo(zcu);
const int_elem_ty = try pt.intType(int_info.signedness, @intCast(ty_size * 8));
break :result try cg.trunc(loaded, ty, int_elem_ty);
} else {
break :result loaded;
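The airLoad change above keeps the original signedness when an integer is widened to its ABI size, so the follow-up truncation sign-extends rather than zero-extends. A minimal sketch of the kind of value this affects, assuming an odd-width signed integer; the test is illustrative and not part of this commit:

```zig
const std = @import("std");

// Illustrative only: an i33 occupies 8 bytes in memory, so a load widens it to
// the full ABI size before truncating back to 33 bits. Keeping the widened
// element type signed makes that truncation sign-extending, so negative values
// survive the round-trip.
test "negative odd-width integer survives a memory round-trip" {
    var x: i33 = -5;
    _ = &x; // keep x in memory so the load path is exercised
    try std.testing.expect(x + 1 == -4);
}
```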
@@ -2496,11 +2503,15 @@ fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue
}
const abi_size: u8 = @intCast(ty.abiSize(zcu));
const is_signed = switch (ty.zigTypeTag(zcu)) {
.int, .@"struct", .@"enum" => ty.intInfo(zcu).signedness == .signed,
else => false,
};
const opcode = buildOpcode(.{
.valtype1 = typeToValtype(ty, zcu, cg.target),
.width = abi_size * 8,
.op = .load,
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
.signedness = if (is_signed) .signed else .unsigned,
});
try cg.addMemArg(
@@ -3026,10 +3037,15 @@ fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
if (wasm_bits == int_bits) return operand;
const is_signed = switch (ty.zigTypeTag(zcu)) {
.int, .@"struct", .@"enum" => ty.intInfo(zcu).signedness == .signed,
else => false,
};
switch (wasm_bits) {
32 => {
try cg.emitWValue(operand);
if (ty.isSignedInt(zcu)) {
if (is_signed) {
try cg.addImm32(32 - int_bits);
try cg.addTag(.i32_shl);
try cg.addImm32(32 - int_bits);
@@ -3042,7 +3058,7 @@ fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
},
64 => {
try cg.emitWValue(operand);
if (ty.isSignedInt(zcu)) {
if (is_signed) {
try cg.addImm64(64 - int_bits);
try cg.addTag(.i64_shl);
try cg.addImm64(64 - int_bits);
@@ -3063,7 +3079,7 @@ fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
try cg.emitWValue(result);
_ = try cg.load(operand, Type.u64, 8);
if (ty.isSignedInt(zcu)) {
if (is_signed) {
try cg.addImm64(128 - int_bits);
try cg.addTag(.i64_shl);
try cg.addImm64(128 - int_bits);
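The load and wrapOperand changes above derive signedness from enums and packed structs with signed backing integers, not just from plain integer types, so sign-extending load opcodes and arithmetic shifts are chosen for them. A minimal sketch of a case that depends on this, assuming a signed enum tag; illustrative only, not taken from the commit:

```zig
const std = @import("std");

// Illustrative only: a signed enum tag that lives in memory must be read back
// with a sign-extending load and wrapped with an arithmetic shift, otherwise a
// negative tag value turns into a large positive one.
const E = enum(i8) { neg = -1, zero = 0 };

test "signed enum tag keeps its sign through memory" {
    var e: E = .neg;
    _ = &e; // keep e in memory rather than treating it as a comptime constant
    try std.testing.expect(@intFromEnum(e) == -1);
}
```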
@@ -3083,12 +3099,41 @@ fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
const pt = cg.pt;
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const ip = &zcu.intern_pool;
const ptr = ip.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.nav => |nav| return .{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } },
.uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } },
.int => return cg.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
.nav => |nav| {
const nav_val = ip.getNav(nav);
const nav_ty = Type.fromInterned(nav_val.typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !nav_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return switch (cg.ptr_size) {
.wasm32 => .{ .imm32 = 0xaaaaaaaa },
.wasm64 => .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
};
}
return .{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } };
},
.uav => |uav| {
const uav_ty = Type.fromInterned(ip.typeOf(uav.val));
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !uav_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return switch (cg.ptr_size) {
.wasm32 => .{ .imm32 = 0xaaaaaaaa },
.wasm64 => .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
};
}
return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } };
},
.int => return switch (cg.ptr_size) {
.wasm32 => .{ .imm32 = @intCast(offset) },
.wasm64 => .{ .imm64 = offset },
},
.eu_payload => |eu_ptr| try cg.lowerPtr(
eu_ptr,
offset + codegen.errUnionPayloadOffset(
@@ -7116,7 +7161,7 @@ fn airTagName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try cg.allocStack(cg.typeOfIndex(inst));
try cg.lowerToStack(result_ptr);
try cg.emitWValue(operand);
try cg.lowerToStack(operand);
try cg.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = enum_ty.toIntern() } });
return cg.finishAir(inst, result_ptr, &.{un_op});
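The lowerPtr hunk above adds two cases: pointers whose pointee has no runtime bits lower to a fixed 0xaaaaaaaa sentinel instead of a symbol reference, and pointers built from integer addresses lower directly to an immediate. A minimal sketch of the integer-address case, as an assumed illustration rather than a test from this commit:

```zig
const std = @import("std");

// Illustrative only: a comptime-known pointer created from a fixed integer
// address reaches lowerPtr with base_addr == .int and is emitted as a plain
// i32/i64 immediate rather than going through lowerConstant.
test "pointer from a fixed integer address" {
    const p: *u32 = @ptrFromInt(0x1000);
    try std.testing.expect(@intFromPtr(p) == 0x1000);
}
```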


@@ -201,6 +201,7 @@ pub const Env = enum {
.wasm => switch (feature) {
.stdio_listen,
.incremental,
.legalize,
.wasm_backend,
.wasm_linker,
=> true,


@@ -1853,7 +1853,6 @@ fn emitTagNameFunction(
) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_type_ip);
@@ -1884,52 +1883,74 @@ fn emitTagNameFunction(
appendReservedUleb32(code, 0);
} else {
const int_info = Zcu.Type.intInfo(.fromInterned(enum_type.tag_ty), zcu);
const outer_block_type: std.wasm.BlockType = switch (int_info.bits) {
0...32 => .i32,
33...64 => .i64,
else => return diags.fail("wasm linker does not yet implement @tagName for sparse enums with more than 64 bit integer tag types", .{}),
};
const is_big_int = int_info.bits > 64;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 0);
// Outer block that computes table offset.
// use i32 block type since we're computing a table offset
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(outer_block_type));
code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.i32));
for (tag_values, 0..) |tag_value, tag_index| {
// block for this if case
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
const val: Zcu.Value = .fromInterned(tag_value);
if (is_big_int) {
var val_space: Zcu.Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space, zcu);
const num_limbs = (int_info.bits + 63) / 64;
const limbs = try gpa.alloc(u64, num_limbs);
defer gpa.free(limbs);
val_bigint.writeTwosComplement(@ptrCast(limbs), .little);
try code.ensureUnusedCapacity(gpa, 7 * 5 + 6 + 1 * 6 + 20 * num_limbs);
for (0..num_limbs) |limb_index| {
// load the limb from memory (local 1 is pointer)
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 1);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
appendReservedUleb32(code, @ctz(@as(u32, 8))); // alignment for i64
appendReservedUleb32(code, @intCast(limb_index * 8));
appendReservedI64Const(code, limbs[limb_index]);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
// if not equal, break out of current case block
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
appendReservedUleb32(code, 0);
}
} else {
// Tag value whose name should be returned.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 1);
const val: Zcu.Value = .fromInterned(tag_value);
switch (outer_block_type) {
.i32 => {
if (int_info.bits <= 32) {
const x: u32 = switch (int_info.signedness) {
.signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))),
.unsigned => @intCast(val.toUnsignedInt(zcu)),
};
appendReservedI32Const(code, x);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne));
},
.i64 => {
} else {
const x: u64 = switch (int_info.signedness) {
.signed => @bitCast(val.toSignedInt(zcu)),
.unsigned => val.toUnsignedInt(zcu),
};
appendReservedI64Const(code, x);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
},
else => unreachable,
}
// if they're not equal, break out of current branch
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
appendReservedUleb32(code, 0);
}
// Put the table offset of the result on the stack.
appendReservedI32Const(code, @intCast(tag_index * slice_abi_size));
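For tag types wider than 64 bits, the emitted function above no longer compares a single scalar: the runtime tag stays in memory (local 1 points at it) and is matched against each named value one 64-bit limb at a time. A rough Zig model of that control flow follows; the helper name and limb layout are assumptions made for illustration, not linker code:

```zig
const std = @import("std");

// Illustrative model only: mirrors the emitted wasm's nested blocks, where a
// mismatching limb runs `i64.ne` + `br_if` to skip to the next case and a full
// match selects that tag's slot in the name slice table.
fn findTagIndex(tag_limbs: []const u64, named_values: []const []const u64) ?usize {
    next_case: for (named_values, 0..) |value_limbs, tag_index| {
        for (value_limbs, 0..) |limb, i| {
            if (tag_limbs[i] != limb) continue :next_case;
        }
        return tag_index; // scaled by slice_abi_size to index the name table
    }
    return null; // the emitted code traps here: the value has no name
}

test findTagIndex {
    const named: []const []const u64 = &.{
        &.{ 1, 0 }, // limbs of tag value 1 (little-endian u64 limbs)
        &.{ 0, 1 }, // limbs of tag value 1 << 64
    };
    try std.testing.expectEqual(@as(?usize, 1), findTagIndex(&.{ 0, 1 }, named));
}
```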
@@ -1962,7 +1983,7 @@ fn appendReservedI32Const(bytes: *ArrayList(u8), val: u32) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
return w.writeSleb128(@as(i32, @bitCast(val))) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}
@@ -1972,7 +1993,7 @@ fn appendReservedI64Const(bytes: *ArrayList(u8), val: u64) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
return w.writeSleb128(@as(i64, @bitCast(val))) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}
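The appendReservedI32Const and appendReservedI64Const fixes above matter because wasm `i32.const` and `i64.const` immediates are signed LEB128: writing the raw unsigned value would encode 0xFFFFFFFF as +4294967295 instead of the intended -1. A quick sketch of the idea, assuming the std.Io.Writer fixed-buffer API; the snippet is illustrative and not part of the commit:

```zig
const std = @import("std");

// Illustrative only: -1 encoded as signed LEB128 is the single byte 0x7f,
// which is what `i32.const -1` expects; encoding the unsigned bit pattern
// instead would produce a five-byte positive constant.
test "sleb128 of a bitcast i32 constant" {
    var buf: [16]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeSleb128(@as(i32, @bitCast(@as(u32, 0xFFFF_FFFF))));
    try std.testing.expectEqualSlices(u8, &.{0x7f}, w.buffered());
}
```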


@@ -1084,6 +1084,102 @@ test "tag name with large enum values" {
try expect(mem.eql(u8, @tagName(kdf), "argon2id"));
}
test "@tagName with exotic integer enum types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = struct {
fn testEnumSigned(comptime T: type) !void {
{
const E1 = enum(T) {
a = -125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
};
var e: E1 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
{
const E2 = enum(T) {
a = -125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
_,
};
var e: E2 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
}
fn testEnumUnsigned(comptime T: type) !void {
{
const E1 = enum(T) {
a = std.math.maxInt(T) - 125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
};
var e: E1 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
{
const E2 = enum(T) {
a = std.math.maxInt(T) - 125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
_,
};
var e: E2 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
}
fn doTheTest() !void {
try testEnumSigned(i33);
try testEnumSigned(i95);
try testEnumSigned(i127);
try testEnumUnsigned(u33);
try testEnumUnsigned(u95);
try testEnumUnsigned(u127);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@tagName in callconv(.c) function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;


@@ -1033,6 +1033,7 @@ test "@fieldParentPtr packed struct first zero-bit field" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const C = packed struct {
a: u0 = 0,
@@ -1139,6 +1140,7 @@ test "@fieldParentPtr packed struct middle zero-bit field" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,
@@ -1245,6 +1247,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,


@@ -749,6 +749,7 @@ test "packed struct with fp fields" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = packed struct {
data0: f32,


@@ -9,6 +9,7 @@ const expectEqual = std.testing.expectEqual;
test "implicit cast vector to array - bool" {
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -32,6 +33,7 @@ test "implicit cast vector to array - bool" {
test "implicit cast array to vector - bool" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {


@@ -1366,18 +1366,16 @@ const test_targets = blk: {
// WASI Targets
// Disabled due to no active maintainer (feel free to fix the failures
// and then re-enable at any time). The failures occur due to backend
// miscompilation of different AIR from the frontend.
//.{
// .target = .{
// .cpu_arch = .wasm32,
// .os_tag = .wasi,
// .abi = .none,
// },
// .use_llvm = false,
// .use_lld = false,
//},
.{
.target = .{
.cpu_arch = .wasm32,
.os_tag = .wasi,
.abi = .none,
},
.skip_modules = &.{ "compiler-rt", "std" },
.use_llvm = false,
.use_lld = false,
},
.{
.target = .{
.cpu_arch = .wasm32,