x86_64: fix compiler rt test failures

parent 1fecf86ebf
commit f6f2708d82

30 changed files with 211 additions and 206 deletions

@@ -35,8 +35,6 @@ fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
 }
 
 test "addtf3" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
 
     // NaN + any = NaN

@@ -106,8 +104,6 @@ fn test__addxf3(a: f80, b: f80, expected: u80) !void {
 }
 
 test "addxf3" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // NaN + any = NaN
     try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));
     try test__addxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));

@@ -23,8 +23,6 @@ fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
 }
 
 test "addoti4" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const min: i128 = math.minInt(i128);
     const max: i128 = math.maxInt(i128);
     var i: i128 = 1;

@@ -98,8 +98,6 @@ pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
 }
 
 test "cmp_f80" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     inline for (.{ LE, GE }) |RT| {
         try std.testing.expect(cmp_f80(RT, 1.0, 1.0) == RT.Equal);
         try std.testing.expect(cmp_f80(RT, 0.0, -0.0) == RT.Equal);

@@ -134,8 +134,6 @@ pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
 }
 
 test "cos32" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const epsilon = 0.00001;
 
     try expect(math.approxEqAbs(f32, cosf(0.0), 1.0, epsilon));

@@ -39,8 +39,6 @@ fn test__divxf3(a: f80, b: f80) !void {
 }
 
 test "divxf3" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // NaN / any = NaN
     try expect__divxf3_result(math.nan(f80), 0x1.23456789abcdefp+5, 0x7fffC000000000000000);
     // inf / any(except inf and nan) = inf

@@ -23,9 +23,6 @@ fn test_fmodx_infs() !void {
 }
 
 test "fmodx" {
-    if (builtin.zig_backend == .stage2_x86_64 and
-        !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .lzcnt)) return error.SkipZigTest;
-
     try test_fmodx(6.4, 4.0, 2.4);
     try test_fmodx(6.4, -4.0, 2.4);
     try test_fmodx(-6.4, 4.0, -2.4);

@@ -41,8 +41,6 @@ fn test__fixunssfsi(a: f32, expected: u32) !void {
 }
 
 test "fixsfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixsfsi(-math.floatMax(f32), math.minInt(i32));
 
     try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));

@@ -71,6 +69,7 @@ test "fixsfsi" {
     try test__fixsfsi(-1.0, -1);
     try test__fixsfsi(-0.99, 0);
     try test__fixsfsi(-0.5, 0);
 
+    try test__fixsfsi(-math.floatMin(f32), 0);
     try test__fixsfsi(0.0, 0);
     try test__fixsfsi(math.floatMin(f32), 0);

@@ -106,8 +105,6 @@ test "fixsfsi" {
 }
 
 test "fixunssfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunssfsi(0.0, 0);
 
     try test__fixunssfsi(0.5, 0);

@@ -147,8 +144,6 @@ fn test__fixunssfdi(a: f32, expected: u64) !void {
 }
 
 test "fixsfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixsfdi(-math.floatMax(f32), math.minInt(i64));
 
     try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));

@@ -204,8 +199,6 @@ test "fixsfdi" {
 }
 
 test "fixunssfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunssfdi(0.0, 0);
 
     try test__fixunssfdi(0.5, 0);

@@ -244,8 +237,6 @@ fn test__fixunssfti(a: f32, expected: u128) !void {
 }
 
 test "fixsfti" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixsfti(-math.floatMax(f32), math.minInt(i128));
 
     try test__fixsfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));

@@ -317,8 +308,6 @@ test "fixsfti" {
 }
 
 test "fixunssfti" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunssfti(0.0, 0);
 
     try test__fixunssfti(0.5, 0);

@@ -365,8 +354,6 @@ fn test__fixunsdfsi(a: f64, expected: u32) !void {
 }
 
 test "fixdfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixdfsi(-math.floatMax(f64), math.minInt(i32));
 
     try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));

@@ -428,8 +415,6 @@ test "fixdfsi" {
 }
 
 test "fixunsdfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunsdfsi(0.0, 0);
 
     try test__fixunsdfsi(0.5, 0);

@@ -472,8 +457,6 @@ fn test__fixunsdfdi(a: f64, expected: u64) !void {
 }
 
 test "fixdfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixdfdi(-math.floatMax(f64), math.minInt(i64));
 
     try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));

@@ -527,8 +510,6 @@ test "fixdfdi" {
 }
 
 test "fixunsdfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunsdfdi(0.0, 0);
     try test__fixunsdfdi(0.5, 0);
     try test__fixunsdfdi(0.99, 0);

@@ -571,8 +552,6 @@ fn test__fixunsdfti(a: f64, expected: u128) !void {
 }
 
 test "fixdfti" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixdfti(-math.floatMax(f64), math.minInt(i128));
 
     try test__fixdfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));

@@ -626,8 +605,6 @@ test "fixdfti" {
 }
 
 test "fixunsdfti" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunsdfti(0.0, 0);
 
     try test__fixunsdfti(0.5, 0);

@@ -677,8 +654,6 @@ fn test__fixunstfsi(a: f128, expected: u32) !void {
 }
 
 test "fixtfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixtfsi(-math.floatMax(f128), math.minInt(i32));
 
     try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));

@@ -742,8 +717,6 @@ test "fixtfsi" {
 }
 
 test "fixunstfsi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunstfsi(math.inf(f128), 0xffffffff);
     try test__fixunstfsi(0, 0x0);
     try test__fixunstfsi(0x1.23456789abcdefp+5, 0x24);

@@ -767,8 +740,6 @@ fn test__fixunstfdi(a: f128, expected: u64) !void {
 }
 
 test "fixtfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixtfdi(-math.floatMax(f128), math.minInt(i64));
 
     try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));

@@ -832,8 +803,6 @@ test "fixtfdi" {
 }
 
 test "fixunstfdi" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunstfdi(0.0, 0);
 
     try test__fixunstfdi(0.5, 0);

@@ -886,8 +855,6 @@ fn test__fixunstfti(a: f128, expected: u128) !void {
 }
 
 test "fixtfti" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixtfti(-math.floatMax(f128), math.minInt(i128));
 
     try test__fixtfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));

@@ -969,8 +936,6 @@ fn test__fixunshfti(a: f16, expected: u128) !void {
 }
 
 test "fixunshfti for f16" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try test__fixunshfti(math.inf(f16), math.maxInt(u128));
     try test__fixunshfti(math.floatMax(f16), 65504);
 }

@@ -7,8 +7,6 @@ fn test__negti2(a: i128, expected: i128) !void {
 }
 
 test "negti2" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // TODO ensuring that math.minInt(i128); returns error
 
     try test__negti2(-3, 3);

@@ -34,9 +34,6 @@ fn test__powixf2(a: f80, b: i32, expected: f80) !void {
 }
 
 test "powihf2" {
-    if (builtin.zig_backend == .stage2_x86_64 and
-        !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
-
     const inf_f16 = math.inf(f16);
     try test__powisf2(0, 0, 1);
     try test__powihf2(1, 0, 1);

@@ -356,8 +353,6 @@ test "powidf2" {
 }
 
 test "powitf2" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const inf_f128 = math.inf(f128);
     try test__powitf2(0, 0, 1);
     try test__powitf2(1, 0, 1);

@@ -463,8 +458,6 @@ test "powitf2" {
 }
 
 test "powixf2" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const inf_f80 = math.inf(f80);
     try test__powixf2(0, 0, 1);
     try test__powixf2(1, 0, 1);

@@ -140,8 +140,6 @@ pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble {
 }
 
 test "sin32" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const epsilon = 0.00001;
 
     try expect(math.approxEqAbs(f32, sinf(0.0), 0.0, epsilon));

@@ -27,8 +27,6 @@ pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
 }
 
 test "suboti3" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const min: i128 = math.minInt(i128);
     const max: i128 = math.maxInt(i128);
     var i: i128 = 1;

@@ -131,8 +131,6 @@ test "tan" {
 }
 
 test "tan32" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const epsilon = 0.00001;
 
     try expect(math.approxEqAbs(f32, tanf(0.0), 0.0, epsilon));

@@ -440,6 +440,8 @@ test "Condition - multi signal" {
         return error.SkipZigTest;
     }
 
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
     const num_threads = 4;
     const num_iterations = 4;
 

@@ -363,8 +363,6 @@ test "phc format - hash without salt" {
 }
 
 test "phc format - calcSize" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const s = "$scrypt$v=1$ln=15,r=8,p=1$c2FsdHNhbHQ$dGVzdHBhc3M";
     const v = try deserialize(struct {
         alg_id: []const u8,

@@ -1054,8 +1054,6 @@ fn isOpcodeRegisterLocation(opcode: u8) bool {
 
 const testing = std.testing;
 test "DWARF expressions" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const allocator = std.testing.allocator;
 
     const options = ExpressionOptions{};

@@ -2435,8 +2435,6 @@ test "float.hexadecimal" {
 }
 
 test "float.hexadecimal.precision" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try expectFmt("f16: 0x1.5p-2", "f16: {x:.1}", .{@as(f16, 1.0 / 3.0)});
     try expectFmt("f32: 0x1.555p-2", "f32: {x:.3}", .{@as(f32, 1.0 / 3.0)});
     try expectFmt("f64: 0x1.55555p-2", "f64: {x:.5}", .{@as(f64, 1.0 / 3.0)});

@@ -1,6 +1,5 @@
 const std = @import("std");
 const assert = std.debug.assert;
-const builtin = @import("builtin");
 const mem = std.mem;
 const meta = std.meta;
 

@@ -253,8 +252,6 @@ test "typeContainsSlice" {
 }
 
 test "hash pointer" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const array = [_]u32{ 123, 123, 123 };
     const a = &array[0];
     const b = &array[1];

@@ -275,8 +272,6 @@ test "hash pointer" {
 }
 
 test "hash slice shallow" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // Allocate one array dynamically so that we're assured it is not merged
     // with the other by the optimization passes.
     const array1 = try std.testing.allocator.create([6]u32);

@@ -295,8 +290,6 @@ test "hash slice shallow" {
 }
 
 test "hash slice deep" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // Allocate one array dynamically so that we're assured it is not merged
     // with the other by the optimization passes.
     const array1 = try std.testing.allocator.create([6]u32);

@@ -313,8 +306,6 @@ test "hash slice deep" {
 }
 
 test "hash struct deep" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const Foo = struct {
         a: u32,
         b: u16,

@@ -354,8 +345,6 @@ test "hash struct deep" {
 }
 
 test "testHash optional" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const a: ?u32 = 123;
     const b: ?u32 = null;
     try testing.expectEqual(testHash(a), testHash(@as(u32, 123)));

@@ -364,8 +353,6 @@ test "testHash optional" {
 }
 
 test "testHash array" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const a = [_]u32{ 1, 2, 3 };
     const h = testHash(a);
     var hasher = Wyhash.init(0);

@@ -382,8 +369,6 @@ test "testHash multi-dimensional array" {
 }
 
 test "testHash struct" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const Foo = struct {
         a: u32 = 1,
         b: u32 = 2,

@@ -399,8 +384,6 @@ test "testHash struct" {
 }
 
 test "testHash union" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const Foo = union(enum) {
         A: u32,
         B: bool,

@@ -425,8 +408,6 @@ test "testHash union" {
 }
 
 test "testHash vector" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 };
     const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 };
     try testing.expect(testHash(a) == testHash(a));

@@ -439,8 +420,6 @@ test "testHash vector" {
 }
 
 test "testHash error union" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const Errors = error{Test};
     const Foo = struct {
         a: u32 = 1,

@@ -242,8 +242,6 @@ test "smhasher" {
 }
 
 test "iterative api" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const Test = struct {
         fn do() !void {
             try verify.iterativeApi(Wyhash);

@@ -164,8 +164,6 @@ pub fn bitReader(
 }
 
 test "api coverage" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     const mem_be = [_]u8{ 0b11001101, 0b00001011 };
     const mem_le = [_]u8{ 0b00011101, 0b10010101 };
 

@@ -1,5 +1,4 @@
 const std = @import("std");
-const builtin = @import("builtin");
 const io = std.io;
 const meta = std.meta;
 const trait = std.trait;

@@ -10,7 +9,7 @@ const expectError = std.testing.expectError;
 const mem = std.mem;
 const fs = std.fs;
 const File = std.fs.File;
-const native_endian = builtin.target.cpu.arch.endian();
+const native_endian = @import("builtin").target.cpu.arch.endian();
 
 const tmpDir = std.testing.tmpDir;
 

@@ -61,8 +60,6 @@ test "write a file, read it, then delete it" {
 }
 
 test "BitStreams with File Stream" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     var tmp = tmpDir(.{});
     defer tmp.cleanup();
 

@@ -1,4 +1,3 @@
-const builtin = @import("builtin");
 const std = @import("std");
 const testing = std.testing;
 

@@ -215,8 +214,6 @@ fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u
 }
 
 test "deserialize signed LEB128" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // Truncated
     try testing.expectError(error.EndOfStream, test_read_stream_ileb128(i64, "\x80"));
 

@@ -363,8 +360,6 @@ test "serialize unsigned LEB128" {
 }
 
 test "serialize signed LEB128" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // explicitly test i0 because starting `t` at 0
     // will break the while loop
     try test_write_leb128(@as(i0, 0));

@@ -990,8 +990,6 @@ test "big.int mul 0*0" {
 }
 
 test "big.int mul large" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     var a = try Managed.initCapacity(testing.allocator, 50);
     defer a.deinit();
     var b = try Managed.initCapacity(testing.allocator, 100);

@@ -1075,8 +1073,6 @@ test "big.int mulWrap multi-multi signed" {
 }
 
 test "big.int mulWrap large" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     var a = try Managed.initCapacity(testing.allocator, 50);
     defer a.deinit();
     var b = try Managed.initCapacity(testing.allocator, 100);

@@ -108,8 +108,6 @@ test "64" {
 }
 
 test "80" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     try expect(ilogbX(f80, 0.0) == fp_ilogb0);
     try expect(ilogbX(f80, 0.5) == -1);
     try expect(ilogbX(f80, 0.8923) == -1);

@@ -67,8 +67,6 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
 }
 
 test "math.ldexp" {
-    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
     // subnormals
     try expect(ldexp(@as(f16, 0x1.1FFp14), -14 - 9 - 15) == math.floatTrueMin(f16));
     try expect(ldexp(@as(f32, 0x1.3FFFFFp-1), -126 - 22) == math.floatTrueMin(f32));

@@ -3495,59 +3495,140 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ty = self.typeOf(bin_op.lhs);
-    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
-        "TODO implement airMulSat for {}",
-        .{ty.fmt(mod)},
-    );
-
-    try self.spillRegisters(&.{ .rax, .rdx });
-    const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
-    defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
-
-    const lhs_mcv = try self.resolveInst(bin_op.lhs);
-    const lhs_lock = switch (lhs_mcv) {
-        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-        else => null,
-    };
-    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
-
-    const rhs_mcv = try self.resolveInst(bin_op.rhs);
-    const rhs_lock = switch (rhs_mcv) {
-        .register => |reg| self.register_manager.lockReg(reg),
-        else => null,
-    };
-    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
-    const limit_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
-    const limit_mcv = MCValue{ .register = limit_reg };
-    const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
-    defer self.register_manager.unlockReg(limit_lock);
-
-    const reg_bits = self.regBitSize(ty);
-    const cc: Condition = if (ty.isSignedInt(mod)) cc: {
-        try self.genSetReg(limit_reg, ty, lhs_mcv);
-        try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
-        try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
-        try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
-        });
-        break :cc .o;
-    } else cc: {
-        try self.genSetReg(limit_reg, ty, .{
-            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - reg_bits),
-        });
-        break :cc .c;
-    };
-
-    const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
-    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
-    try self.asmCmovccRegisterRegister(
-        cc,
-        registerAlias(dst_mcv.register, cmov_abi_size),
-        registerAlias(limit_reg, cmov_abi_size),
-    );
-
-    return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
+    const result = result: {
+        if (ty.toIntern() == .i128_type) {
+            const ptr_c_int = try mod.singleMutPtrType(Type.c_int);
+            const overflow = try self.allocTempRegOrMem(Type.c_int, false);
+
+            const dst_mcv = try self.genCall(.{ .lib = .{
+                .return_type = .i128_type,
+                .param_types = &.{ .i128_type, .i128_type, ptr_c_int.toIntern() },
+                .callee = "__muloti4",
+            } }, &.{ Type.i128, Type.i128, ptr_c_int }, &.{
+                .{ .air_ref = bin_op.lhs },
+                .{ .air_ref = bin_op.rhs },
+                overflow.address(),
+            });
+            const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_mcv.register_pair);
+            defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
+
+            const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
+            const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+            defer self.register_manager.unlockReg(tmp_lock);
+
+            const lhs_mcv = try self.resolveInst(bin_op.lhs);
+            const mat_lhs_mcv = switch (lhs_mcv) {
+                .load_symbol => mat_lhs_mcv: {
+                    // TODO clean this up!
+                    const addr_reg = try self.copyToTmpRegister(Type.usize, lhs_mcv.address());
+                    break :mat_lhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
+                },
+                else => lhs_mcv,
+            };
+            const mat_lhs_lock = switch (mat_lhs_mcv) {
+                .indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
+                else => null,
+            };
+            defer if (mat_lhs_lock) |lock| self.register_manager.unlockReg(lock);
+            if (mat_lhs_mcv.isMemory()) try self.asmRegisterMemory(
+                .{ ._, .mov },
+                tmp_reg,
+                mat_lhs_mcv.address().offset(8).deref().mem(.qword),
+            ) else try self.asmRegisterRegister(
+                .{ ._, .mov },
+                tmp_reg,
+                mat_lhs_mcv.register_pair[1],
+            );
+
+            const rhs_mcv = try self.resolveInst(bin_op.rhs);
+            const mat_rhs_mcv = switch (rhs_mcv) {
+                .load_symbol => mat_rhs_mcv: {
+                    // TODO clean this up!
+                    const addr_reg = try self.copyToTmpRegister(Type.usize, rhs_mcv.address());
+                    break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
+                },
+                else => rhs_mcv,
+            };
+            const mat_rhs_lock = switch (mat_rhs_mcv) {
+                .indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
+                else => null,
+            };
+            defer if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);
+            if (mat_rhs_mcv.isMemory()) try self.asmRegisterMemory(
+                .{ ._, .xor },
+                tmp_reg,
+                mat_rhs_mcv.address().offset(8).deref().mem(.qword),
+            ) else try self.asmRegisterRegister(
+                .{ ._, .xor },
+                tmp_reg,
+                mat_rhs_mcv.register_pair[1],
+            );
+
+            try self.asmRegisterImmediate(.{ ._r, .sa }, tmp_reg, Immediate.u(63));
+            try self.asmRegister(.{ ._, .not }, tmp_reg);
+            try self.asmMemoryImmediate(.{ ._, .cmp }, overflow.mem(.dword), Immediate.s(0));
+            try self.freeValue(overflow);
+            try self.asmCmovccRegisterRegister(.ne, dst_mcv.register_pair[0], tmp_reg);
+            try self.asmRegisterImmediate(.{ ._c, .bt }, tmp_reg, Immediate.u(63));
+            try self.asmCmovccRegisterRegister(.ne, dst_mcv.register_pair[1], tmp_reg);
+            break :result dst_mcv;
+        }
+
+        if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+            "TODO implement airMulSat for {}",
+            .{ty.fmt(mod)},
+        );
+
+        try self.spillRegisters(&.{ .rax, .rdx });
+        const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
+        defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+        const lhs_mcv = try self.resolveInst(bin_op.lhs);
+        const lhs_lock = switch (lhs_mcv) {
+            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+            else => null,
+        };
+        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+        const rhs_mcv = try self.resolveInst(bin_op.rhs);
+        const rhs_lock = switch (rhs_mcv) {
+            .register => |reg| self.register_manager.lockReg(reg),
+            else => null,
+        };
+        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+        const limit_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
+        const limit_mcv = MCValue{ .register = limit_reg };
+        const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+        defer self.register_manager.unlockReg(limit_lock);
+
+        const reg_bits = self.regBitSize(ty);
+        const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+            try self.genSetReg(limit_reg, ty, lhs_mcv);
+            try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
+            try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+            try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
+                .immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
+            });
+            break :cc .o;
+        } else cc: {
+            try self.genSetReg(limit_reg, ty, .{
+                .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - reg_bits),
+            });
+            break :cc .c;
+        };
+
+        const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
+        const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+        try self.asmCmovccRegisterRegister(
+            cc,
+            registerAlias(dst_mcv.register, cmov_abi_size),
+            registerAlias(limit_reg, cmov_abi_size),
+        );
+        break :result dst_mcv;
+    };
+    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
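
Note: the i128 branch above clamps with a bit trick: the high limbs of lhs and rhs are xor'd, arithmetic-shifted to fill the register with the sign the true product would have, and inverted; cmovne installs that pattern into the result pair only when __muloti4 reported overflow, with btc flipping bit 63 of the high limb to turn the all-ones/all-zeros pattern into maxInt/minInt. A minimal reference model in plain Zig (mulSatI128 is a hypothetical helper written for this note, not code from the commit):

const std = @import("std");

fn mulSatI128(lhs: i128, rhs: i128) i128 {
    const product = @mulWithOverflow(lhs, rhs);
    if (product[1] == 0) return product[0];
    // Sign of the true product: all-ones if negative, zero if non-negative.
    const sign: i128 = (lhs ^ rhs) >> 127;
    // sign ^ maxInt yields minInt for a negative product, maxInt otherwise.
    return sign ^ std.math.maxInt(i128);
}

test "mulSatI128 saturates on overflow" {
    try std.testing.expectEqual(@as(i128, 6), mulSatI128(2, 3));
    try std.testing.expectEqual(@as(i128, std.math.maxInt(i128)), mulSatI128(std.math.maxInt(i128), 2));
    try std.testing.expectEqual(@as(i128, std.math.minInt(i128)), mulSatI128(std.math.maxInt(i128), -2));
}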

@@ -3701,10 +3782,13 @@ fn genSetFrameTruncatedOverflowCompare(
     const ty = tuple_ty.structFieldType(0, mod);
     const int_info = ty.intInfo(mod);
 
-    const hi_limb_bits = (int_info.bits - 1) % 64 + 1;
-    const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits);
+    const hi_bits = (int_info.bits - 1) % 64 + 1;
+    const hi_ty = try mod.intType(int_info.signedness, hi_bits);
 
-    const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits);
+    const limb_bits: u16 = @intCast(if (int_info.bits <= 64) self.regBitSize(ty) else 64);
+    const limb_ty = try mod.intType(int_info.signedness, limb_bits);
+
+    const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_bits);
 
     const temp_regs =
         try self.register_manager.allocRegs(3, .{ null, null, null }, abi.RegisterClass.gp);

@@ -3720,9 +3804,9 @@ fn genSetFrameTruncatedOverflowCompare(
         src_mcv.address().offset(int_info.bits / 64 * 8).deref()
     else
         src_mcv;
-    try self.genSetReg(scratch_reg, hi_limb_ty, hi_limb_mcv);
-    try self.truncateRegister(hi_limb_ty, scratch_reg);
-    try self.genBinOpMir(.{ ._, .cmp }, hi_limb_ty, .{ .register = scratch_reg }, hi_limb_mcv);
+    try self.genSetReg(scratch_reg, limb_ty, hi_limb_mcv);
+    try self.truncateRegister(hi_ty, scratch_reg);
+    try self.genBinOpMir(.{ ._, .cmp }, limb_ty, .{ .register = scratch_reg }, hi_limb_mcv);
 
     const eq_reg = temp_regs[2];
     if (overflow_cc) |_| {

@@ -3740,7 +3824,7 @@ fn genSetFrameTruncatedOverflowCompare(
     try self.genSetMem(
         .{ .frame = frame_index },
         payload_off + hi_limb_off,
-        hi_limb_ty,
+        limb_ty,
         .{ .register = scratch_reg },
     );
     try self.genSetMem(
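
Note: (int_info.bits - 1) % 64 + 1 above counts the significant bits in the topmost 64-bit limb of a wide integer; unlike bits % 64, it maps limb-aligned widths to 64 rather than 0. A small model (hiLimbBits is a hypothetical helper for this note only):

const std = @import("std");

fn hiLimbBits(bits: u16) u16 {
    return (bits - 1) % 64 + 1;
}

test "hiLimbBits" {
    try std.testing.expectEqual(@as(u16, 17), hiLimbBits(17)); // single-limb int
    try std.testing.expectEqual(@as(u16, 1), hiLimbBits(65)); // i65: one bit in the top limb
    try std.testing.expectEqual(@as(u16, 64), hiLimbBits(128)); // limb-aligned: full top limb
}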

@@ -4084,6 +4168,7 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
 }
 
 fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
     try self.spillRegisters(&.{.rcx});

@@ -4095,9 +4180,38 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const lhs_ty = self.typeOf(bin_op.lhs);
     const rhs_ty = self.typeOf(bin_op.rhs);
 
-    const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
-
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    const dst_mcv = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+    switch (tag) {
+        .shr, .shr_exact, .shl_exact => {},
+        .shl => switch (dst_mcv) {
+            .register => |dst_reg| try self.truncateRegister(lhs_ty, dst_reg),
+            .register_pair => |dst_regs| try self.truncateRegister(lhs_ty, dst_regs[1]),
+            .load_frame => |frame_addr| {
+                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
+                const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+                defer self.register_manager.unlockReg(tmp_lock);
+
+                const lhs_bits: u31 = @intCast(lhs_ty.bitSize(mod));
+                const tmp_ty = if (lhs_bits > 64) Type.usize else lhs_ty;
+                const off = frame_addr.off + lhs_bits / 64 * 8;
+                try self.genSetReg(
+                    tmp_reg,
+                    tmp_ty,
+                    .{ .load_frame = .{ .index = frame_addr.index, .off = off } },
+                );
+                try self.truncateRegister(lhs_ty, tmp_reg);
+                try self.genSetMem(
+                    .{ .frame = frame_addr.index },
+                    off,
+                    tmp_ty,
+                    .{ .register = tmp_reg },
+                );
+            },
+            else => {},
+        },
+        else => unreachable,
+    }
+    return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
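
Note: the new .shl fix-up above exists because the backend shifts in full-width registers, so shifting a narrow integer can leave stray bits above its top bit; truncateRegister masks them off so later reads of the value see only its own bits. A model of the effect (shlInRegister is a hypothetical helper for this note, not code from the commit):

const std = @import("std");

fn shlInRegister(x: u7, amt: u3) u7 {
    const wide: u64 = x; // the value lives in a 64-bit register
    const shifted = wide << amt; // bits 7..63 may now hold junk
    return @as(u7, @truncate(shifted)); // the masking truncateRegister performs
}

test "shl result is truncated back to the narrow type" {
    try std.testing.expectEqual(@as(u7, 0x20), shlInRegister(0x6a, 4));
}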

@@ -4529,18 +4643,24 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const operand = try self.resolveInst(ty_op.operand);
-    const dst_mcv: MCValue = blk: {
-        switch (operand) {
-            .load_frame => |frame_addr| break :blk .{ .load_frame = .{
-                .index = frame_addr.index,
-                .off = frame_addr.off + 8,
-            } },
-            else => return self.fail("TODO implement slice_len for {}", .{operand}),
+    const result: MCValue = result: {
+        const src_mcv = try self.resolveInst(ty_op.operand);
+        switch (src_mcv) {
+            .load_frame => |frame_addr| {
+                const len_mcv: MCValue = .{ .load_frame = .{
+                    .index = frame_addr.index,
+                    .off = frame_addr.off + 8,
+                } };
+                if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv;
+
+                const dst_mcv = try self.allocRegOrMem(inst, true);
+                try self.genCopy(Type.usize, dst_mcv, len_mcv);
+                break :result dst_mcv;
+            },
+            else => return self.fail("TODO implement slice_len for {}", .{src_mcv}),
         }
     };
 
-    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
 
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {

@@ -4933,13 +5053,19 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         const has_lzcnt = self.hasFeature(.lzcnt);
         if (src_bits > 64 and !has_lzcnt) {
            var callee_buf: ["__clz?i2".len]u8 = undefined;
-            break :result try self.genCall(.{ .lib = .{
+            const result = try self.genCall(.{ .lib = .{
                 .return_type = .i32_type,
                 .param_types = &.{src_ty.toIntern()},
                 .callee = std.fmt.bufPrint(&callee_buf, "__clz{c}i2", .{
                     intCompilerRtAbiName(src_bits),
                 }) catch unreachable,
             } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }});
+            if (src_bits < 128) try self.asmRegisterImmediate(
+                .{ ._, .sub },
+                result.register,
+                Immediate.u(128 - src_bits),
+            );
+            break :result result;
         }
 
         const src_mcv = try self.resolveInst(ty_op.operand);

@@ -5590,7 +5716,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
     defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
     const dst_mcv: MCValue = .{ .register = .st0 };
-    if (std.meta.eql(src_mcv, dst_mcv) and self.reuseOperand(inst, operand, 0, src_mcv))
+    if (!std.meta.eql(src_mcv, dst_mcv) or !self.reuseOperand(inst, operand, 0, src_mcv))
         try self.register_manager.getReg(.st0, inst);
 
     try self.genCopy(ty, dst_mcv, src_mcv);

@@ -7317,6 +7443,7 @@ fn genShiftBinOp(
     });
 
+    assert(rhs_ty.abiSize(mod) == 1);
     try self.spillEflagsIfOccupied();
 
     const lhs_abi_size = lhs_ty.abiSize(mod);
     if (lhs_abi_size > 16) return self.fail("TODO implement genShiftBinOp for {}", .{

@@ -7821,7 +7948,7 @@ fn genBinOp(
     }) .{ .lhs = rhs_air, .rhs = lhs_air } else .{ .lhs = lhs_air, .rhs = rhs_air };
 
     const lhs_mcv = try self.resolveInst(ordered_air.lhs);
-    const rhs_mcv = try self.resolveInst(ordered_air.rhs);
+    var rhs_mcv = try self.resolveInst(ordered_air.rhs);
     switch (lhs_mcv) {
         .immediate => |imm| switch (imm) {
             0 => switch (air_tag) {

@@ -7893,6 +8020,7 @@ fn genBinOp(
             copied_to_dst = false
         else
             try self.genCopy(lhs_ty, dst_mcv, lhs_mcv);
+        rhs_mcv = try self.resolveInst(ordered_air.rhs);
         break :dst dst_mcv;
     };
     const dst_locks: [2]?RegisterLock = switch (dst_mcv) {

@@ -9224,6 +9352,7 @@ fn genBinOpMir(
 ) !void {
     const mod = self.bin_file.options.module.?;
     const abi_size: u32 = @intCast(ty.abiSize(mod));
+    try self.spillEflagsIfOccupied();
     switch (dst_mcv) {
         .none,
         .unreach,

@@ -11154,11 +11283,11 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end..][0..loop.data.body_len];
-    const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len);
 
     self.scope_generation += 1;
     const state = try self.saveState();
 
+    const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len);
     try self.genBody(body);
     try self.restoreState(state, &.{}, .{
         .emit_instructions = true,

@@ -11336,14 +11465,14 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
         .close_scope = false,
     });
 
-    // Stop tracking block result without forgetting tracking info
-    try self.freeValue(block_tracking.short);
-
     // Emit a jump with a relocation. It will be patched up after the block ends.
     // Leave the jump offset undefined
     const jmp_reloc = try self.asmJmpReloc(undefined);
     try block_data.relocs.append(self.gpa, jmp_reloc);
 
+    // Stop tracking block result without forgetting tracking info
+    try self.freeValue(block_tracking.short);
+
     self.finishAirBookkeeping();
 }
 

@@ -12660,7 +12789,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
                 registerAlias(dst_reg, abi_size),
                 .{
                     .base = .{ .reg = addr_reg },
-                    .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } },
+                    .mod = .{ .rm = .{ .size = self.memSize(ty) } },
                 },
             );
         },

@@ -15350,11 +15479,10 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
         .signedness = .unsigned,
         .bits = @intCast(ty.bitSize(mod)),
     };
-    const max_reg_bit_width = Register.rax.bitSize();
+    const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
     try self.spillEflagsIfOccupied();
     switch (int_info.signedness) {
         .signed => {
-            const shift: u6 = @intCast(max_reg_bit_width - int_info.bits);
             try self.genShiftBinOpMir(
                 .{ ._l, .sa },
                 Type.isize,

@@ -15369,7 +15497,6 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
             );
         },
         .unsigned => {
-            const shift: u6 = @intCast(max_reg_bit_width - int_info.bits);
             const mask = ~@as(u64, 0) >> shift;
             if (int_info.bits <= 32) {
                 try self.genBinOpMir(
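
Note: the reworked truncateRegister computes one shift up front: 64 - bits % 64 is the number of unused bits above the value's topmost 64-bit limb, and the u6 cast fails exactly when bits is a multiple of 64, in which case the register already holds a full limb and the function returns without masking. A model (highLimbMask is a hypothetical helper for this note only):

const std = @import("std");

fn highLimbMask(bits: u16) ?u64 {
    const shift = std.math.cast(u6, 64 - bits % 64) orelse return null;
    return ~@as(u64, 0) >> shift;
}

test "highLimbMask" {
    try std.testing.expectEqual(@as(?u64, 0xF), highLimbMask(4));
    try std.testing.expectEqual(@as(?u64, 0xFFFFFFFFF), highLimbMask(100)); // 36 one-bits
    try std.testing.expectEqual(@as(?u64, null), highLimbMask(128)); // limb-aligned: no masking
}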

@@ -4,7 +4,6 @@ const expect = std.testing.expect;
 
 test "exporting enum type and value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -21,7 +20,6 @@ test "exporting enum type and value" {
 
 test "exporting with internal linkage" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -37,7 +35,6 @@ test "exporting with internal linkage" {
 
 test "exporting using field access" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -577,7 +577,6 @@ test "pass and return comptime-only types" {
 
 test "pointer to alias behaves same as pointer to function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -6,7 +6,6 @@ var pos = [2]f32{ 0.0, 0.0 };
 test "store to global array" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -20,7 +19,6 @@ var vpos = @Vector(2, f32){ 0.0, 0.0 };
 test "store to global vector" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -23,7 +23,6 @@ const snan_f128: f128 = math.snan(f128);
 
 test "nan memory equality" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -1241,11 +1241,6 @@ pub fn addCAbiTests(b: *std.Build, skip_non_native: bool, skip_release: bool) *S
             continue;
         }
 
-        if (c_abi_target.use_llvm == false and optimize_mode == .ReleaseFast) {
-            // panic: unrecognized command line argument
-            continue;
-        }
-
        const test_step = b.addTest(.{
             .name = b.fmt("test-c-abi-{s}-{s}-{s}{s}{s}{s}", .{
                 c_abi_target.target.zigTriple(b.allocator) catch @panic("OOM"),