Mirror of https://codeberg.org/ziglang/zig.git

Merge pull request #25772 from mlugg/kill-dead-code
compiler: rewrite some legalizations, and remove a bunch of dead code

Commit 181b25ce4f
39 changed files with 1480 additions and 3882 deletions

@@ -211,10 +211,10 @@ set(ZIG_STAGE2_SOURCES
     lib/compiler_rt/absvti2.zig
     lib/compiler_rt/adddf3.zig
     lib/compiler_rt/addf3.zig
-    lib/compiler_rt/addo.zig
     lib/compiler_rt/addsf3.zig
     lib/compiler_rt/addtf3.zig
     lib/compiler_rt/addvsi3.zig
+    lib/compiler_rt/addvdi3.zig
     lib/compiler_rt/addxf3.zig
     lib/compiler_rt/arm.zig
     lib/compiler_rt/atomics.zig
@@ -354,7 +354,6 @@ set(ZIG_STAGE2_SOURCES
     lib/compiler_rt/sqrt.zig
     lib/compiler_rt/stack_probe.zig
     lib/compiler_rt/subdf3.zig
-    lib/compiler_rt/subo.zig
     lib/compiler_rt/subsf3.zig
     lib/compiler_rt/subtf3.zig
     lib/compiler_rt/subvdi3.zig

@@ -28,12 +28,13 @@ comptime {
     _ = @import("compiler_rt/negv.zig");

     _ = @import("compiler_rt/addvsi3.zig");
+    _ = @import("compiler_rt/addvdi3.zig");
+
     _ = @import("compiler_rt/subvsi3.zig");
+    _ = @import("compiler_rt/subvdi3.zig");
+
     _ = @import("compiler_rt/mulvsi3.zig");

-    _ = @import("compiler_rt/addo.zig");
-    _ = @import("compiler_rt/subo.zig");
-
     _ = @import("compiler_rt/mulo.zig");

     // Float routines

lib/compiler_rt/addo.zig (deleted)
@@ -1,46 +0,0 @@
const std = @import("std");
const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;

comptime {
    @export(&__addosi4, .{ .name = "__addosi4", .linkage = common.linkage, .visibility = common.visibility });
    @export(&__addodi4, .{ .name = "__addodi4", .linkage = common.linkage, .visibility = common.visibility });
    @export(&__addoti4, .{ .name = "__addoti4", .linkage = common.linkage, .visibility = common.visibility });
}

// addo - add overflow
// * return a+%b.
// * return if a+b overflows => 1 else => 0
// - addoXi4_generic as default

inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
    @setRuntimeSafety(common.test_safety);
    overflow.* = 0;
    const sum: ST = a +% b;
    // Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
    // Let sum = a +% b == a + b + carry == wraparound addition.
    // Overflow in a+b+carry occurs, iff a and b have opposite signs
    // and the sign of a+b+carry is the same as a (or equivalently b).
    // Slower routine: res = ~(a ^ b) & (sum ^ a)
    // Faster routine: res = (sum ^ a) & (sum ^ b)
    // Overflow occurred, iff (res < 0)
    if (((sum ^ a) & (sum ^ b)) < 0)
        overflow.* = 1;
    return sum;
}

pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
    return addoXi4_generic(i32, a, b, overflow);
}
pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
    return addoXi4_generic(i64, a, b, overflow);
}
pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
    return addoXi4_generic(i128, a, b, overflow);
}

test {
    _ = @import("addosi4_test.zig");
    _ = @import("addodi4_test.zig");
    _ = @import("addoti4_test.zig");
}

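The trick used by the deleted addoXi4_generic is worth spelling out: compute the wrapped sum, then declare overflow exactly when the sum's sign differs from the sign of both operands. A minimal standalone sketch (not part of this commit; addOverflows is a hypothetical name) that checks the trick against Zig's built-in overflow add:

const std = @import("std");

// Sketch only: the sign trick from the deleted addoXi4_generic.
fn addOverflows(a: i32, b: i32) bool {
    const sum = a +% b; // wraparound add
    // Sign bit of (sum ^ a) & (sum ^ b) is set iff sum differs in sign
    // from both a and b, i.e. a and b share a sign that sum does not.
    return ((sum ^ a) & (sum ^ b)) < 0;
}

test "sign trick matches @addWithOverflow" {
    const cases = [_][2]i32{
        .{ std.math.maxInt(i32), 1 },
        .{ std.math.minInt(i32), -1 },
        .{ 1, 2 },
        .{ -3, 3 },
    };
    for (cases) |c| {
        const res = @addWithOverflow(c[0], c[1]);
        try std.testing.expectEqual(res[1] == 1, addOverflows(c[0], c[1]));
    }
}
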
lib/compiler_rt/addodi4_test.zig (deleted)
@@ -1,77 +0,0 @@
const addv = @import("addo.zig");
const std = @import("std");
const testing = std.testing;
const math = std.math;

fn test__addodi4(a: i64, b: i64) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = addv.__addodi4(a, b, &result_ov);
    const expected: i64 = simple_addodi4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

fn simple_addodi4(a: i64, b: i64, overflow: *c_int) i64 {
    overflow.* = 0;
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    if (((a > 0) and (b > max - a)) or
        ((a < 0) and (b < min - a)))
        overflow.* = 1;
    return a +% b;
}

test "addodi4" {
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    var i: i64 = 1;
    while (i < max) : (i *|= 2) {
        try test__addodi4(i, i);
        try test__addodi4(-i, -i);
        try test__addodi4(i, -i);
        try test__addodi4(-i, i);
    }

    // edge cases
    // 0 + 0 = 0
    // MIN + MIN overflow
    // MAX + MAX overflow
    // 0 + MIN MIN
    // 0 + MAX MAX
    // MIN + 0 MIN
    // MAX + 0 MAX
    // MIN + MAX -1
    // MAX + MIN -1
    try test__addodi4(0, 0);
    try test__addodi4(min, min);
    try test__addodi4(max, max);
    try test__addodi4(0, min);
    try test__addodi4(0, max);
    try test__addodi4(min, 0);
    try test__addodi4(max, 0);
    try test__addodi4(min, max);
    try test__addodi4(max, min);

    // derived edge cases
    // MIN+1 + MIN overflow
    // MAX-1 + MAX overflow
    // 1 + MIN = MIN+1
    // -1 + MIN overflow
    // -1 + MAX = MAX-1
    // +1 + MAX overflow
    // MIN + 1 = MIN+1
    // MIN + -1 overflow
    // MAX + 1 overflow
    // MAX + -1 = MAX-1
    try test__addodi4(min + 1, min);
    try test__addodi4(max - 1, max);
    try test__addodi4(1, min);
    try test__addodi4(-1, min);
    try test__addodi4(-1, max);
    try test__addodi4(1, max);
    try test__addodi4(min, 1);
    try test__addodi4(min, -1);
    try test__addodi4(max, -1);
    try test__addodi4(max, 1);
}

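The reference implementation above takes the opposite approach from the XOR trick: instead of wrapping and inspecting signs, it rearranges the bounds checks so they cannot themselves overflow (for a > 0, a + b > max iff b > max - a, and max - a stays in range). A generic sketch of that check, with the hypothetical helper name wouldAddOverflow (not an API from this commit):

const std = @import("std");

// Sketch of the comparison-based reference check the deleted tests used,
// generalized over any signed integer type.
fn wouldAddOverflow(comptime T: type, a: T, b: T) bool {
    const max = std.math.maxInt(T);
    const min = std.math.minInt(T);
    // a > 0: a + b > max  <=>  b > max - a  (max - a cannot overflow here).
    // a < 0: a + b < min  <=>  b < min - a  (min - a cannot overflow here).
    return (a > 0 and b > max - a) or (a < 0 and b < min - a);
}

test wouldAddOverflow {
    try std.testing.expect(wouldAddOverflow(i64, std.math.maxInt(i64), 1));
    try std.testing.expect(!wouldAddOverflow(i64, std.math.maxInt(i64), 0));
    try std.testing.expect(wouldAddOverflow(i8, -100, -100));
}
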
lib/compiler_rt/addosi4_test.zig (deleted)
@@ -1,78 +0,0 @@
const addv = @import("addo.zig");
const testing = @import("std").testing;

fn test__addosi4(a: i32, b: i32) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = addv.__addosi4(a, b, &result_ov);
    const expected: i32 = simple_addosi4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

fn simple_addosi4(a: i32, b: i32, overflow: *c_int) i32 {
    overflow.* = 0;
    const min: i32 = -2147483648;
    const max: i32 = 2147483647;
    if (((a > 0) and (b > max - a)) or
        ((a < 0) and (b < min - a)))
        overflow.* = 1;
    return a +% b;
}

test "addosi4" {
    // -2^31 <= i32 <= 2^31-1
    // 2^31 = 2147483648
    // 2^31-1 = 2147483647
    const min: i32 = -2147483648;
    const max: i32 = 2147483647;
    var i: i32 = 1;
    while (i < max) : (i *|= 2) {
        try test__addosi4(i, i);
        try test__addosi4(-i, -i);
        try test__addosi4(i, -i);
        try test__addosi4(-i, i);
    }

    // edge cases
    // 0 + 0 = 0
    // MIN + MIN overflow
    // MAX + MAX overflow
    // 0 + MIN MIN
    // 0 + MAX MAX
    // MIN + 0 MIN
    // MAX + 0 MAX
    // MIN + MAX -1
    // MAX + MIN -1
    try test__addosi4(0, 0);
    try test__addosi4(min, min);
    try test__addosi4(max, max);
    try test__addosi4(0, min);
    try test__addosi4(0, max);
    try test__addosi4(min, 0);
    try test__addosi4(max, 0);
    try test__addosi4(min, max);
    try test__addosi4(max, min);

    // derived edge cases
    // MIN+1 + MIN overflow
    // MAX-1 + MAX overflow
    // 1 + MIN = MIN+1
    // -1 + MIN overflow
    // -1 + MAX = MAX-1
    // +1 + MAX overflow
    // MIN + 1 = MIN+1
    // MIN + -1 overflow
    // MAX + 1 overflow
    // MAX + -1 = MAX-1
    try test__addosi4(min + 1, min);
    try test__addosi4(max - 1, max);
    try test__addosi4(1, min);
    try test__addosi4(-1, min);
    try test__addosi4(-1, max);
    try test__addosi4(1, max);
    try test__addosi4(min, 1);
    try test__addosi4(min, -1);
    try test__addosi4(max, -1);
    try test__addosi4(max, 1);
}

lib/compiler_rt/addoti4_test.zig (deleted)
@@ -1,80 +0,0 @@
const addv = @import("addo.zig");
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;

fn test__addoti4(a: i128, b: i128) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = addv.__addoti4(a, b, &result_ov);
    const expected: i128 = simple_addoti4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
    overflow.* = 0;
    const min: i128 = math.minInt(i128);
    const max: i128 = math.maxInt(i128);
    if (((a > 0) and (b > max - a)) or
        ((a < 0) and (b < min - a)))
        overflow.* = 1;
    return a +% b;
}

test "addoti4" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    const min: i128 = math.minInt(i128);
    const max: i128 = math.maxInt(i128);
    var i: i128 = 1;
    while (i < max) : (i *|= 2) {
        try test__addoti4(i, i);
        try test__addoti4(-i, -i);
        try test__addoti4(i, -i);
        try test__addoti4(-i, i);
    }

    // edge cases
    // 0 + 0 = 0
    // MIN + MIN overflow
    // MAX + MAX overflow
    // 0 + MIN MIN
    // 0 + MAX MAX
    // MIN + 0 MIN
    // MAX + 0 MAX
    // MIN + MAX -1
    // MAX + MIN -1
    try test__addoti4(0, 0);
    try test__addoti4(min, min);
    try test__addoti4(max, max);
    try test__addoti4(0, min);
    try test__addoti4(0, max);
    try test__addoti4(min, 0);
    try test__addoti4(max, 0);
    try test__addoti4(min, max);
    try test__addoti4(max, min);

    // derived edge cases
    // MIN+1 + MIN overflow
    // MAX-1 + MAX overflow
    // 1 + MIN = MIN+1
    // -1 + MIN overflow
    // -1 + MAX = MAX-1
    // +1 + MAX overflow
    // MIN + 1 = MIN+1
    // MIN + -1 overflow
    // MAX + 1 overflow
    // MAX + -1 = MAX-1
    try test__addoti4(min + 1, min);
    try test__addoti4(max - 1, max);
    try test__addoti4(1, min);
    try test__addoti4(-1, min);
    try test__addoti4(-1, max);
    try test__addoti4(1, max);
    try test__addoti4(min, 1);
    try test__addoti4(min, -1);
    try test__addoti4(max, -1);
    try test__addoti4(max, 1);
}

lib/compiler_rt/addvdi3.zig (new file, 26 lines)
@@ -0,0 +1,26 @@
const common = @import("./common.zig");
const testing = @import("std").testing;

pub const panic = common.panic;

comptime {
    @export(&__addvdi3, .{ .name = "__addvdi3", .linkage = common.linkage, .visibility = common.visibility });
}

pub fn __addvdi3(a: i64, b: i64) callconv(.c) i64 {
    const sum = a +% b;
    // Overflow occurred iff both operands have the same sign, and the sign of the sum does
    // not match it. In other words, iff the sum sign is not the sign of either operand.
    if (((sum ^ a) & (sum ^ b)) < 0) @panic("compiler-rt: integer overflow");
    return sum;
}

test "addvdi3" {
    // const min: i64 = -9223372036854775808
    // const max: i64 = 9223372036854775807
    // TODO write panic handler for testing panics
    // try test__addvdi3(-9223372036854775808, -1, -1); // panic
    // try test__addvdi3(9223372036854775807, 1, 1); // panic
    try testing.expectEqual(-9223372036854775808, __addvdi3(-9223372036854775807, -1));
    try testing.expectEqual(9223372036854775807, __addvdi3(9223372036854775806, 1));
}

lib/compiler_rt/addvsi3.zig
@@ -1,4 +1,3 @@
-const addv = @import("addo.zig");
 const common = @import("./common.zig");
 const testing = @import("std").testing;
 
@@ -9,9 +8,10 @@ comptime {
 }
 
 pub fn __addvsi3(a: i32, b: i32) callconv(.c) i32 {
-    var overflow: c_int = 0;
-    const sum = addv.__addosi4(a, b, &overflow);
-    if (overflow != 0) @panic("compiler-rt: integer overflow");
+    const sum = a +% b;
+    // Overflow occurred iff both operands have the same sign, and the sign of the sum does
+    // not match it. In other words, iff the sum sign is not the sign of either operand.
+    if (((sum ^ a) & (sum ^ b)) < 0) @panic("compiler-rt: integer overflow");
     return sum;
 }
 

lib/compiler_rt/subo.zig (deleted)
@@ -1,47 +0,0 @@
//! subo - subtract overflow
//! * return a-%b.
//! * return if a-b overflows => 1 else => 0
//! - suboXi4_generic as default

const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");

pub const panic = common.panic;

comptime {
    @export(&__subosi4, .{ .name = "__subosi4", .linkage = common.linkage, .visibility = common.visibility });
    @export(&__subodi4, .{ .name = "__subodi4", .linkage = common.linkage, .visibility = common.visibility });
    @export(&__suboti4, .{ .name = "__suboti4", .linkage = common.linkage, .visibility = common.visibility });
}

pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
    return suboXi4_generic(i32, a, b, overflow);
}
pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
    return suboXi4_generic(i64, a, b, overflow);
}
pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
    return suboXi4_generic(i128, a, b, overflow);
}

inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
    overflow.* = 0;
    const sum: ST = a -% b;
    // Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
    // Let sum = a -% b == a - b - carry == wraparound subtraction.
    // Overflow in a-b-carry occurs, iff a and b have opposite signs
    // and the sign of a-b-carry is opposite of a (or equivalently same as b).
    // Faster routine: res = (a ^ b) & (sum ^ a)
    // Slower routine: res = (sum^a) & ~(sum^b)
    // Overflow occurred, iff (res < 0)
    if (((a ^ b) & (sum ^ a)) < 0)
        overflow.* = 1;
    return sum;
}

test {
    _ = @import("subosi4_test.zig");
    _ = @import("subodi4_test.zig");
    _ = @import("suboti4_test.zig");
}

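For subtraction the sign rule flips: overflow in a -% b occurs iff a and b have opposite signs and the wrapped result does not take a's sign. A standalone sketch (hypothetical helper subOverflows, not from this commit) validating that against @subWithOverflow:

const std = @import("std");

// Sketch only: the subtraction variant of the sign trick from the
// deleted suboXi4_generic.
fn subOverflows(a: i32, b: i32) bool {
    const diff = a -% b; // wraparound subtract
    // Sign bit set iff a and b have opposite signs and diff's sign is not a's.
    return ((a ^ b) & (diff ^ a)) < 0;
}

test "sign trick matches @subWithOverflow" {
    const cases = [_][2]i32{
        .{ std.math.minInt(i32), 1 },
        .{ std.math.maxInt(i32), -1 },
        .{ 0, std.math.minInt(i32) },
        .{ 5, 7 },
    };
    for (cases) |c| {
        const res = @subWithOverflow(c[0], c[1]);
        try std.testing.expectEqual(res[1] == 1, subOverflows(c[0], c[1]));
    }
}
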
lib/compiler_rt/subodi4_test.zig (deleted)
@@ -1,81 +0,0 @@
const subo = @import("subo.zig");
const std = @import("std");
const testing = std.testing;
const math = std.math;

fn test__subodi4(a: i64, b: i64) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = subo.__subodi4(a, b, &result_ov);
    const expected: i64 = simple_subodi4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

// 2 cases on evaluating `a-b`:
// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a<min+b
// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a>max+b
// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
pub fn simple_subodi4(a: i64, b: i64, overflow: *c_int) i64 {
    overflow.* = 0;
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    if (((b > 0) and (a < min + b)) or
        ((b < 0) and (a > max + b)))
        overflow.* = 1;
    return a -% b;
}

test "subodi3" {
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    var i: i64 = 1;
    while (i < max) : (i *|= 2) {
        try test__subodi4(i, i);
        try test__subodi4(-i, -i);
        try test__subodi4(i, -i);
        try test__subodi4(-i, i);
    }

    // edge cases
    // 0 - 0 = 0
    // MIN - MIN = 0
    // MAX - MAX = 0
    // 0 - MIN overflow
    // 0 - MAX = MIN+1
    // MIN - 0 = MIN
    // MAX - 0 = MAX
    // MIN - MAX overflow
    // MAX - MIN overflow
    try test__subodi4(0, 0);
    try test__subodi4(min, min);
    try test__subodi4(max, max);
    try test__subodi4(0, min);
    try test__subodi4(0, max);
    try test__subodi4(min, 0);
    try test__subodi4(max, 0);
    try test__subodi4(min, max);
    try test__subodi4(max, min);

    // derived edge cases
    // MIN+1 - MIN = 1
    // MAX-1 - MAX = -1
    // 1 - MIN overflow
    // -1 - MIN = MAX
    // -1 - MAX = MIN
    // +1 - MAX = MIN+2
    // MIN - 1 overflow
    // MIN - -1 = MIN+1
    // MAX - 1 = MAX-1
    // MAX - -1 overflow
    try test__subodi4(min + 1, min);
    try test__subodi4(max - 1, max);
    try test__subodi4(1, min);
    try test__subodi4(-1, min);
    try test__subodi4(-1, max);
    try test__subodi4(1, max);
    try test__subodi4(min, 1);
    try test__subodi4(min, -1);
    try test__subodi4(max, -1);
    try test__subodi4(max, 1);
}

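The rearrangement in simple_subodi4 deserves one extra step of reasoning: when b > 0, min + b cannot overflow, so a - b < min is equivalent to a < min + b; symmetrically, when b < 0, max + b stays in range and a - b > max iff a > max + b. A generic sketch under those observations (wouldSubOverflow is a hypothetical name):

const std = @import("std");

// Sketch of the rearranged bounds check used by the deleted
// simple_subo*4 reference implementations, for any signed type.
fn wouldSubOverflow(comptime T: type, a: T, b: T) bool {
    const min = std.math.minInt(T);
    const max = std.math.maxInt(T);
    // b > 0: a - b < min  <=>  a < min + b  (min + b cannot overflow here).
    // b < 0: a - b > max  <=>  a > max + b  (max + b cannot overflow here).
    return (b > 0 and a < min + b) or (b < 0 and a > max + b);
}

test wouldSubOverflow {
    try std.testing.expect(wouldSubOverflow(i64, std.math.minInt(i64), 1));
    try std.testing.expect(wouldSubOverflow(i64, 0, std.math.minInt(i64)));
    try std.testing.expect(!wouldSubOverflow(i64, -1, std.math.minInt(i64)));
}
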
lib/compiler_rt/subosi4_test.zig (deleted)
@@ -1,82 +0,0 @@
const subo = @import("subo.zig");
const testing = @import("std").testing;

fn test__subosi4(a: i32, b: i32) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = subo.__subosi4(a, b, &result_ov);
    const expected: i32 = simple_subosi4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

// 2 cases on evaluating `a-b`:
// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a<min+b
// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a>max+b
// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
pub fn simple_subosi4(a: i32, b: i32, overflow: *c_int) i32 {
    overflow.* = 0;
    const min: i32 = -2147483648;
    const max: i32 = 2147483647;
    if (((b > 0) and (a < min + b)) or
        ((b < 0) and (a > max + b)))
        overflow.* = 1;
    return a -% b;
}

test "subosi3" {
    // -2^31 <= i32 <= 2^31-1
    // 2^31 = 2147483648
    // 2^31-1 = 2147483647
    const min: i32 = -2147483648;
    const max: i32 = 2147483647;
    var i: i32 = 1;
    while (i < max) : (i *|= 2) {
        try test__subosi4(i, i);
        try test__subosi4(-i, -i);
        try test__subosi4(i, -i);
        try test__subosi4(-i, i);
    }

    // edge cases
    // 0 - 0 = 0
    // MIN - MIN = 0
    // MAX - MAX = 0
    // 0 - MIN overflow
    // 0 - MAX = MIN+1
    // MIN - 0 = MIN
    // MAX - 0 = MAX
    // MIN - MAX overflow
    // MAX - MIN overflow
    try test__subosi4(0, 0);
    try test__subosi4(min, min);
    try test__subosi4(max, max);
    try test__subosi4(0, min);
    try test__subosi4(0, max);
    try test__subosi4(min, 0);
    try test__subosi4(max, 0);
    try test__subosi4(min, max);
    try test__subosi4(max, min);

    // derived edge cases
    // MIN+1 - MIN = 1
    // MAX-1 - MAX = -1
    // 1 - MIN overflow
    // -1 - MIN = MAX
    // -1 - MAX = MIN
    // +1 - MAX = MIN+2
    // MIN - 1 overflow
    // MIN - -1 = MIN+1
    // MAX - 1 = MAX-1
    // MAX - -1 overflow
    try test__subosi4(min + 1, min);
    try test__subosi4(max - 1, max);
    try test__subosi4(1, min);
    try test__subosi4(-1, min);
    try test__subosi4(-1, max);
    try test__subosi4(1, max);
    try test__subosi4(min, 1);
    try test__subosi4(min, -1);
    try test__subosi4(max, -1);
    try test__subosi4(max, 1);
}

lib/compiler_rt/suboti4_test.zig (deleted)
@@ -1,84 +0,0 @@
const subo = @import("subo.zig");
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;

fn test__suboti4(a: i128, b: i128) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = subo.__suboti4(a, b, &result_ov);
    const expected: i128 = simple_suboti4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

// 2 cases on evaluating `a-b`:
// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a<min+b
// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a>max+b
// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
    overflow.* = 0;
    const min: i128 = math.minInt(i128);
    const max: i128 = math.maxInt(i128);
    if (((b > 0) and (a < min + b)) or
        ((b < 0) and (a > max + b)))
        overflow.* = 1;
    return a -% b;
}

test "suboti3" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    const min: i128 = math.minInt(i128);
    const max: i128 = math.maxInt(i128);
    var i: i128 = 1;
    while (i < max) : (i *|= 2) {
        try test__suboti4(i, i);
        try test__suboti4(-i, -i);
        try test__suboti4(i, -i);
        try test__suboti4(-i, i);
    }

    // edge cases
    // 0 - 0 = 0
    // MIN - MIN = 0
    // MAX - MAX = 0
    // 0 - MIN overflow
    // 0 - MAX = MIN+1
    // MIN - 0 = MIN
    // MAX - 0 = MAX
    // MIN - MAX overflow
    // MAX - MIN overflow
    try test__suboti4(0, 0);
    try test__suboti4(min, min);
    try test__suboti4(max, max);
    try test__suboti4(0, min);
    try test__suboti4(0, max);
    try test__suboti4(min, 0);
    try test__suboti4(max, 0);
    try test__suboti4(min, max);
    try test__suboti4(max, min);

    // derived edge cases
    // MIN+1 - MIN = 1
    // MAX-1 - MAX = -1
    // 1 - MIN overflow
    // -1 - MIN = MAX
    // -1 - MAX = MIN
    // +1 - MAX = MIN+2
    // MIN - 1 overflow
    // MIN - -1 = MIN+1
    // MAX - 1 = MAX-1
    // MAX - -1 overflow
    try test__suboti4(min + 1, min);
    try test__suboti4(max - 1, max);
    try test__suboti4(1, min);
    try test__suboti4(-1, min);
    try test__suboti4(-1, max);
    try test__suboti4(1, max);
    try test__suboti4(min, 1);
    try test__suboti4(min, -1);
    try test__suboti4(max, -1);
    try test__suboti4(max, 1);
}

lib/compiler_rt/subvdi3.zig
@@ -1,4 +1,3 @@
-const subv = @import("subo.zig");
 const common = @import("./common.zig");
 const testing = @import("std").testing;
 
@@ -9,9 +8,10 @@ comptime {
 }
 
 pub fn __subvdi3(a: i64, b: i64) callconv(.c) i64 {
-    var overflow: c_int = 0;
-    const sum = subv.__subodi4(a, b, &overflow);
-    if (overflow != 0) @panic("compiler-rt: integer overflow");
+    const sum = a -% b;
+    // Overflow occurred iff the operands have opposite signs, and the sign of the
+    // sum is the opposite of the lhs sign.
+    if (((a ^ b) & (sum ^ a)) < 0) @panic("compiler-rt: integer overflow");
     return sum;
 }
 

lib/compiler_rt/subvsi3.zig
@@ -1,4 +1,3 @@
-const subv = @import("subo.zig");
 const common = @import("./common.zig");
 const testing = @import("std").testing;
 
@@ -9,9 +8,10 @@ comptime {
 }
 
 pub fn __subvsi3(a: i32, b: i32) callconv(.c) i32 {
-    var overflow: c_int = 0;
-    const sum = subv.__subosi4(a, b, &overflow);
-    if (overflow != 0) @panic("compiler-rt: integer overflow");
+    const sum = a -% b;
+    // Overflow occurred iff the operands have opposite signs, and the sign of the
+    // sum is the opposite of the lhs sign.
+    if (((a ^ b) & (sum ^ a)) < 0) @panic("compiler-rt: integer overflow");
     return sum;
 }
 

lib/zig.h (36 changed lines)
@@ -809,15 +809,13 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
 #endif
 }
 
-zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
 static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow) || defined(zig_gcc)
     int32_t full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int32_t full_res = (int32_t)((uint32_t)lhs + (uint32_t)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i32(full_res, bits);
     return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
@@ -835,15 +833,13 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
 #endif
 }
 
-zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
 static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow) || defined(zig_gcc)
     int64_t full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int64_t full_res = (int64_t)((uint64_t)lhs + (uint64_t)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i64(full_res, bits);
     return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
@@ -917,15 +913,13 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
 #endif
 }
 
-zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
 static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     int32_t full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int32_t full_res = (int32_t)((uint32_t)lhs - (uint32_t)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i32(full_res, bits);
     return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
@@ -943,15 +937,13 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
 #endif
 }
 
-zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
 static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     int64_t full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int64_t full_res = (int64_t)((uint64_t)lhs - (uint64_t)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i64(full_res, bits);
     return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
@@ -1755,15 +1747,13 @@ static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint
 #endif
 }
 
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
 static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow)
     zig_i128 full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    zig_i128 full_res = (zig_i128)((zig_u128)lhs + (zig_u128)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i128(full_res, bits);
     return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
@@ -1781,15 +1771,13 @@ static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint
 #endif
 }
 
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
 static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow)
     zig_i128 full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    zig_i128 full_res = (zig_i128)((zig_u128)lhs - (zig_u128)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i128(full_res, bits);
     return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);

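Two things happen in each rewritten zig.h fallback: the wrapped result is computed in unsigned arithmetic, which is well defined in C where signed overflow is not, and the final return additionally range-checks sub-width integers, since bits may be smaller than the storage type. A rough Zig analogue of that second step (addoWide is an illustrative helper assuming 1 <= bits <= 31, not code from this commit; the real helpers also wrap the stored result):

const std = @import("std");

// Sketch: detect overflow for a signed `bits`-wide integer carried
// in a full i32, mirroring the structure of zig_addo_i32.
fn addoWide(lhs: i32, rhs: i32, bits: u5) struct { res: i32, overflow: bool } {
    const full = lhs +% rhs;
    const wide_overflow = ((full ^ lhs) & (full ^ rhs)) < 0;
    // Range of a signed `bits`-wide integer: [-2^(bits-1), 2^(bits-1)-1].
    const min = -(@as(i32, 1) << (bits - 1));
    const max = (@as(i32, 1) << (bits - 1)) - 1;
    return .{ .res = full, .overflow = wide_overflow or full < min or full > max };
}

test addoWide {
    try std.testing.expect(addoWide(100, 100, 8).overflow); // 200 exceeds i8 max
    try std.testing.expect(!addoWide(100, 100, 16).overflow);
}
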
src/Air.zig (40 changed lines)
@@ -660,8 +660,8 @@ pub const Inst = struct {
         /// Given a pointer to a slice, return a pointer to the pointer of the slice.
         /// Uses the `ty_op` field.
         ptr_slice_ptr_ptr,
-        /// Given an (array value or vector value) and element index,
-        /// return the element value at that index.
+        /// Given an (array value or vector value) and element index, return the element value at
+        /// that index. If the lhs is a vector value, the index is guaranteed to be comptime-known.
         /// Result type is the element type of the array operand.
         /// Uses the `bin_op` field.
         array_elem_val,
@@ -874,10 +874,6 @@ pub const Inst = struct {
         /// Uses the `ty_pl` field.
         save_err_return_trace_index,
 
-        /// Store an element to a vector pointer at an index.
-        /// Uses the `vector_store_elem` field.
-        vector_store_elem,
-
         /// Compute a pointer to a `Nav` at runtime, always one of:
         ///
         /// * `threadlocal var`
@@ -919,6 +915,26 @@ pub const Inst = struct {
         /// Operand is unused and set to Ref.none
         work_group_id,
 
+        // The remaining instructions are not emitted by Sema. They are only emitted by `Legalize`,
+        // depending on the enabled features. As such, backends can consider them `unreachable` if
+        // they do not enable the relevant legalizations.
+
+        /// Given a pointer to a vector, a runtime-known index, and a scalar value, store the value
+        /// into the vector at the given index. Zig does not support this operation, but `Legalize`
+        /// may emit it when scalarizing vector operations.
+        ///
+        /// Uses the `pl_op` field with payload `Bin`. `operand` is the vector pointer. `lhs` is the
+        /// element index of type `usize`. `rhs` is the element value. Result is always void.
+        legalize_vec_store_elem,
+        /// Given a vector value and a runtime-known index, return the element value at that index.
+        /// This instruction is similar to `array_elem_val`; the only difference is that the index
+        /// here is runtime-known, which is usually not allowed for vectors. `Legalize` may emit
+        /// this instruction when scalarizing vector operations.
+        ///
+        /// Uses the `bin_op` field. `lhs` is the vector pointer. `rhs` is the element index. Result
+        /// type is the vector element type.
+        legalize_vec_elem_val,
+
         pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
             switch (op) {
                 .lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
@@ -1220,11 +1236,6 @@
             operand: Ref,
             operation: std.builtin.ReduceOp,
         },
-        vector_store_elem: struct {
-            vector_ptr: Ref,
-            // Index into a different array.
-            payload: u32,
-        },
         ty_nav: struct {
             ty: InternPool.Index,
             nav: InternPool.Nav.Index,
@@ -1689,8 +1700,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
         .set_union_tag,
         .prefetch,
         .set_err_return_trace,
-        .vector_store_elem,
         .c_va_end,
+        .legalize_vec_store_elem,
         => return .void,
 
         .slice_len,
@@ -1709,7 +1720,7 @@
             return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
         },
 
-        .slice_elem_val, .ptr_elem_val, .array_elem_val => {
+        .slice_elem_val, .ptr_elem_val, .array_elem_val, .legalize_vec_elem_val => {
             const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].bin_op.lhs, ip);
             return ptr_ty.childTypeIp(ip);
         },
@@ -1857,7 +1868,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .prefetch,
         .wasm_memory_grow,
         .set_err_return_trace,
-        .vector_store_elem,
         .c_va_arg,
         .c_va_copy,
         .c_va_end,
@@ -1868,6 +1878,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .intcast_safe,
         .int_from_float_safe,
         .int_from_float_optimized_safe,
+        .legalize_vec_store_elem,
         => true,
 
         .add,
@@ -2013,6 +2024,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .work_item_id,
         .work_group_size,
         .work_group_id,
+        .legalize_vec_elem_val,
         => false,
 
         .is_non_null_ptr, .is_null_ptr, .is_non_err_ptr, .is_err_ptr => air.typeOf(data.un_op, ip).isVolatilePtrIp(ip),

src/Air/Legalize.zig (2293 changed lines): file diff suppressed because it is too large.
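A Zig-level analogue may help picture what legalize_vec_elem_val asks a backend to do: read a vector element at an index only known at runtime, something user code would typically express by copying the vector to an array first. This is an illustration of the semantics under that assumption, not the compiler's actual lowering:

const std = @import("std");

// Illustration only: what `legalize_vec_elem_val` computes, expressed
// as ordinary Zig by spilling the vector to an array in memory and then
// performing a plain runtime array index.
fn vecElemVal(v: @Vector(4, u8), index: usize) u8 {
    const arr: [4]u8 = v; // spill the vector to memory
    return arr[index]; // ordinary runtime index
}

test vecElemVal {
    var i: usize = 2; // runtime-known index
    _ = &i;
    try std.testing.expectEqual(@as(u8, 30), vecElemVal(.{ 10, 20, 30, 40 }, i));
}
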
@@ -458,17 +458,12 @@ fn analyzeInst(
         .memset_safe,
         .memcpy,
         .memmove,
+        .legalize_vec_elem_val,
         => {
             const o = inst_datas[@intFromEnum(inst)].bin_op;
             return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none });
         },
 
-        .vector_store_elem => {
-            const o = inst_datas[@intFromEnum(inst)].vector_store_elem;
-            const extra = a.air.extraData(Air.Bin, o.payload).data;
-            return analyzeOperands(a, pass, data, inst, .{ o.vector_ptr, extra.lhs, extra.rhs });
-        },
-
         .arg,
         .alloc,
         .ret_ptr,
@@ -775,6 +770,12 @@ fn analyzeInst(
             const pl_op = inst_datas[@intFromEnum(inst)].pl_op;
             return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, .none, .none });
         },
+
+        .legalize_vec_store_elem => {
+            const pl_op = inst_datas[@intFromEnum(inst)].pl_op;
+            const bin = a.air.extraData(Air.Bin, pl_op.payload).data;
+            return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, bin.lhs, bin.rhs });
+        },
     }
 }
 

@@ -272,6 +272,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .memset_safe,
             .memcpy,
             .memmove,
+            .legalize_vec_elem_val,
             => {
                 const bin_op = data[@intFromEnum(inst)].bin_op;
                 try self.verifyInstOperands(inst, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -322,11 +323,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                 const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
                 try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
             },
-            .vector_store_elem => {
-                const vector_store_elem = data[@intFromEnum(inst)].vector_store_elem;
-                const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
-                try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
-            },
             .cmpxchg_strong,
             .cmpxchg_weak,
             => {
@@ -582,6 +578,11 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 
                 try self.verifyInst(inst);
             },
+            .legalize_vec_store_elem => {
+                const pl_op = data[@intFromEnum(inst)].pl_op;
+                const bin = self.air.extraData(Air.Bin, pl_op.payload).data;
+                try self.verifyInstOperands(inst, .{ pl_op.operand, bin.lhs, bin.rhs });
+            },
         }
     }
 }

@@ -171,6 +171,7 @@ const Writer = struct {
         .memmove,
         .memset,
         .memset_safe,
+        .legalize_vec_elem_val,
         => try w.writeBinOp(s, inst),
 
         .is_null,
@@ -330,8 +331,8 @@
         .shuffle_two => try w.writeShuffleTwo(s, inst),
         .reduce, .reduce_optimized => try w.writeReduce(s, inst),
         .cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
-        .vector_store_elem => try w.writeVectorStoreElem(s, inst),
         .runtime_nav_ptr => try w.writeRuntimeNavPtr(s, inst),
+        .legalize_vec_store_elem => try w.writeLegalizeVecStoreElem(s, inst),
 
         .work_item_id,
         .work_group_size,
@@ -509,6 +510,18 @@
         try w.writeOperand(s, inst, 2, pl_op.operand);
     }
 
+    fn writeLegalizeVecStoreElem(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
+        const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+        const bin = w.air.extraData(Air.Bin, pl_op.payload).data;
+
+        try w.writeOperand(s, inst, 0, pl_op.operand);
+        try s.writeAll(", ");
+        try w.writeOperand(s, inst, 1, bin.lhs);
+        try s.writeAll(", ");
+        try w.writeOperand(s, inst, 2, bin.rhs);
+        try s.writeAll(", ");
+    }
+
     fn writeShuffleOne(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
         const unwrapped = w.air.unwrapShuffleOne(w.pt.zcu, inst);
         try w.writeType(s, unwrapped.result_ty);
@@ -576,17 +589,6 @@
         try w.writeOperand(s, inst, 1, extra.rhs);
     }
 
-    fn writeVectorStoreElem(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
-        const data = w.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
-        const extra = w.air.extraData(Air.VectorCmp, data.payload).data;
-
-        try w.writeOperand(s, inst, 0, data.vector_ptr);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 1, extra.lhs);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 2, extra.rhs);
-    }
-
     fn writeRuntimeNavPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
         const ip = &w.pt.zcu.intern_pool;
         const ty_nav = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;

@@ -88,6 +88,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
             .atomic_store_monotonic,
             .atomic_store_release,
             .atomic_store_seq_cst,
+            .legalize_vec_elem_val,
             => {
                 if (!checkRef(data.bin_op.lhs, zcu)) return false;
                 if (!checkRef(data.bin_op.rhs, zcu)) return false;
@@ -316,19 +317,13 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
                 if (!checkRef(data.prefetch.ptr, zcu)) return false;
             },
 
-            .vector_store_elem => {
-                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
-                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
-                if (!checkRef(bin.lhs, zcu)) return false;
-                if (!checkRef(bin.rhs, zcu)) return false;
-            },
-
             .runtime_nav_ptr => {
                 if (!checkType(.fromInterned(data.ty_nav.ty), zcu)) return false;
             },
 
             .select,
             .mul_add,
+            .legalize_vec_store_elem,
             => {
                 const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
                 if (!checkRef(data.pl_op.operand, zcu)) return false;

@@ -2104,7 +2104,6 @@ pub const Key = union(enum) {
 
     pub const VectorIndex = enum(u16) {
         none = std.math.maxInt(u16),
-        runtime = std.math.maxInt(u16) - 1,
         _,
     };
 
@@ -3739,10 +3738,8 @@ pub const LoadedStructType = struct {
         return s.field_inits.get(ip)[i];
     }
 
-    /// Returns `none` in the case the struct is a tuple.
-    pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) OptionalNullTerminatedString {
-        if (s.field_names.len == 0) return .none;
-        return s.field_names.get(ip)[i].toOptional();
+    pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) NullTerminatedString {
+        return s.field_names.get(ip)[i];
     }
 
     pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool {

src/Sema.zig (123 changed lines)
@@ -15919,24 +15919,30 @@ fn zirOverflowArithmetic(
             },
             .mul_with_overflow => {
                 // If either of the arguments is zero, the result is zero and no overflow occured.
-                // If either of the arguments is one, the result is the other and no overflow occured.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
-                const scalar_one = try pt.intValue(dest_ty.scalarType(zcu), 1);
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(zcu)) {
-                        if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
+                    if (!lhs_val.isUndef(zcu) and try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
-                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
-                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
-                        }
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(zcu)) {
-                        if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
+                    if (!rhs_val.isUndef(zcu) and try rhs_val.compareAllWithZeroSema(.eq, pt)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
-                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
-                        }
                     }
                 }
+                // If either of the arguments is one, the result is the other and no overflow occured.
+                const dest_scalar_ty = dest_ty.scalarType(zcu);
+                const dest_scalar_int = dest_scalar_ty.intInfo(zcu);
+                // We could still be working with i1, where '1' is not a legal value!
+                if (!(dest_scalar_int.bits == 1 and dest_scalar_int.signedness == .signed)) {
+                    const scalar_one = try pt.intValue(dest_scalar_ty, 1);
+                    const vec_one = try sema.splat(dest_ty, scalar_one);
+                    if (maybe_lhs_val) |lhs_val| {
+                        if (!lhs_val.isUndef(zcu) and try sema.compareAll(lhs_val, .eq, vec_one, dest_ty)) {
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
+                        }
+                    }
+                    if (maybe_rhs_val) |rhs_val| {
+                        if (!rhs_val.isUndef(zcu) and try sema.compareAll(rhs_val, .eq, vec_one, dest_ty)) {
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
+                        }
+                    }
+                }
@@ -15947,7 +15953,6 @@
                     if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                         break :result .{ .overflow_bit = .undef, .wrapped = .undef };
                     }
-
                     const result = try arith.mulWithOverflow(sema, dest_ty, lhs_val, rhs_val);
                     break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                 }

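The i1 guard in the rewritten mul_with_overflow fold is easy to miss: a signed 1-bit integer holds only -1 and 0, so a constant one simply does not exist at that type and the multiply-by-one fold must be skipped. A tiny test illustrating the range:

const std = @import("std");

// A signed 1-bit integer can represent only -1 and 0; the constant `1`
// does not exist at type i1, which is why the fold guards against it.
test "i1 range" {
    try std.testing.expectEqual(-1, std.math.minInt(i1));
    try std.testing.expectEqual(0, std.math.maxInt(i1));
    // `@as(i1, 1)` would be a compile error: i1 cannot represent 1.
}
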
@@ -17751,10 +17756,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try ty.resolveStructFieldInits(pt);
 
             for (struct_field_vals, 0..) |*field_val, field_index| {
-                const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name|
-                    field_name
-                else
-                    try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+                const field_name = struct_type.fieldName(ip, field_index);
                 const field_name_len = field_name.length(ip);
                 const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
                 const field_init = struct_type.fieldInit(ip, field_index);

@@ -28345,6 +28347,10 @@
         break :o index;
     } else null;
 
+    if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
+        return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
+    }
+
     const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt);
 
     if (maybe_undef_array_ptr_val) |array_ptr_val| {
@@ -28362,10 +28368,6 @@
         try sema.validateRuntimeValue(block, array_ptr_src, array_ptr);
     }
 
-    if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
-        return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
-    }
-
     // Runtime check is only needed if unable to comptime check.
     if (oob_safety and block.wantSafety() and offset == null) {
         const len_inst = try pt.intRef(.usize, array_len);

@@ -30397,22 +30399,6 @@
 
     const is_ret = air_tag == .ret_ptr;
 
-    // Detect if we are storing an array operand to a bitcasted vector pointer.
-    // If so, we instead reach through the bitcasted pointer to the vector pointer,
-    // bitcast the array operand to a vector, and then lower this as a store of
-    // a vector value to a vector pointer. This generally results in better code,
-    // as well as working around an LLVM bug:
-    // https://github.com/ziglang/zig/issues/11154
-    if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
-        const vector_ty = sema.typeOf(vector_ptr).childType(zcu);
-        const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
-            error.NotCoercible => unreachable,
-            else => |e| return e,
-        };
-        try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
-        return;
-    }
-
     const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
         error.NotCoercible => unreachable,
         else => |e| return e,
@@ -30445,29 +30431,6 @@
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
-    if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
-        const ptr_inst = ptr.toIndex().?;
-        const air_tags = sema.air_instructions.items(.tag);
-        if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
-            const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
-            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
-            _ = try block.addInst(.{
-                .tag = .vector_store_elem,
-                .data = .{ .vector_store_elem = .{
-                    .vector_ptr = bin_op.lhs,
-                    .payload = try block.sema.addExtra(Air.Bin{
-                        .lhs = bin_op.rhs,
-                        .rhs = operand,
-                    }),
-                } },
-            });
-            return;
-        }
-        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{f}'", .{
-            ptr_ty.fmt(pt),
-        });
-    }
-
     const store_inst = if (is_ret)
         try block.addBinOp(.store, ptr, operand)
     else

@@ -30567,37 +30530,6 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
     }
 }
 
-/// Traverse an arbitrary number of bitcasted pointers and return the underyling vector
-/// pointer. Only if the final element type matches the vector element type, and the
-/// lengths match.
-fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
-    const pt = sema.pt;
-    const zcu = pt.zcu;
-    const array_ty = sema.typeOf(ptr).childType(zcu);
-    if (array_ty.zigTypeTag(zcu) != .array) return null;
-    var ptr_ref = ptr;
-    var ptr_inst = ptr_ref.toIndex() orelse return null;
-    const air_datas = sema.air_instructions.items(.data);
-    const air_tags = sema.air_instructions.items(.tag);
-    const vector_ty = while (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
-        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
-        if (!sema.isKnownZigType(ptr_ref, .pointer)) return null;
-        const child_ty = sema.typeOf(ptr_ref).childType(zcu);
-        if (child_ty.zigTypeTag(zcu) == .vector) break child_ty;
-        ptr_inst = ptr_ref.toIndex() orelse return null;
-    } else return null;
-
-    // We have a pointer-to-array and a pointer-to-vector. If the elements and
-    // lengths match, return the result.
-    if (array_ty.childType(zcu).eql(vector_ty.childType(zcu), zcu) and
-        array_ty.arrayLen(zcu) == vector_ty.vectorLen(zcu))
-    {
-        return ptr_ref;
-    } else {
-        return null;
-    }
-}
-
 /// Call when you have Value objects rather than Air instructions, and you want to
 /// assert the store must be done at comptime.
 fn storePtrVal(
@@ -35577,8 +35509,13 @@ fn structFieldInits(
             const default_val = try sema.resolveConstValue(&block_scope, init_src, coerced, null);
 
             if (default_val.canMutateComptimeVarState(zcu)) {
-                const field_name = struct_type.fieldName(ip, field_i).unwrap().?;
-                return sema.failWithContainsReferenceToComptimeVar(&block_scope, init_src, field_name, "field default value", default_val);
+                return sema.failWithContainsReferenceToComptimeVar(
+                    &block_scope,
+                    init_src,
+                    struct_type.fieldName(ip, field_i),
+                    "field default value",
+                    default_val,
+                );
             }
             struct_type.field_inits.get(ip)[field_i] = default_val.toIntern();
         }

@@ -24,7 +24,6 @@ pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value)
     const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
-        .runtime => return .runtime_load,
         else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) {
             .little => child_bits * @intFromEnum(idx),
             .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
@@ -81,7 +80,6 @@ pub fn storeComptimePtr(
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
-        .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
             .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian

src/Type.zig (10 changed lines)
@@ -198,9 +198,7 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread) std.Io.Writer.
                 info.packed_offset.bit_offset, info.packed_offset.host_size,
             });
         }
-        if (info.flags.vector_index == .runtime) {
-            try writer.writeAll(":?");
-        } else if (info.flags.vector_index != .none) {
+        if (info.flags.vector_index != .none) {
             try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
         }
         try writer.writeAll(") ");
@@ -3113,7 +3111,7 @@ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
 pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
     const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
-        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index).toOptional(),
         .tuple_type => .none,
         else => unreachable,
     };
@@ -3558,7 +3556,7 @@ pub fn packedStructFieldPtrInfo(
     } else .{
         switch (zcu.comp.getZigBackend()) {
             else => (running_bits + 7) / 8,
-            .stage2_x86_64 => @intCast(struct_ty.abiSize(zcu)),
+            .stage2_x86_64, .stage2_c => @intCast(struct_ty.abiSize(zcu)),
         },
         bit_offset,
     };
@@ -3985,7 +3983,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
         break :blk .{
             .host_size = @intCast(parent_ty.arrayLen(zcu)),
             .alignment = parent_ty.abiAlignment(zcu),
-            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+            .vector_index = @enumFromInt(offset.?),
         };
     } else .{};
 

src/Value.zig (161 changed lines)
@@ -574,166 +574,37 @@ pub fn writeToPackedMemory(
     }
 }
 
-/// Load a Value from the contents of `buffer`.
+/// Load a Value from the contents of `buffer`, where `ty` is an unsigned integer type.
 ///
 /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
 /// the end of the value in memory.
-pub fn readFromMemory(
+pub fn readUintFromMemory(
     ty: Type,
     pt: Zcu.PerThread,
     buffer: []const u8,
     arena: Allocator,
-) error{
-    IllDefinedMemoryLayout,
-    Unimplemented,
-    OutOfMemory,
-}!Value {
+) Allocator.Error!Value {
     const zcu = pt.zcu;
-    const ip = &zcu.intern_pool;
-    const target = zcu.getTarget();
-    const endian = target.cpu.arch.endian();
-    switch (ty.zigTypeTag(zcu)) {
-        .void => return Value.void,
-        .bool => {
-            if (buffer[0] == 0) {
-                return Value.false;
-            } else {
-                return Value.true;
-            }
-        },
-        .int, .@"enum" => |ty_tag| {
-            const int_ty = switch (ty_tag) {
-                .int => ty,
-                .@"enum" => ty.intTagType(zcu),
-                else => unreachable,
-            };
-            const int_info = int_ty.intInfo(zcu);
-            const bits = int_info.bits;
-            const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
-            if (bits == 0 or buffer.len == 0) return zcu.getCoerced(try zcu.intValue(int_ty, 0), ty);
+    const endian = zcu.getTarget().cpu.arch.endian();
 
-            if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
-                .signed => {
-                    const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                    const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-                    return zcu.getCoerced(try zcu.intValue(int_ty, result), ty);
-                },
-                .unsigned => {
+    assert(ty.isUnsignedInt(zcu));
+    const bits = ty.intInfo(zcu).bits;
+    const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
+
+    assert(buffer.len >= byte_count);
+
+    if (bits <= 64) {
         const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
         const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-                    return zcu.getCoerced(try zcu.intValue(int_ty, result), ty);
-                },
-            } else { // Slow path, we have to construct a big-int
+        return pt.intValue(ty, result);
+    } else {
         const Limb = std.math.big.Limb;
         const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb);
         const limbs_buffer = try arena.alloc(Limb, limb_count);
 
         var bigint = BigIntMutable.init(limbs_buffer, 0);
         bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
-                return zcu.getCoerced(try zcu.intValue_big(int_ty, bigint.toConst()), ty);
-            }
-        },
-        .float => return Value.fromInterned(try pt.intern(.{ .float = .{
-            .ty = ty.toIntern(),
-            .storage = switch (ty.floatBits(target)) {
-                16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) },
-                32 => .{ .f32 = @bitCast(std.mem.readInt(u32, buffer[0..4], endian)) },
-                64 => .{ .f64 = @bitCast(std.mem.readInt(u64, buffer[0..8], endian)) },
-                80 => .{ .f80 = @bitCast(std.mem.readInt(u80, buffer[0..10], endian)) },
-                128 => .{ .f128 = @bitCast(std.mem.readInt(u128, buffer[0..16], endian)) },
-                else => unreachable,
-            },
-        } })),
-        .array => {
-            const elem_ty = ty.childType(zcu);
-            const elem_size = elem_ty.abiSize(zcu);
-            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
-            var offset: usize = 0;
-            for (elems) |*elem| {
-                elem.* = (try readFromMemory(elem_ty, zcu, buffer[offset..], arena)).toIntern();
-                offset += @intCast(elem_size);
-            }
-            return pt.aggregateValue(ty, elems);
-        },
-        .vector => {
-            // We use byte_count instead of abi_size here, so that any padding bytes
-            // follow the data bytes, on both big- and little-endian systems.
-            const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-            return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-        },
-        .@"struct" => {
-            const struct_type = zcu.typeToStruct(ty).?;
-            switch (struct_type.layout) {
-                .auto => unreachable, // Sema is supposed to have emitted a compile error already
-                .@"extern" => {
-                    const field_types = struct_type.field_types;
-                    const field_vals = try arena.alloc(InternPool.Index, field_types.len);
-                    for (field_vals, 0..) |*field_val, i| {
-                        const field_ty = Type.fromInterned(field_types.get(ip)[i]);
-                        const off: usize = @intCast(ty.structFieldOffset(i, zcu));
-                        const sz: usize = @intCast(field_ty.abiSize(zcu));
-                        field_val.* = (try readFromMemory(field_ty, zcu, buffer[off..(off + sz)], arena)).toIntern();
-                    }
-                    return pt.aggregateValue(ty, field_vals);
-                },
-                .@"packed" => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-                    return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-                },
-            }
-        },
-        .error_set => {
-            const bits = zcu.errorSetBits();
-            const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
-            const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-            const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-            const name = zcu.global_error_set.keys()[@intCast(index)];
-
-            return Value.fromInterned(try pt.intern(.{ .err = .{
-                .ty = ty.toIntern(),
-                .name = name,
-            } }));
-        },
-        .@"union" => switch (ty.containerLayout(zcu)) {
-            .auto => return error.IllDefinedMemoryLayout,
-            .@"extern" => {
-                const union_size = ty.abiSize(zcu);
-                const array_ty = try zcu.arrayType(.{ .len = union_size, .child = .u8_type });
-                const val = (try readFromMemory(array_ty, zcu, buffer, arena)).toIntern();
-                return Value.fromInterned(try pt.internUnion(.{
-                    .ty = ty.toIntern(),
-                    .tag = .none,
-                    .val = val,
-                }));
-            },
-            .@"packed" => {
-                const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-                return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-            },
-        },
-        .pointer => {
-            assert(!ty.isSlice(zcu)); // No well defined layout.
-            const int_val = try readFromMemory(Type.usize, zcu, buffer, arena);
|
||||
return Value.fromInterned(try pt.intern(.{ .ptr = .{
|
||||
.ty = ty.toIntern(),
|
||||
.base_addr = .int,
|
||||
.byte_offset = int_val.toUnsignedInt(zcu),
|
||||
} }));
|
||||
},
|
||||
.optional => {
|
||||
assert(ty.isPtrLikeOptional(zcu));
|
||||
const child_ty = ty.optionalChild(zcu);
|
||||
const child_val = try readFromMemory(child_ty, zcu, buffer, arena);
|
||||
return Value.fromInterned(try pt.intern(.{ .opt = .{
|
||||
.ty = ty.toIntern(),
|
||||
.val = switch (child_val.orderAgainstZero(pt)) {
|
||||
.lt => unreachable,
|
||||
.eq => .none,
|
||||
.gt => child_val.toIntern(),
|
||||
},
|
||||
} }));
|
||||
},
|
||||
else => return error.Unimplemented,
|
||||
var bigint: BigIntMutable = .init(limbs_buffer, 0);
|
||||
bigint.readTwosComplement(buffer[0..byte_count], bits, endian, .unsigned);
|
||||
return pt.intValue_big(ty, bigint.toConst());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
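The fast paths above rely on a shift-based truncation trick: after std.mem.readVarInt widens the bytes to a u64, shifting left and then right by 64 - bits clears any stray bits the buffer held past the value's width. A minimal standalone sketch of that trick (the function name and test are illustrative, not part of the patch; assumes 1 <= bits <= 64):

    const std = @import("std");

    fn readSmallUint(buffer: []const u8, bits: u16, endian: std.builtin.Endian) u64 {
        const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
        const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
        // Shifting up and back down by (64 - bits) zeroes everything above `bits`.
        const shift: u6 = @intCast(64 - bits);
        return (val << shift) >> shift;
    }

    test readSmallUint {
        // A 9-bit value stored in two bytes; the high 7 bits of the buffer are junk.
        const buf = [_]u8{ 0xff, 0xff };
        try std.testing.expectEqual(@as(u64, 0x1ff), readSmallUint(&buf, 9, .little));
    }
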
@ -3512,7 +3512,6 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
canon_info.packed_offset.host_size = 0;
}
},
.runtime => {},
_ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
}

@ -3663,21 +3662,40 @@ pub fn intRef(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Air.Inst.
}

pub fn intValue_big(pt: Zcu.PerThread, ty: Type, x: BigIntConst) Allocator.Error!Value {
return Value.fromInterned(try pt.intern(.{ .int = .{
if (ty.toIntern() != .comptime_int_type) {
const int_info = ty.intInfo(pt.zcu);
assert(x.fitsInTwosComp(int_info.signedness, int_info.bits));
}
return .fromInterned(try pt.intern(.{ .int = .{
.ty = ty.toIntern(),
.storage = .{ .big_int = x },
} }));
}

pub fn intValue_u64(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
return Value.fromInterned(try pt.intern(.{ .int = .{
if (ty.toIntern() != .comptime_int_type and x != 0) {
const int_info = ty.intInfo(pt.zcu);
const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
assert(unsigned_bits >= std.math.log2(x) + 1);
}
return .fromInterned(try pt.intern(.{ .int = .{
.ty = ty.toIntern(),
.storage = .{ .u64 = x },
} }));
}

pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value {
return Value.fromInterned(try pt.intern(.{ .int = .{
if (ty.toIntern() != .comptime_int_type and x != 0) {
const int_info = ty.intInfo(pt.zcu);
const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
if (x > 0) {
assert(unsigned_bits >= std.math.log2(x) + 1);
} else {
assert(int_info.signedness == .signed);
assert(unsigned_bits >= std.math.log2_int_ceil(u64, @abs(x)));
}
}
return .fromInterned(try pt.intern(.{ .int = .{
.ty = ty.toIntern(),
.storage = .{ .i64 = x },
} }));

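The new assertions encode the usual bit-length relation: a nonzero unsigned value x fits in n bits exactly when n >= log2(x) + 1. A quick hedged illustration (the helper name is invented here, not from the patch):

    const std = @import("std");

    fn fitsUnsigned(x: u64, unsigned_bits: u16) bool {
        if (x == 0) return true; // log2 is undefined at zero; zero always fits.
        return unsigned_bits >= std.math.log2(x) + 1;
    }

    test fitsUnsigned {
        try std.testing.expect(fitsUnsigned(255, 8)); // log2(255) + 1 == 8
        try std.testing.expect(!fitsUnsigned(256, 8)); // log2(256) + 1 == 9
    }
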
@ -134,6 +134,10 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
var air_inst_index = air_body[air_body_index];
const initial_def_order_len = isel.def_order.count();
air_tag: switch (air_tags[@intFromEnum(air_inst_index)]) {
// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.arg,
.ret_addr,
.frame_addr,

@ -826,18 +830,6 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {

try isel.analyzeUse(un_op);

air_body_index += 1;
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.vector_store_elem => {
const vector_store_elem = air_data[@intFromEnum(air_inst_index)].vector_store_elem;
const bin_op = isel.air.extraData(Air.Bin, vector_store_elem.payload).data;

try isel.analyzeUse(vector_store_elem.vector_ptr);
try isel.analyzeUse(bin_op.lhs);
try isel.analyzeUse(bin_op.rhs);

air_body_index += 1;
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];

@ -962,6 +954,11 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
};
air_tag: switch (air.next().?) {
else => |air_tag| return isel.fail("unimplemented {t}", .{air_tag}),

// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.arg => {
const arg_vi = isel.live_values.fetchRemove(air.inst_index).?.value;
defer arg_vi.deref(isel);

@ -37,6 +37,7 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
.expand_packed_load = true,
.expand_packed_store = true,
.expand_packed_struct_field_val = true,
.expand_packed_aggregate_init = true,
}),
};
}

@ -1392,114 +1393,21 @@ pub const DeclGen = struct {
|
|||
try w.writeByte('}');
|
||||
},
|
||||
.@"packed" => {
|
||||
const int_info = ty.intInfo(zcu);
|
||||
|
||||
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
|
||||
const bit_offset_ty = try pt.intType(.unsigned, bits);
|
||||
|
||||
var bit_offset: u64 = 0;
|
||||
var eff_num_fields: usize = 0;
|
||||
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
eff_num_fields += 1;
|
||||
}
|
||||
|
||||
if (eff_num_fields == 0) {
|
||||
try w.writeByte('(');
|
||||
try dg.renderUndefValue(w, ty, location);
|
||||
try w.writeByte(')');
|
||||
} else if (ty.bitSize(zcu) > 64) {
|
||||
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
|
||||
var num_or = eff_num_fields - 1;
|
||||
while (num_or > 0) : (num_or -= 1) {
|
||||
try w.writeAll("zig_or_");
|
||||
try dg.renderTypeForBuiltinFnName(w, ty);
|
||||
try w.writeByte('(');
|
||||
}
|
||||
|
||||
var eff_index: usize = 0;
|
||||
var needs_closing_paren = false;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
.ty = field_ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes.at(field_index, ip) },
|
||||
} }),
|
||||
.elems => |elems| elems[field_index],
|
||||
.repeated_elem => |elem| elem,
|
||||
// https://github.com/ziglang/zig/issues/24657 will eliminate most of the
|
||||
// following logic, leaving only the recursive `renderValue` call. Once
|
||||
// that proposal is implemented, a `packed struct` will literally be
|
||||
// represented in the InternPool by its comptime-known backing integer.
|
||||
var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
|
||||
defer arena.deinit();
|
||||
const backing_ty: Type = .fromInterned(loaded_struct.backingIntTypeUnordered(ip));
|
||||
const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
|
||||
val.writeToMemory(pt, buf) catch |err| switch (err) {
|
||||
error.IllDefinedMemoryLayout => unreachable,
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed struct value", .{}),
|
||||
};
|
||||
const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
|
||||
if (bit_offset != 0) {
|
||||
try w.writeAll("zig_shl_");
|
||||
try dg.renderTypeForBuiltinFnName(w, ty);
|
||||
try w.writeByte('(');
|
||||
try dg.renderIntCast(w, ty, cast_context, field_ty, .FunctionArgument);
|
||||
try w.writeAll(", ");
|
||||
try dg.renderValue(w, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
|
||||
try w.writeByte(')');
|
||||
} else {
|
||||
try dg.renderIntCast(w, ty, cast_context, field_ty, .FunctionArgument);
|
||||
}
|
||||
|
||||
if (needs_closing_paren) try w.writeByte(')');
|
||||
if (eff_index != eff_num_fields - 1) try w.writeAll(", ");
|
||||
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
needs_closing_paren = true;
|
||||
eff_index += 1;
|
||||
}
|
||||
} else {
|
||||
try w.writeByte('(');
|
||||
// a << a_off | b << b_off | c << c_off
|
||||
var empty = true;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) try w.writeAll(" | ");
|
||||
try w.writeByte('(');
|
||||
try dg.renderCType(w, ctype);
|
||||
try w.writeByte(')');
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
.ty = field_ty.toIntern(),
|
||||
.storage = .{ .u64 = bytes.at(field_index, ip) },
|
||||
} }),
|
||||
.elems => |elems| elems[field_index],
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
const field_int_info: std.builtin.Type.Int = if (field_ty.isAbiInt(zcu))
|
||||
field_ty.intInfo(zcu)
|
||||
else
|
||||
.{ .signedness = .unsigned, .bits = undefined };
|
||||
switch (field_int_info.signedness) {
|
||||
.signed => {
|
||||
try w.writeByte('(');
|
||||
try dg.renderValue(w, Value.fromInterned(field_val), .Other);
|
||||
try w.writeAll(" & ");
|
||||
const field_uint_ty = try pt.intType(.unsigned, field_int_info.bits);
|
||||
try dg.renderValue(w, try field_uint_ty.maxIntScalar(pt, field_uint_ty), .Other);
|
||||
try w.writeByte(')');
|
||||
},
|
||||
.unsigned => try dg.renderValue(w, Value.fromInterned(field_val), .Other),
|
||||
}
|
||||
if (bit_offset != 0) {
|
||||
try w.writeAll(" << ");
|
||||
try dg.renderValue(w, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
|
||||
}
|
||||
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
empty = false;
|
||||
}
|
||||
try w.writeByte(')');
|
||||
}
|
||||
const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
|
||||
return dg.renderValue(w, backing_val, location);
|
||||
},
|
||||
}
|
||||
},
|
||||
|
|
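The rewritten lowering above leans on the invariant that issue #24657 would make explicit: a packed struct's in-memory bytes are exactly the bytes of its backing integer, so serializing the value and re-reading it as one unsigned integer is enough to render it. A small sketch of that equivalence (the types here are examples, not from the patch):

    const std = @import("std");

    const P = packed struct { a: u3, b: u5 };

    test "packed struct round-trips through its backing integer" {
        const p: P = .{ .a = 5, .b = 17 };
        // Same bytes, two views: the struct and its u8 backing integer.
        const backing: u8 = @bitCast(p);
        try std.testing.expectEqual(@as(u8, 5 | (17 << 3)), backing);
    }
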
@ -1507,18 +1415,26 @@ pub const DeclGen = struct {
|
|||
},
|
||||
.un => |un| {
|
||||
const loaded_union = ip.loadUnionType(ty.toIntern());
|
||||
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
|
||||
// https://github.com/ziglang/zig/issues/24657 will eliminate most of the
|
||||
// following logic, leaving only the recursive `renderValue` call. Once
|
||||
// that proposal is implemented, a `packed union` will literally be
|
||||
// represented in the InternPool by its comptime-known backing integer.
|
||||
var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
|
||||
defer arena.deinit();
|
||||
const backing_ty = try ty.unionBackingType(pt);
|
||||
const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
|
||||
val.writeToMemory(pt, buf) catch |err| switch (err) {
|
||||
error.IllDefinedMemoryLayout => unreachable,
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed union value", .{}),
|
||||
};
|
||||
const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
|
||||
return dg.renderValue(w, backing_val, location);
|
||||
}
|
||||
if (un.tag == .none) {
|
||||
const backing_ty = try ty.unionBackingType(pt);
|
||||
switch (loaded_union.flagsUnordered(ip).layout) {
|
||||
.@"packed" => {
|
||||
if (!location.isInitializer()) {
|
||||
try w.writeByte('(');
|
||||
try dg.renderType(w, backing_ty);
|
||||
try w.writeByte(')');
|
||||
}
|
||||
try dg.renderValue(w, Value.fromInterned(un.val), location);
|
||||
},
|
||||
.@"extern" => {
|
||||
assert(loaded_union.flagsUnordered(ip).layout == .@"extern");
|
||||
if (location == .StaticInitializer) {
|
||||
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
|
||||
}
|
||||
|
|
@ -1531,9 +1447,6 @@ pub const DeclGen = struct {
|
|||
try w.writeAll("){");
|
||||
try dg.renderValue(w, Value.fromInterned(un.val), location);
|
||||
try w.writeAll("})");
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
} else {
|
||||
if (!location.isInitializer()) {
|
||||
try w.writeByte('(');
|
||||
|
|
@ -1544,21 +1457,6 @@ pub const DeclGen = struct {
|
|||
const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
|
||||
const field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[field_index]);
|
||||
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
|
||||
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
|
||||
if (field_ty.hasRuntimeBits(zcu)) {
|
||||
if (field_ty.isPtrAtRuntime(zcu)) {
|
||||
try w.writeByte('(');
|
||||
try dg.renderCType(w, ctype);
|
||||
try w.writeByte(')');
|
||||
} else if (field_ty.zigTypeTag(zcu) == .float) {
|
||||
try w.writeByte('(');
|
||||
try dg.renderCType(w, ctype);
|
||||
try w.writeByte(')');
|
||||
}
|
||||
try dg.renderValue(w, Value.fromInterned(un.val), location);
|
||||
} else try w.writeByte('0');
|
||||
return;
|
||||
}
|
||||
|
||||
const has_tag = loaded_union.hasTag(ip);
|
||||
if (has_tag) try w.writeByte('{');
|
||||
|
|
@ -1745,9 +1643,11 @@ pub const DeclGen = struct {
}
return w.writeByte('}');
},
.@"packed" => return w.print("{f}", .{
try dg.fmtIntLiteralHex(try pt.undefValue(ty), .Other),
}),
.@"packed" => return dg.renderUndefValue(
w,
.fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
location,
),
}
},
.tuple_type => |tuple_info| {

@ -1815,9 +1715,11 @@ pub const DeclGen = struct {
}
if (has_tag) try w.writeByte('}');
},
.@"packed" => return w.print("{f}", .{
try dg.fmtIntLiteralHex(try pt.undefValue(ty), .Other),
}),
.@"packed" => return dg.renderUndefValue(
w,
try ty.unionBackingType(pt),
location,
),
}
},
.error_union_type => |error_union_type| switch (ctype.info(ctype_pool)) {

@ -2445,10 +2347,7 @@ pub const DeclGen = struct {
const ty = val.typeOf(zcu);
return .{ .data = .{
.dg = dg,
.int_info = if (ty.zigTypeTag(zcu) == .@"union" and ty.containerLayout(zcu) == .@"packed")
.{ .signedness = .unsigned, .bits = @intCast(ty.bitSize(zcu)) }
else
ty.intInfo(zcu),
.int_info = ty.intInfo(zcu),
.kind = kind,
.ctype = try dg.ctypeFromType(ty, kind),
.val = val,

@ -3426,6 +3325,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void {
// zig fmt: off
.inferred_alloc, .inferred_alloc_comptime => unreachable,

// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.arg => try airArg(f, inst),

.breakpoint => try airBreakpoint(f),

@ -3656,7 +3559,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void {

.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
.error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
.vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),

.runtime_nav_ptr => try airRuntimeNavPtr(f, inst),

@ -3899,6 +3801,24 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
try f.allocs.put(zcu.gpa, local.new_local, true);

switch (elem_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => switch (elem_ty.containerLayout(zcu)) {
.@"packed" => {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.object.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.object.newline();
},
.auto, .@"extern" => {},
},
else => {},
}

return .{ .local_ref = local.new_local };
}

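The flaw referenced in that comment comes down to packed types whose bit size is smaller than their ABI size: the padding bits live outside the backing integer, so a store of the value alone never rewrites them, and a prior 0xaa fill can leak through. A hedged illustration of the size gap (example type, not from the patch):

    const std = @import("std");

    const S = packed struct { x: u12 };

    test "packed bit size vs ABI size" {
        // Four padding bits sit between the 12-bit backing integer and the
        // 2-byte ABI size; writing the backing integer leaves them untouched.
        try std.testing.expectEqual(12, @bitSizeOf(S));
        try std.testing.expectEqual(2, @sizeOf(S));
    }
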
@ -3918,6 +3838,24 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
try f.allocs.put(zcu.gpa, local.new_local, true);

switch (elem_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => switch (elem_ty.containerLayout(zcu)) {
.@"packed" => {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.object.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.object.newline();
},
.auto, .@"extern" => {},
},
else => {},
}

return .{ .local_ref = local.new_local };
}

@ -3956,6 +3894,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const src_ty: Type = .fromInterned(ptr_info.child);

// `Air.Legalize.Feature.expand_packed_load` should ensure that the only
// bit-pointers we see here are vector element pointers.
assert(ptr_info.packed_offset.host_size == 0 or ptr_info.flags.vector_index != .none);

if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ty_op.operand});
return .none;

@ -3987,40 +3929,6 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try w.writeAll(", sizeof(");
try f.renderType(w, src_ty);
try w.writeAll("))");
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits: u16 = ptr_info.packed_offset.host_size * 8;
const host_ty = try pt.intType(.unsigned, host_bits);

const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);

const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));

try f.writeCValue(w, local, .Other);
try v.elem(f, w);
try w.writeAll(" = (");
try f.renderType(w, src_ty);
try w.writeAll(")zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(w, field_ty);
try w.writeAll("((");
try f.renderType(w, field_ty);
try w.writeByte(')');
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try w.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeByte('(');
}
try w.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeByte('(');
try f.writeCValueDeref(w, operand);
try v.elem(f, w);
try w.print(", {f})", .{try f.fmtIntLiteralDec(bit_offset_val)});
if (cant_cast) try w.writeByte(')');
try f.object.dg.renderBuiltinInfo(w, field_ty, .bits);
try w.writeByte(')');
} else {
try f.writeCValue(w, local, .Other);
try v.elem(f, w);

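What the deleted branch used to emit, expressed directly in Zig: a packed load shifts the host integer down by the field's bit offset and masks to the field's width. With expand_packed_load enabled, the legalizer now produces equivalent AIR before this backend runs. A hedged sketch (names invented):

    const std = @import("std");

    fn loadPacked(host: u32, bit_offset: u5, field_bits: u5) u32 {
        const mask = (@as(u32, 1) << field_bits) - 1;
        return (host >> bit_offset) & mask;
    }

    test loadPacked {
        // A 5-bit field at bit offset 3 of 0b1010_1011 holds 0b10101.
        try std.testing.expectEqual(@as(u32, 0b10101), loadPacked(0b1010_1011, 3, 5));
    }
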
@ -4213,6 +4121,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const ptr_scalar_ty = ptr_ty.scalarType(zcu);
const ptr_info = ptr_scalar_ty.ptrInfo(zcu);

// `Air.Legalize.Feature.expand_packed_store` should ensure that the only
// bit-pointers we see here are vector element pointers.
assert(ptr_info.packed_offset.host_size == 0 or ptr_info.flags.vector_index != .none);

const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);

@ -4222,9 +4134,24 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
if (safety and ptr_info.packed_offset.host_size == 0) {
// If the thing we're initializing is a packed struct/union, we set to 0 instead of
// 0xAA. This is a hack to work around a problem with partially-undefined packed
// aggregates. If we used 0xAA here, then a later initialization through RLS would
// not zero the high padding bits (for a packed type which is not 8/16/32/64/etc bits),
// so we would get a miscompilation. Using 0x00 here avoids this bug in some cases. It
// is *not* a correct fix; for instance it misses any case where packed structs are
// nested in other aggregates. A proper fix for this will involve changing the language,
// such as to remove RLS. This just prevents miscompilations in *some* common cases.
const byte_str: []const u8 = switch (src_ty.zigTypeTag(zcu)) {
else => "0xaa",
.@"struct", .@"union" => switch (src_ty.containerLayout(zcu)) {
.auto, .@"extern" => "0xaa",
.@"packed" => "0x00",
},
};
try w.writeAll("memset(");
try f.writeCValue(w, ptr_val, .FunctionArgument);
try w.writeAll(", 0xaa, sizeof(");
try w.print(", {s}, sizeof(", .{byte_str});
try f.renderType(w, .fromInterned(ptr_info.child));
try w.writeAll("));");
try f.object.newline();

@ -4277,66 +4204,6 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try w.writeByte(';');
try f.object.newline();
try v.end(f, inst, w);
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits = ptr_info.packed_offset.host_size * 8;
const host_ty = try pt.intType(.unsigned, host_bits);

const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);

const src_bits = src_ty.bitSize(zcu);

const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), f.object.dg.gpa);

var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits));
defer mask.deinit();

try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(src_bits));
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);

const mask_val = try pt.intValue_big(host_ty, mask.toConst());

const v = try Vectorize.start(f, inst, w, ptr_ty);
const a = try Assignment.start(f, w, src_scalar_ctype);
try f.writeCValueDeref(w, ptr_val);
try v.elem(f, w);
try a.assign(f, w);
try w.writeAll("zig_or_");
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeAll("(zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeByte('(');
try f.writeCValueDeref(w, ptr_val);
try v.elem(f, w);
try w.print(", {f}), zig_shl_", .{try f.fmtIntLiteralHex(mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeByte('(');
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try w.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
try w.writeAll("(0, ");
} else {
try w.writeByte('(');
try f.renderType(w, host_ty);
try w.writeByte(')');
}

if (src_ty.isPtrAtRuntime(zcu)) {
try w.writeByte('(');
try f.renderType(w, .usize);
try w.writeByte(')');
}
try f.writeCValue(w, src_val, .Other);
try v.elem(f, w);
if (cant_cast) try w.writeByte(')');
try w.print(", {f}))", .{try f.fmtIntLiteralDec(bit_offset_val)});
try a.end(f, w);
try v.end(f, inst, w);
} else {
switch (ptr_val) {
.local_ref => |ptr_local_index| switch (src_val) {

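The store side, likewise now produced by expand_packed_store, is the mirror image of the load: clear the field's bit range in the host integer with an inverted mask, then OR in the shifted value — the same effect as the zig_and/zig_shl/zig_or chain being deleted above. Hedged sketch (names invented):

    const std = @import("std");

    fn storePacked(host: u32, bit_offset: u5, field_bits: u5, value: u32) u32 {
        const field_mask = ((@as(u32, 1) << field_bits) - 1) << bit_offset;
        return (host & ~field_mask) | ((value << bit_offset) & field_mask);
    }

    test storePacked {
        // Overwrite the 5-bit field at offset 3; neighboring bits are preserved.
        try std.testing.expectEqual(
            @as(u32, 0b1111_1011),
            storePacked(0b1010_1011, 3, 5, 0b11111),
        );
    }
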
@ -6015,10 +5882,7 @@ fn fieldLocation(
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
else
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index } },
.{ .field = .{ .identifier = loaded_struct.fieldName(ip, field_index).toSlice(ip) } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
.{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }

@ -6202,115 +6066,20 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
// Ensure complete type definition is visible before accessing fields.
|
||||
_ = try f.ctypeFromType(struct_ty, .complete);
|
||||
|
||||
assert(struct_ty.containerLayout(zcu) != .@"packed"); // `Air.Legalize.Feature.expand_packed_struct_field_val` handles this case
|
||||
const field_name: CValue = switch (ip.indexToKey(struct_ty.toIntern())) {
|
||||
.struct_type => field_name: {
|
||||
const loaded_struct = ip.loadStructType(struct_ty.toIntern());
|
||||
switch (loaded_struct.layout) {
|
||||
.auto, .@"extern" => break :field_name if (loaded_struct.fieldName(ip, extra.field_index).unwrap()) |field_name|
|
||||
.{ .identifier = field_name.toSlice(ip) }
|
||||
else
|
||||
.{ .field = extra.field_index },
|
||||
.@"packed" => {
|
||||
const int_info = struct_ty.intInfo(zcu);
|
||||
|
||||
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
|
||||
|
||||
const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
|
||||
|
||||
const field_int_signedness = if (inst_ty.isAbiInt(zcu))
|
||||
inst_ty.intInfo(zcu).signedness
|
||||
else
|
||||
.unsigned;
|
||||
const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
|
||||
|
||||
const temp_local = try f.allocLocal(inst, field_int_ty);
|
||||
try f.writeCValue(w, temp_local, .Other);
|
||||
try w.writeAll(" = zig_wrap_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, field_int_ty);
|
||||
try w.writeAll("((");
|
||||
try f.renderType(w, field_int_ty);
|
||||
try w.writeByte(')');
|
||||
const cant_cast = int_info.bits > 64;
|
||||
if (cant_cast) {
|
||||
if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
try w.writeAll("zig_lo_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, struct_ty);
|
||||
try w.writeByte('(');
|
||||
}
|
||||
if (bit_offset > 0) {
|
||||
try w.writeAll("zig_shr_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, struct_ty);
|
||||
try w.writeByte('(');
|
||||
}
|
||||
try f.writeCValue(w, struct_byval, .Other);
|
||||
if (bit_offset > 0) try w.print(", {f})", .{
|
||||
try f.fmtIntLiteralDec(try pt.intValue(bit_offset_ty, bit_offset)),
|
||||
});
|
||||
if (cant_cast) try w.writeByte(')');
|
||||
try f.object.dg.renderBuiltinInfo(w, field_int_ty, .bits);
|
||||
try w.writeAll(");");
|
||||
try f.object.newline();
|
||||
if (inst_ty.eql(field_int_ty, zcu)) return temp_local;
|
||||
|
||||
const local = try f.allocLocal(inst, inst_ty);
|
||||
if (local.new_local != temp_local.new_local) {
|
||||
try w.writeAll("memcpy(");
|
||||
try f.writeCValue(w, .{ .local_ref = local.new_local }, .FunctionArgument);
|
||||
try w.writeAll(", ");
|
||||
try f.writeCValue(w, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
|
||||
try w.writeAll(", sizeof(");
|
||||
try f.renderType(w, inst_ty);
|
||||
try w.writeAll("));");
|
||||
try f.object.newline();
|
||||
}
|
||||
try freeLocal(f, inst, temp_local.new_local, null);
|
||||
return local;
|
||||
},
|
||||
.struct_type => .{ .identifier = struct_ty.structFieldName(extra.field_index, zcu).unwrap().?.toSlice(ip) },
|
||||
.union_type => name: {
|
||||
const union_type = ip.loadUnionType(struct_ty.toIntern());
|
||||
const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_ty);
|
||||
const field_name_str = enum_tag_ty.enumFieldName(extra.field_index, zcu).toSlice(ip);
|
||||
if (union_type.hasTag(ip)) {
|
||||
break :name .{ .payload_identifier = field_name_str };
|
||||
} else {
|
||||
break :name .{ .identifier = field_name_str };
|
||||
}
|
||||
},
|
||||
.tuple_type => .{ .field = extra.field_index },
|
||||
.union_type => field_name: {
|
||||
const loaded_union = ip.loadUnionType(struct_ty.toIntern());
|
||||
switch (loaded_union.flagsUnordered(ip).layout) {
|
||||
.auto, .@"extern" => {
|
||||
const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
|
||||
break :field_name if (loaded_union.hasTag(ip))
|
||||
.{ .payload_identifier = name.toSlice(ip) }
|
||||
else
|
||||
.{ .identifier = name.toSlice(ip) };
|
||||
},
|
||||
.@"packed" => {
|
||||
const operand_lval = if (struct_byval == .constant) blk: {
|
||||
const operand_local = try f.allocLocal(inst, struct_ty);
|
||||
try f.writeCValue(w, operand_local, .Other);
|
||||
try w.writeAll(" = ");
|
||||
try f.writeCValue(w, struct_byval, .Other);
|
||||
try w.writeByte(';');
|
||||
try f.object.newline();
|
||||
break :blk operand_local;
|
||||
} else struct_byval;
|
||||
const local = try f.allocLocal(inst, inst_ty);
|
||||
if (switch (local) {
|
||||
.new_local, .local => |local_index| switch (operand_lval) {
|
||||
.new_local, .local => |operand_local_index| local_index != operand_local_index,
|
||||
else => true,
|
||||
},
|
||||
else => true,
|
||||
}) {
|
||||
try w.writeAll("memcpy(&");
|
||||
try f.writeCValue(w, local, .Other);
|
||||
try w.writeAll(", &");
|
||||
try f.writeCValue(w, operand_lval, .Other);
|
||||
try w.writeAll(", sizeof(");
|
||||
try f.renderType(w, inst_ty);
|
||||
try w.writeAll("));");
|
||||
try f.object.newline();
|
||||
}
|
||||
try f.freeCValue(inst, operand_lval);
|
||||
return local;
|
||||
},
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
|
|
@ -7702,98 +7471,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const a = try Assignment.start(f, w, try f.ctypeFromType(field_ty, .complete));
|
||||
try f.writeCValueMember(w, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
|
||||
.{ .identifier = field_name.toSlice(ip) }
|
||||
else
|
||||
.{ .field = field_index });
|
||||
try f.writeCValueMember(w, local, .{ .identifier = loaded_struct.fieldName(ip, field_index).toSlice(ip) });
|
||||
try a.assign(f, w);
|
||||
try f.writeCValue(w, resolved_elements[field_index], .Other);
|
||||
try a.end(f, w);
|
||||
}
|
||||
},
|
||||
.@"packed" => {
|
||||
try f.writeCValue(w, local, .Other);
|
||||
try w.writeAll(" = ");
|
||||
|
||||
const backing_int_ty: Type = .fromInterned(loaded_struct.backingIntTypeUnordered(ip));
|
||||
const int_info = backing_int_ty.intInfo(zcu);
|
||||
|
||||
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
|
||||
|
||||
var bit_offset: u64 = 0;
|
||||
|
||||
var empty = true;
|
||||
for (0..elements.len) |field_index| {
|
||||
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
|
||||
const field_ty = inst_ty.fieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) {
|
||||
try w.writeAll("zig_or_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
|
||||
try w.writeByte('(');
|
||||
}
|
||||
empty = false;
|
||||
}
|
||||
empty = true;
|
||||
for (resolved_elements, 0..) |element, field_index| {
|
||||
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
|
||||
const field_ty = inst_ty.fieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) try w.writeAll(", ");
|
||||
// TODO: Skip this entire shift if val is 0?
|
||||
try w.writeAll("zig_shlw_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
|
||||
try w.writeByte('(');
|
||||
|
||||
if (field_ty.isAbiInt(zcu)) {
|
||||
try w.writeAll("zig_and_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
|
||||
try w.writeByte('(');
|
||||
}
|
||||
|
||||
if (inst_ty.isAbiInt(zcu) and (field_ty.isAbiInt(zcu) or field_ty.isPtrAtRuntime(zcu))) {
|
||||
try f.renderIntCast(w, inst_ty, element, .{}, field_ty, .FunctionArgument);
|
||||
} else {
|
||||
try w.writeByte('(');
|
||||
try f.renderType(w, inst_ty);
|
||||
try w.writeByte(')');
|
||||
if (field_ty.isPtrAtRuntime(zcu)) {
|
||||
try w.writeByte('(');
|
||||
try f.renderType(w, switch (int_info.signedness) {
|
||||
.unsigned => .usize,
|
||||
.signed => .isize,
|
||||
});
|
||||
try w.writeByte(')');
|
||||
}
|
||||
try f.writeCValue(w, element, .Other);
|
||||
}
|
||||
|
||||
if (field_ty.isAbiInt(zcu)) {
|
||||
try w.writeAll(", ");
|
||||
const field_int_info = field_ty.intInfo(zcu);
|
||||
const field_mask = if (int_info.signedness == .signed and int_info.bits == field_int_info.bits)
|
||||
try pt.intValue(backing_int_ty, -1)
|
||||
else
|
||||
try (try pt.intType(.unsigned, field_int_info.bits)).maxIntScalar(pt, backing_int_ty);
|
||||
try f.object.dg.renderValue(w, field_mask, .FunctionArgument);
|
||||
try w.writeByte(')');
|
||||
}
|
||||
|
||||
try w.print(", {f}", .{
|
||||
try f.fmtIntLiteralDec(try pt.intValue(bit_offset_ty, bit_offset)),
|
||||
});
|
||||
try f.object.dg.renderBuiltinInfo(w, inst_ty, .bits);
|
||||
try w.writeByte(')');
|
||||
if (!empty) try w.writeByte(')');
|
||||
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
empty = false;
|
||||
}
|
||||
try w.writeByte(';');
|
||||
try f.object.newline();
|
||||
},
|
||||
.@"packed" => unreachable, // `Air.Legalize.Feature.expand_packed_struct_init` handles this case
|
||||
}
|
||||
},
|
||||
.tuple_type => |tuple_info| for (0..tuple_info.types.len) |field_index| {
|
||||
|
|
@ -7828,9 +7512,10 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{extra.init});

const w = &f.object.code.writer;
const local = try f.allocLocal(inst, union_ty);
if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);

const local = try f.allocLocal(inst, union_ty);

const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size != 0) {

@ -2514,11 +2514,7 @@ pub const Pool = struct {
kind.noParameter(),
);
if (field_ctype.index == .void) continue;
const field_name = if (loaded_struct.fieldName(ip, field_index)
.unwrap()) |field_name|
try pool.string(allocator, field_name.toSlice(ip))
else
String.fromUnnamed(@intCast(field_index));
const field_name = try pool.string(allocator, loaded_struct.fieldName(ip, field_index).toSlice(ip));
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_struct.fieldAlign(ip, field_index),
.abi = field_type.abiAlignment(zcu),

@ -2409,8 +2409,7 @@ pub const Object = struct {
const field_size = field_ty.abiSize(zcu);
const field_align = ty.fieldAlignment(field_index, zcu);
const field_offset = ty.structFieldOffset(field_index, zcu);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name = struct_type.fieldName(ip, field_index);
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(field_name.toSlice(ip)),
null, // File

@ -4885,6 +4884,11 @@ pub const FuncGen = struct {

const val: Builder.Value = switch (air_tags[@intFromEnum(inst)]) {
// zig fmt: off

// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.add => try self.airAdd(inst, .normal),
.add_optimized => try self.airAdd(inst, .fast),
.add_wrap => try self.airAddWrap(inst),

@ -5091,8 +5095,6 @@ pub const FuncGen = struct {
.wasm_memory_size => try self.airWasmMemorySize(inst),
.wasm_memory_grow => try self.airWasmMemoryGrow(inst),

.vector_store_elem => try self.airVectorStoreElem(inst),

.runtime_nav_ptr => try self.airRuntimeNavPtr(inst),

.inferred_alloc, .inferred_alloc_comptime => unreachable,

@ -6871,16 +6873,14 @@ pub const FuncGen = struct {
const array_llvm_ty = try o.lowerType(pt, array_ty);
const elem_ty = array_ty.childType(zcu);
if (isByRef(array_ty, zcu)) {
const indices: [2]Builder.Value = .{
try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs,
};
const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &.{
try o.builder.intValue(try o.lowerType(pt, Type.usize), 0),
rhs,
}, "");
if (isByRef(elem_ty, zcu)) {
const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
const elem_ptr =
try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
return self.loadTruncate(.normal, elem_ty, elem_ptr, .default);
}
}

@ -8138,33 +8138,6 @@ pub const FuncGen = struct {
}, "");
}

fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;

const vector_ptr = try self.resolveInst(data.vector_ptr);
const vector_ptr_ty = self.typeOf(data.vector_ptr);
const index = try self.resolveInst(extra.lhs);
const operand = try self.resolveInst(extra.rhs);

self.maybeMarkAllowZeroAccess(vector_ptr_ty.ptrInfo(zcu));

// TODO: Emitting a load here is a violation of volatile semantics. Not fixable in general.
// https://github.com/ziglang/zig/issues/18652#issuecomment-2452844908
const access_kind: Builder.MemoryAccessKind =
if (vector_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(pt, vector_ptr_ty.childType(zcu));
const alignment = vector_ptr_ty.ptrAlignment(zcu).toLlvm();
const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");

const new_vector = try self.wip.insertElement(loaded, operand, index, "");
_ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none);
return .none;
}

fn airRuntimeNavPtr(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const pt = fg.ng.pt;

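Conceptually the removed lowering performed a read-modify-write of the whole vector, which is why the TODO flagged it as a volatile-semantics violation. In Zig terms (illustrative only, not compiler code):

    fn vectorStoreElem(ptr: *@Vector(4, u32), index: usize, value: u32) void {
        var v = ptr.*; // whole-vector load: the problematic part for volatile pointers
        v[index] = value;
        ptr.* = v; // whole-vector store
    }

    test vectorStoreElem {
        var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
        vectorStoreElem(&v, 2, 9);
        try @import("std").testing.expectEqual(@as(u32, 9), v[2]);
    }
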
@ -8301,8 +8274,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);

if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
assert(scalar_ty.zigTypeTag(zcu) == .int);
return self.wip.callIntrinsic(
.normal,
.none,

@ -8342,8 +8314,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);

if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
assert(scalar_ty.zigTypeTag(zcu) == .int);
return self.wip.callIntrinsic(
.normal,
.none,

@ -8383,8 +8354,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);

if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
assert(scalar_ty.zigTypeTag(zcu) == .int);
return self.wip.callIntrinsic(
.normal,
.none,

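These intrinsic calls back Zig's saturating operators; the float TODO becomes an assert because only integer scalar types reach this code now. For reference, the language-level behavior being lowered, as a plain test:

    const std = @import("std");

    test "saturating arithmetic operators" {
        try std.testing.expectEqual(@as(i8, 127), @as(i8, 120) +| 10);
        try std.testing.expectEqual(@as(i8, -128), @as(i8, -120) -| 10);
        try std.testing.expectEqual(@as(u8, 255), @as(u8, 128) *| 2);
    }
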
@ -11452,7 +11422,6 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;

assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index);
const vec_elem_ty = try o.lowerType(pt, elem_ty);

@ -11522,7 +11491,6 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;

assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index);
const vec_elem_ty = try o.lowerType(pt, elem_ty);

@ -1391,6 +1391,11 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
const tag = air_tags[@intFromEnum(inst)];
switch (tag) {
// zig fmt: off

// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.add,
.add_wrap,
.sub,

@ -1633,7 +1638,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {

.is_named_enum_value => return func.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return func.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return func.fail("TODO implement vector_store_elem", .{}),

.c_va_arg => return func.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return func.fail("TODO implement c_va_copy", .{}),

@ -479,6 +479,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
self.reused_operands = @TypeOf(self.reused_operands).initEmpty();
switch (air_tags[@intFromEnum(inst)]) {
// zig fmt: off

// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,

.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

@ -702,7 +707,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
.runtime_nav_ptr => @panic("TODO implement runtime_nav_ptr"),

.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),

@ -1520,8 +1520,7 @@ fn resolveType(cg: *CodeGen, ty: Type, repr: Repr) Error!Id {
|
|||
const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
|
||||
try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
|
||||
const field_name = struct_type.fieldName(ip, field_index);
|
||||
try member_types.append(try cg.resolveType(field_ty, .indirect));
|
||||
try member_names.append(field_name.toSlice(ip));
|
||||
try member_offsets.append(@intCast(ty.structFieldOffset(field_index, zcu)));
|
||||
|
|
@ -2726,8 +2725,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) Error!void {
|
|||
.ptr_elem_val => try cg.airPtrElemVal(inst),
|
||||
.array_elem_val => try cg.airArrayElemVal(inst),
|
||||
|
||||
.vector_store_elem => return cg.airVectorStoreElem(inst),
|
||||
|
||||
.set_union_tag => return cg.airSetUnionTag(inst),
|
||||
.get_union_tag => try cg.airGetUnionTag(inst),
|
||||
.union_init => try cg.airUnionInit(inst),
|
||||
|
|
@ -4446,29 +4443,6 @@ fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
|
|||
return try cg.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
|
||||
}
|
||||
|
||||
fn airVectorStoreElem(cg: *CodeGen, inst: Air.Inst.Index) !void {
|
||||
const zcu = cg.module.zcu;
|
||||
const data = cg.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
|
||||
const extra = cg.air.extraData(Air.Bin, data.payload).data;
|
||||
|
||||
const vector_ptr_ty = cg.typeOf(data.vector_ptr);
|
||||
const vector_ty = vector_ptr_ty.childType(zcu);
|
||||
const scalar_ty = vector_ty.scalarType(zcu);
|
||||
|
||||
const scalar_ty_id = try cg.resolveType(scalar_ty, .indirect);
|
||||
const storage_class = cg.module.storageClass(vector_ptr_ty.ptrAddressSpace(zcu));
|
||||
const scalar_ptr_ty_id = try cg.module.ptrType(scalar_ty_id, storage_class);
|
||||
|
||||
const vector_ptr = try cg.resolve(data.vector_ptr);
|
||||
const index = try cg.resolve(extra.lhs);
|
||||
const operand = try cg.resolve(extra.rhs);
|
||||
|
||||
const elem_ptr_id = try cg.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
|
||||
try cg.store(scalar_ty, elem_ptr_id, operand, .{
|
||||
.is_volatile = vector_ptr_ty.isVolatilePtr(zcu),
|
||||
});
|
||||
}
|
||||
|
||||
fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) !void {
|
||||
const zcu = cg.module.zcu;
|
||||
const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
|
|
|
|||
|
|
@ -1786,6 +1786,10 @@ fn buildPointerOffset(cg: *CodeGen, ptr_value: WValue, offset: u64, action: enum
|
|||
fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const air_tags = cg.air.instructions.items(.tag);
|
||||
return switch (air_tags[@intFromEnum(inst)]) {
|
||||
// No "scalarize" legalizations are enabled, so these instructions never appear.
|
||||
.legalize_vec_elem_val => unreachable,
|
||||
.legalize_vec_store_elem => unreachable,
|
||||
|
||||
.inferred_alloc, .inferred_alloc_comptime => unreachable,
|
||||
|
||||
.add => cg.airBinOp(inst, .add),
|
||||
|
|
@ -1978,7 +1982,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
.save_err_return_trace_index,
|
||||
.is_named_enum_value,
|
||||
.addrspace_cast,
|
||||
.vector_store_elem,
|
||||
.c_va_arg,
|
||||
.c_va_copy,
|
||||
.c_va_end,
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -3158,11 +3158,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
|
|||
.struct_field
|
||||
else
|
||||
.struct_field);
|
||||
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
|
||||
var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
|
||||
const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
|
||||
try wip_nav.strp(field_name);
|
||||
}
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
|
||||
try wip_nav.refType(field_type);
|
||||
if (!is_comptime) {
|
||||
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
|
||||
|
|
@ -3187,7 +3183,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
|
|||
var field_bit_offset: u16 = 0;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
try wip_nav.abbrevCode(.packed_struct_field);
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
|
||||
const field_type: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
try wip_nav.refType(field_type);
|
||||
try diw.writeUleb128(field_bit_offset);
|
||||
|
|
@ -4269,11 +4265,7 @@ fn updateLazyValue(
|
|||
.comptime_value_field_runtime_bits
|
||||
else
|
||||
continue);
|
||||
if (loaded_struct_type.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
|
||||
var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
|
||||
const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
|
||||
try wip_nav.strp(field_name);
|
||||
}
|
||||
try wip_nav.strp(loaded_struct_type.fieldName(ip, field_index).toSlice(ip));
|
||||
const field_value: Value = .fromInterned(switch (aggregate.storage) {
|
||||
.bytes => unreachable,
|
||||
.elems => |elems| elems[field_index],
|
||||
|
|
@ -4467,11 +4459,7 @@ fn updateContainerTypeWriterError(
|
|||
.struct_field
|
||||
else
|
||||
.struct_field);
|
||||
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
|
||||
var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
|
||||
const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
|
||||
try wip_nav.strp(field_name);
|
||||
}
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
|
||||
try wip_nav.refType(field_type);
|
||||
if (!is_comptime) {
|
||||
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
|
||||
|
|
@ -4573,11 +4561,7 @@ fn updateContainerTypeWriterError(
|
|||
.struct_field
|
||||
else
|
||||
.struct_field);
|
||||
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
|
||||
var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
|
||||
const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
|
||||
try wip_nav.strp(field_name);
|
||||
}
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
|
||||
try wip_nav.refType(field_type);
|
||||
if (!is_comptime) {
|
||||
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
|
||||
|
|
@ -4600,7 +4584,7 @@ fn updateContainerTypeWriterError(
|
|||
var field_bit_offset: u16 = 0;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
try wip_nav.abbrevCode(.packed_struct_field);
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
|
||||
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
|
||||
const field_type: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
try wip_nav.refType(field_type);
|
||||
try diw.writeUleb128(field_bit_offset);
|
||||
|
|
|
59 stage1/zig.h

|
|
@ -40,6 +40,8 @@
|
|||
#elif defined(__mips__)
|
||||
#define zig_mips32
|
||||
#define zig_mips
|
||||
#elif defined(__or1k__)
|
||||
#define zig_or1k
|
||||
#elif defined(__powerpc64__)
|
||||
#define zig_powerpc64
|
||||
#define zig_powerpc
|
||||
|
|
@@ -72,6 +74,9 @@
 #elif defined (__x86_64__) || (defined(zig_msvc) && defined(_M_X64))
 #define zig_x86_64
 #define zig_x86
+#elif defined(__I86__)
+#define zig_x86_16
+#define zig_x86
 #endif

 #if defined(zig_msvc) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
@@ -82,9 +87,7 @@
 #define zig_big_endian 1
 #endif

-#if defined(_AIX)
-#define zig_aix
-#elif defined(__MACH__)
+#if defined(__MACH__)
 #define zig_darwin
 #elif defined(__DragonFly__)
 #define zig_dragonfly
@@ -114,20 +117,14 @@
 #define zig_wasi
 #elif defined(_WIN32)
 #define zig_windows
-#elif defined(__MVS__)
-#define zig_zos
 #endif

 #if defined(zig_windows)
 #define zig_coff
 #elif defined(__ELF__)
 #define zig_elf
-#elif defined(zig_zos)
-#define zig_goff
 #elif defined(zig_darwin)
 #define zig_macho
-#elif defined(zig_aix)
-#define zig_xcoff
 #endif

 #define zig_concat(lhs, rhs) lhs##rhs
@@ -390,12 +387,16 @@
 #define zig_trap() __asm__ volatile(".word 0x0")
 #elif defined(zig_mips)
 #define zig_trap() __asm__ volatile(".word 0x3d")
+#elif defined(zig_or1k)
+#define zig_trap() __asm__ volatile("l.cust8")
 #elif defined(zig_riscv)
 #define zig_trap() __asm__ volatile("unimp")
 #elif defined(zig_s390x)
 #define zig_trap() __asm__ volatile("j 0x2")
 #elif defined(zig_sparc)
 #define zig_trap() __asm__ volatile("illtrap")
+#elif defined(zig_x86_16)
+#define zig_trap() __asm__ volatile("int $0x3")
 #elif defined(zig_x86)
 #define zig_trap() __asm__ volatile("ud2")
 #else
@@ -422,6 +423,8 @@
 #define zig_breakpoint() __asm__ volatile("break 0x0")
 #elif defined(zig_mips)
 #define zig_breakpoint() __asm__ volatile("break")
+#elif defined(zig_or1k)
+#define zig_breakpoint() __asm__ volatile("l.trap 0x0")
 #elif defined(zig_powerpc)
 #define zig_breakpoint() __asm__ volatile("trap")
 #elif defined(zig_riscv)
@@ -804,15 +807,13 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
 #endif
 }

-zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
 static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow) || defined(zig_gcc)
     int32_t full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int32_t full_res = (int32_t)((uint32_t)lhs + (uint32_t)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i32(full_res, bits);
     return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
@@ -830,15 +831,13 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
 #endif
 }

-zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
 static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow) || defined(zig_gcc)
     int64_t full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int64_t full_res = (int64_t)((uint64_t)lhs + (uint64_t)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i64(full_res, bits);
     return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
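With the __addosi4/__addodi4 helpers gone from compiler_rt, the non-builtin branch now detects signed overflow branchlessly (Hacker's Delight): add with wraparound unsigned arithmetic, then overflow occurred iff the result's sign differs from both operands' signs, i.e. ((sum ^ lhs) & (sum ^ rhs)) < 0. The 128-bit variant further down uses the same identity. A self-contained Zig cross-check of the formula against the language's overflow builtin, illustrative only and not part of the patch:

const std = @import("std");

test "branchless signed-add overflow check matches @addWithOverflow" {
    const cases = [_][2]i32{
        .{ std.math.maxInt(i32), 1 }, // overflows upward
        .{ std.math.minInt(i32), -1 }, // overflows downward
        .{ std.math.maxInt(i32), -1 }, // no overflow
        .{ 123, 456 }, // no overflow
    };
    for (cases) |c| {
        const sum = c[0] +% c[1]; // wraparound add, like the unsigned add in zig.h
        const flagged = ((sum ^ c[0]) & (sum ^ c[1])) < 0;
        const expected = @addWithOverflow(c[0], c[1])[1] == 1;
        try std.testing.expectEqual(expected, flagged);
    }
}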
@@ -912,15 +911,13 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
 #endif
 }

-zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
 static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     int32_t full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int32_t full_res = (int32_t)((uint32_t)lhs - (uint32_t)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i32(full_res, bits);
     return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
@@ -938,15 +935,13 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
 #endif
 }

-zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
 static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     int64_t full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    int64_t full_res = (int64_t)((uint64_t)lhs - (uint64_t)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i64(full_res, bits);
     return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
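The subtraction fallback replaces __subosi4/__subodi4 with the companion identity: overflow occurs iff the operands have opposite signs and the difference's sign differs from the minuend's, i.e. ((lhs ^ rhs) & (res ^ lhs)) < 0. The same cross-check style, again illustrative only:

const std = @import("std");

test "branchless signed-sub overflow check matches @subWithOverflow" {
    const cases = [_][2]i32{
        .{ std.math.minInt(i32), 1 }, // overflows downward
        .{ std.math.maxInt(i32), -1 }, // overflows upward
        .{ std.math.minInt(i32), -1 }, // no overflow
        .{ 456, 123 }, // no overflow
    };
    for (cases) |c| {
        const diff = c[0] -% c[1]; // wraparound subtract
        const flagged = ((c[0] ^ c[1]) & (diff ^ c[0])) < 0;
        const expected = @subWithOverflow(c[0], c[1])[1] == 1;
        try std.testing.expectEqual(expected, flagged);
    }
}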
@@ -1750,15 +1745,13 @@ static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint
 #endif
 }

-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
 static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
 #if zig_has_builtin(add_overflow)
     zig_i128 full_res;
     bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    zig_i128 full_res = (zig_i128)((zig_u128)lhs + (zig_u128)rhs);
+    bool overflow = ((full_res ^ lhs) & (full_res ^ rhs)) < 0;
 #endif
     *res = zig_wrap_i128(full_res, bits);
     return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
@@ -1776,15 +1769,13 @@ static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint
 #endif
 }

-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
 static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow)
     zig_i128 full_res;
     bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
 #else
-    int overflow_int;
-    zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
-    bool overflow = overflow_int != 0;
+    zig_i128 full_res = (zig_i128)((zig_u128)lhs - (zig_u128)rhs);
+    bool overflow = ((lhs ^ rhs) & (full_res ^ lhs)) < 0;
 #endif
     *res = zig_wrap_i128(full_res, bits);
     return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
@@ -4213,7 +4204,7 @@ static inline void zig_loongarch_cpucfg(uint32_t word, uint32_t* result) {
 #endif
 }

-#elif defined(zig_x86)
+#elif defined(zig_x86) && !defined(zig_x86_16)

 static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
 #if defined(zig_msvc)
@@ -218,10 +218,13 @@ test "union with specified enum tag" {
 }

 test "packed union generates correctly aligned type" {
+    // This test will be removed after the following accepted proposal is implemented:
+    // https://github.com/ziglang/zig/issues/24657
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

     const U = packed union {
         f1: *const fn () error{TestUnexpectedResult}!void,
@@ -1544,7 +1547,7 @@ test "packed union field pointer has correct alignment" {

     const host_size = switch (builtin.zig_backend) {
         else => comptime std.math.divCeil(comptime_int, @bitSizeOf(S), 8) catch unreachable,
-        .stage2_x86_64 => @sizeOf(S),
+        .stage2_x86_64, .stage2_c => @sizeOf(S),
     };
     comptime assert(@TypeOf(ap) == *align(4:2:host_size) u20);
     comptime assert(@TypeOf(bp) == *align(1:2:host_size) u20);
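The else prong above derives the packed host size by rounding the bit size up to whole bytes. A minimal sketch of that arithmetic, with a hypothetical packed struct standing in for the test's S:

const std = @import("std");

test "packed host size rounds the bit size up to whole bytes" {
    // Hypothetical stand-in for the `S` used by the behavior test.
    const S = packed struct { a: u20, b: u11 };
    // Same expression as the switch's else prong: ceil(@bitSizeOf(S) / 8).
    const host_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(S), 8) catch unreachable;
    comptime std.debug.assert(host_size == 4); // 31 bits -> 4 bytes
}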