mirror of https://codeberg.org/ziglang/zig.git
Sema: Improve comptime arithmetic undef handling
This commit expands on the foundations laid by https://github.com/ziglang/zig/pull/23177 and moves even more `Sema`-only functionality from `Value` to `Sema.arith`. Specifically, all shift and bitwise operations, `@truncate`, `@bitReverse`, and `@byteSwap` have been moved and adapted to the new rules around `undefined`. The comptime shift operations in particular have been essentially rewritten, fixing many open issues in the process.

New rules applied to operators:

* `<<`, `@shlExact`, `@shlWithOverflow`, `>>`, `@shrExact`: compile error if any operand is undef
* `<<|`, `~`, `^`, `@truncate`, `@bitReverse`, `@byteSwap`: return undef if any operand is undef
* `&`, `|`: return undef if both operands are undef; turn undef into actual `0xAA` bytes otherwise

Additionally, this commit canonicalizes the representation of aggregates with all-undefined members in the `InternPool` by disallowing them and enforcing the use of a single typed `undef` value instead. This reduces the number of edge cases and fixes a bunch of bugs related to partially undefined vectors.

List of operations directly affected by this patch:

* `<<`, `<<|`, `@shlExact`, `@shlWithOverflow`
* `>>`, `@shrExact`
* `&`, `|`, `~`, `^` and their atomic RMW and reduce counterparts
* `@truncate`, `@bitReverse`, `@byteSwap`
parent 749f10af49
commit d0586da18e
22 changed files with 5446 additions and 4759 deletions
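To make these rules concrete, here is a small comptime sketch (illustration only, not part of the commit; the compile-error case is shown commented out since its exact wording is an assumption):

```zig
comptime {
    const x: u8 = undefined;

    // Shifts now reject undef operands outright, so a line like the
    // following no longer silently propagates garbage; it fails to compile:
    // const bad = x << 1;

    // `~`, `^`, `<<|`, `@truncate`, `@bitReverse`, `@byteSwap`: undef in, undef out.
    const a = ~x; // `a` is undef

    // `&` and `|` with exactly one undef operand materialize the undef side
    // as 0xAA bytes, so some result bits remain known.
    const b = x & 0x0F; // behaves like 0xAA & 0x0F == 0x0A
    _ = a;
    _ = b;
}
```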
@@ -6077,7 +6077,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
 {#header_close#}
 {#header_open|Exact Left Shift Overflow#}
 <p>At compile-time:</p>
-{#code|test_comptime_shlExact_overwlow.zig#}
+{#code|test_comptime_shlExact_overflow.zig#}
 
 <p>At runtime:</p>
 {#code|runtime_shlExact_overflow.zig#}
@@ -3,4 +3,4 @@ comptime {
     _ = x;
 }
 
-// test_error=operation caused overflow
+// test_error=overflow of integer type 'u8' with value '340'
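For context, the full langref test being updated presumably reads like the sketch below; the shift operands are an assumption, chosen because `@as(u8, 0b0101_0101) << 2` equals 340, which matches the new expected error message:

```zig
// test_comptime_shlExact_overflow.zig (reconstructed sketch, unverified)
comptime {
    const x = @shlExact(@as(u8, 0b0101_0101), 2);
    _ = x;
}

// test_error=overflow of integer type 'u8' with value '340'
```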
@@ -8401,24 +8401,33 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
                     assert(sentinel == .none or elem == sentinel);
                 },
             }
-            switch (ty_key) {
+            if (aggregate.storage.values().len > 0) switch (ty_key) {
                 .array_type, .vector_type => {
+                    var any_defined = false;
                     for (aggregate.storage.values()) |elem| {
+                        if (!ip.isUndef(elem)) any_defined = true;
                         assert(ip.typeOf(elem) == child);
                     }
+                    assert(any_defined);
                 },
                 .struct_type => {
+                    var any_defined = false;
                     for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
+                        if (!ip.isUndef(elem)) any_defined = true;
                         assert(ip.typeOf(elem) == field_ty);
                     }
+                    assert(any_defined);
                 },
                 .tuple_type => |tuple_type| {
+                    var any_defined = false;
                     for (aggregate.storage.values(), tuple_type.types.get(ip)) |elem, ty| {
+                        if (!ip.isUndef(elem)) any_defined = true;
                         assert(ip.typeOf(elem) == ty);
                     }
+                    assert(any_defined);
                 },
                 else => unreachable,
-            }
+            };
 
             if (len == 0) {
                 items.appendAssumeCapacity(.{
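The `any_defined` assertions above enforce the new canonical form: element-wise aggregate storage must contain at least one defined member, and an all-undef aggregate must instead be interned as a single typed `undef`. A standalone sketch of that decision (hypothetical types, not actual InternPool code):

```zig
const std = @import("std");

// Models the canonicalization rule: element storage is only legal when at
// least one member is defined; otherwise the typed `undef` form must be used.
const Elem = union(enum) { undef, int: u8 };

fn keepsElementStorage(elems: []const Elem) bool {
    for (elems) |e| {
        if (e != .undef) return true; // at least one defined member
    }
    return false; // all-undef: must collapse to a single typed undef
}

test "all-undef aggregates collapse to a single undef" {
    try std.testing.expect(!keepsElementStorage(&.{ .undef, .undef }));
    try std.testing.expect(keepsElementStorage(&.{ .undef, .{ .int = 1 } }));
}
```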
816 src/Sema.zig (diff suppressed because it is too large)
1016 src/Sema/arith.zig (diff suppressed because it is too large)
@@ -491,10 +491,7 @@ const PackValueBits = struct {
                 }
             },
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = elems },
-        } }));
+        return pt.aggregateValue(ty, elems);
     },
     .array => {
         // Each element is padded up to its ABI size. The final element does not have trailing padding.
@@ -525,10 +522,7 @@ const PackValueBits = struct {
            try pack.padding(elem_ty.bitSize(zcu));
        }
 
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = elems },
-       } }));
+       return pt.aggregateValue(ty, elems);
    },
    .@"struct" => switch (ty.containerLayout(zcu)) {
        .auto => unreachable, // ill-defined layout
@@ -568,10 +562,7 @@ const PackValueBits = struct {
            const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
            elem.* = val.toIntern();
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = elems },
-       } }));
+       return pt.aggregateValue(ty, elems);
    },
    .@"packed" => {
        // All fields are in order with no padding.
@@ -581,10 +572,7 @@ const PackValueBits = struct {
                const field_ty = ty.fieldType(i, zcu);
                elem.* = (try pack.get(field_ty)).toIntern();
            }
-           return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-               .ty = ty.toIntern(),
-               .storage = .{ .elems = elems },
-           } }));
+           return pt.aggregateValue(ty, elems);
        },
    },
    .@"union" => {
720 src/Value.zig
@@ -653,10 +653,7 @@ pub fn readFromMemory(
            elem.* = (try readFromMemory(elem_ty, zcu, buffer[offset..], arena)).toIntern();
            offset += @intCast(elem_size);
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = elems },
-       } }));
+       return pt.aggregateValue(ty, elems);
    },
    .vector => {
        // We use byte_count instead of abi_size here, so that any padding bytes
@@ -677,10 +674,7 @@ pub fn readFromMemory(
            const sz: usize = @intCast(field_ty.abiSize(zcu));
            field_val.* = (try readFromMemory(field_ty, zcu, buffer[off..(off + sz)], arena)).toIntern();
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = field_vals },
-       } }));
+       return pt.aggregateValue(ty, field_vals);
    },
    .@"packed" => {
        const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
@@ -826,10 +820,7 @@ pub fn readFromPackedMemory(
            elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
            bits += elem_bit_size;
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = elems },
-       } }));
+       return pt.aggregateValue(ty, elems);
    },
    .@"struct" => {
        // Sema is supposed to have emitted a compile error already for Auto layout structs,
@@ -843,10 +834,7 @@ pub fn readFromPackedMemory(
            field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
            bits += field_bits;
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = ty.toIntern(),
-           .storage = .{ .elems = field_vals },
-       } }));
+       return pt.aggregateValue(ty, field_vals);
    },
    .@"union" => switch (ty.containerLayout(zcu)) {
        .auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
@ -925,43 +913,6 @@ pub fn popCount(val: Value, ty: Type, zcu: *Zcu) u64 {
|
||||||
return @intCast(bigint.popCount(ty.intInfo(zcu).bits));
|
return @intCast(bigint.popCount(ty.intInfo(zcu).bits));
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
|
|
||||||
const zcu = pt.zcu;
|
|
||||||
const info = ty.intInfo(zcu);
|
|
||||||
|
|
||||||
var buffer: Value.BigIntSpace = undefined;
|
|
||||||
const operand_bigint = val.toBigInt(&buffer, zcu);
|
|
||||||
|
|
||||||
const limbs = try arena.alloc(
|
|
||||||
std.math.big.Limb,
|
|
||||||
std.math.big.int.calcTwosCompLimbCount(info.bits),
|
|
||||||
);
|
|
||||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
|
||||||
result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);
|
|
||||||
|
|
||||||
return pt.intValue_big(ty, result_bigint.toConst());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
|
|
||||||
const zcu = pt.zcu;
|
|
||||||
const info = ty.intInfo(zcu);
|
|
||||||
|
|
||||||
// Bit count must be evenly divisible by 8
|
|
||||||
assert(info.bits % 8 == 0);
|
|
||||||
|
|
||||||
var buffer: Value.BigIntSpace = undefined;
|
|
||||||
const operand_bigint = val.toBigInt(&buffer, zcu);
|
|
||||||
|
|
||||||
const limbs = try arena.alloc(
|
|
||||||
std.math.big.Limb,
|
|
||||||
std.math.big.int.calcTwosCompLimbCount(info.bits),
|
|
||||||
);
|
|
||||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
|
||||||
result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);
|
|
||||||
|
|
||||||
return pt.intValue_big(ty, result_bigint.toConst());
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Asserts the value is an integer and not undefined.
|
/// Asserts the value is an integer and not undefined.
|
||||||
/// Returns the number of bits the value requires to represent stored in twos complement form.
|
/// Returns the number of bits the value requires to represent stored in twos complement form.
|
||||||
pub fn intBitCountTwosComp(self: Value, zcu: *Zcu) usize {
|
pub fn intBitCountTwosComp(self: Value, zcu: *Zcu) usize {
|
||||||
|
|
@@ -1386,15 +1337,10 @@ pub fn isUndef(val: Value, zcu: *const Zcu) bool {
     return zcu.intern_pool.isUndef(val.toIntern());
 }
 
-/// TODO: check for cases such as array that is not marked undef but all the element
-/// values are marked undef, or struct that is not marked undef but all fields are marked
-/// undef, etc.
-pub fn isUndefDeep(val: Value, zcu: *const Zcu) bool {
-    return val.isUndef(zcu);
-}
-
 /// `val` must have a numeric or vector type.
 /// Returns whether `val` is undefined or contains any undefined elements.
+/// Returns the index of the first undefined element it encounters
+/// or `null` if no element is undefined.
 pub fn anyScalarIsUndef(val: Value, zcu: *const Zcu) bool {
     switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .undef => return true,
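Removing `isUndefDeep` is sound precisely because of the canonicalization above: an interned aggregate can no longer consist solely of undef members, so the deep check the deleted TODO asked for collapses into the shallow one. A standalone sketch of that equivalence, assuming the invariant holds (hypothetical types, not compiler code):

```zig
const std = @import("std");

const Val = union(enum) { undef, int: u8, agg: []const Val };

fn isUndef(v: Val) bool {
    return v == .undef;
}

fn isUndefDeep(v: Val) bool {
    switch (v) {
        .undef => return true,
        .int => return false,
        .agg => |elems| {
            for (elems) |e| {
                if (!isUndefDeep(e)) return false;
            }
            // Only reachable for an all-undef aggregate, which the canonical
            // form rules out: it would have been interned as `.undef` instead.
            return true;
        },
    }
}

test "shallow and deep undef checks agree under the canonical form" {
    const mixed: Val = .{ .agg = &.{ .undef, .{ .int = 1 } } };
    try std.testing.expect(isUndef(mixed) == isUndefDeep(mixed));
    try std.testing.expect(isUndef(.undef) and isUndefDeep(.undef));
}
```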
@@ -1530,10 +1476,7 @@ pub fn floatFromIntAdvanced(
            const elem_val = try val.elemValue(pt, i);
            scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern();
        }
-       return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-           .ty = float_ty.toIntern(),
-           .storage = .{ .elems = result_data },
-       } }));
+       return pt.aggregateValue(float_ty, result_data);
    }
    return floatFromIntScalar(val, float_ty, pt, strat);
 }
@@ -1605,273 +1548,6 @@ pub fn numberMin(lhs: Value, rhs: Value, zcu: *Zcu) Value {
     };
 }
 
-/// operands must be (vectors of) integers or bools; handles undefined scalars.
-pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const elem_val = try val.elemValue(pt, i);
-            scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return bitwiseNotScalar(val, ty, arena, pt);
-}
-
-/// operands must be integers or bools; handles undefined.
-pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (val.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
-    if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());
-
-    const info = ty.intInfo(zcu);
-
-    if (info.bits == 0) {
-        return val;
-    }
-
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    var val_space: Value.BigIntSpace = undefined;
-    const val_bigint = val.toBigInt(&val_space, zcu);
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        std.math.big.int.calcTwosCompLimbCount(info.bits),
-    );
-
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-    result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-/// operands must be (vectors of) integers or bools; handles undefined scalars.
-pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return bitwiseAndScalar(lhs, rhs, ty, allocator, pt);
-}
-
-/// operands must be integers or bools; handles undefined.
-pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    // If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
-    // still zero out some bits.
-    // TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
-    const lhs: Value, const rhs: Value = make_defined: {
-        const lhs_undef = orig_lhs.isUndef(zcu);
-        const rhs_undef = orig_rhs.isUndef(zcu);
-        break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
-            0b00 => .{ orig_lhs, orig_rhs },
-            0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
-            0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
-            0b11 => return pt.undefValue(ty),
-        };
-    };
-
-    if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());
-
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    var lhs_space: Value.BigIntSpace = undefined;
-    var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        // + 1 for negatives
-        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
-    );
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-    result_bigint.bitAnd(lhs_bigint, rhs_bigint);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-/// Given an integer or boolean type, creates an value of that with the bit pattern 0xAA.
-/// This is used to convert undef values into 0xAA when performing e.g. bitwise operations.
-fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.toIntern() == .bool_type) return Value.true;
-    const info = ty.intInfo(zcu);
-
-    const buf = try arena.alloc(u8, (info.bits + 7) / 8);
-    @memset(buf, 0xAA);
-
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        std.math.big.int.calcTwosCompLimbCount(info.bits),
-    );
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-    result_bigint.readTwosComplement(buf, info.bits, zcu.getTarget().cpu.arch.endian(), info.signedness);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-/// operands must be (vectors of) integers or bools; handles undefined scalars.
-pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return bitwiseNandScalar(lhs, rhs, ty, arena, pt);
-}
-
-/// operands must be integers or bools; handles undefined.
-pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
-    if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
-
-    const anded = try bitwiseAnd(lhs, rhs, ty, arena, pt);
-    const all_ones = if (ty.isSignedInt(zcu)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
-    return bitwiseXor(anded, all_ones, ty, arena, pt);
-}
-
-/// operands must be (vectors of) integers or bools; handles undefined scalars.
-pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return bitwiseOrScalar(lhs, rhs, ty, allocator, pt);
-}
-
-/// operands must be integers or bools; handles undefined.
-pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    // If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
-    // still zero out some bits.
-    // TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
-    const zcu = pt.zcu;
-    const lhs: Value, const rhs: Value = make_defined: {
-        const lhs_undef = orig_lhs.isUndef(zcu);
-        const rhs_undef = orig_rhs.isUndef(zcu);
-        break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
-            0b00 => .{ orig_lhs, orig_rhs },
-            0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
-            0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
-            0b11 => return pt.undefValue(ty),
-        };
-    };
-
-    if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());
-
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    var lhs_space: Value.BigIntSpace = undefined;
-    var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
-    );
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-    result_bigint.bitOr(lhs_bigint, rhs_bigint);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-/// operands must be (vectors of) integers or bools; handles undefined scalars.
-pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return bitwiseXorScalar(lhs, rhs, ty, allocator, pt);
-}
-
-/// operands must be integers or bools; handles undefined.
-pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
-    if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());
-
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    var lhs_space: Value.BigIntSpace = undefined;
-    var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        // + 1 for negatives
-        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
-    );
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-    result_bigint.bitXor(lhs_bigint, rhs_bigint);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    const zcu = pt.zcu;
-    var lhs_space: Value.BigIntSpace = undefined;
-    var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
-    const limbs_q = try allocator.alloc(
-        std.math.big.Limb,
-        lhs_bigint.limbs.len,
-    );
-    const limbs_r = try allocator.alloc(
-        std.math.big.Limb,
-        rhs_bigint.limbs.len,
-    );
-    const limbs_buffer = try allocator.alloc(
-        std.math.big.Limb,
-        std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
-    );
-    var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
-    var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
-    result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-    return pt.intValue_big(ty, result_r.toConst());
-}
-
 /// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
 pub fn isNan(val: Value, zcu: *const Zcu) bool {
     return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
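The deleted `intValueAa` above is where the `0xAA` materialization lived; per the commit message, the same semantics now reside in `Sema.arith`. The arithmetic consequences are easy to verify by hand:

```zig
const std = @import("std");

test "0xAA materialization keeps some bits known" {
    // Per the new rules, `undef & x` behaves like `0xAA & x` for a u8 (and
    // likewise for `|`), so AND can still prove bits zero and OR can prove
    // bits one even with one undef operand.
    const aa: u8 = 0xAA;
    try std.testing.expectEqual(@as(u8, 0x0A), aa & 0x0F);
    try std.testing.expectEqual(@as(u8, 0xAF), aa | 0x0F);
}
```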
@@ -1892,6 +1568,7 @@ pub fn isInf(val: Value, zcu: *const Zcu) bool {
     };
 }
 
+/// Returns true if the value is a floating point type and is negative infinite. Returns false otherwise.
 pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
     return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .float => |float| switch (float.storage) {
@@ -1901,387 +1578,6 @@ pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
     };
 }
 
-pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    if (float_type.zigTypeTag(pt.zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
-        const scalar_ty = float_type.scalarType(pt.zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return floatRemScalar(lhs, rhs, float_type, pt);
-}
-
-pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    const target = pt.zcu.getTarget();
-    const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @rem(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
-        32 => .{ .f32 = @rem(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
-        64 => .{ .f64 = @rem(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
-        80 => .{ .f80 = @rem(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
-        128 => .{ .f128 = @rem(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
-        else => unreachable,
-    };
-    return Value.fromInterned(try pt.intern(.{ .float = .{
-        .ty = float_type.toIntern(),
-        .storage = storage,
-    } }));
-}
-
-pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    if (float_type.zigTypeTag(pt.zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
-        const scalar_ty = float_type.scalarType(pt.zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return floatModScalar(lhs, rhs, float_type, pt);
-}
-
-pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    const target = zcu.getTarget();
-    const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @mod(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
-        32 => .{ .f32 = @mod(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
-        64 => .{ .f64 = @mod(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
-        80 => .{ .f80 = @mod(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
-        128 => .{ .f128 = @mod(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
-        else => unreachable,
-    };
-    return Value.fromInterned(try pt.intern(.{ .float = .{
-        .ty = float_type.toIntern(),
-        .storage = storage,
-    } }));
-}
-
-pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const elem_val = try val.elemValue(pt, i);
-            scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return intTruncScalar(val, ty, allocator, signedness, bits, pt);
-}
-
-/// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
-pub fn intTruncBitsAsValue(
-    val: Value,
-    ty: Type,
-    allocator: Allocator,
-    signedness: std.builtin.Signedness,
-    bits: Value,
-    pt: Zcu.PerThread,
-) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const elem_val = try val.elemValue(pt, i);
-            const bits_elem = try bits.elemValue(pt, i);
-            scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(zcu)), pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(zcu)), pt);
-}
-
-pub fn intTruncScalar(
-    val: Value,
-    ty: Type,
-    allocator: Allocator,
-    signedness: std.builtin.Signedness,
-    bits: u16,
-    pt: Zcu.PerThread,
-) !Value {
-    const zcu = pt.zcu;
-    if (bits == 0) return pt.intValue(ty, 0);
-
-    if (val.isUndef(zcu)) return pt.undefValue(ty);
-
-    var val_space: Value.BigIntSpace = undefined;
-    const val_bigint = val.toBigInt(&val_space, zcu);
-
-    const limbs = try allocator.alloc(
-        std.math.big.Limb,
-        std.math.big.int.calcTwosCompLimbCount(bits),
-    );
-    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
-
-    result_bigint.truncate(val_bigint, signedness, bits);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const zcu = pt.zcu;
-    if (ty.zigTypeTag(zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
-        const scalar_ty = ty.scalarType(zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return shlScalar(lhs, rhs, ty, allocator, pt);
-}
-
-pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    const zcu = pt.zcu;
-    var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
-    const limbs = try allocator.alloc(
-        std.math.big.Limb,
-        lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
-    );
-    var result_bigint = BigIntMutable{
-        .limbs = limbs,
-        .positive = undefined,
-        .len = undefined,
-    };
-    result_bigint.shiftLeft(lhs_bigint, shift);
-    if (ty.toIntern() != .comptime_int_type) {
-        const int_info = ty.intInfo(zcu);
-        result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits);
-    }
-
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-pub fn shlWithOverflow(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    allocator: Allocator,
-    pt: Zcu.PerThread,
-) !OverflowArithmeticResult {
-    if (ty.zigTypeTag(pt.zcu) == .vector) {
-        const vec_len = ty.vectorLen(pt.zcu);
-        const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
-        const result_data = try allocator.alloc(InternPool.Index, vec_len);
-        const scalar_ty = ty.scalarType(pt.zcu);
-        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt);
-            of.* = of_math_result.overflow_bit.toIntern();
-            scalar.* = of_math_result.wrapped_result.toIntern();
-        }
-        return OverflowArithmeticResult{
-            .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
-                .storage = .{ .elems = overflowed_data },
-            } })),
-            .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = result_data },
-            } })),
-        };
-    }
-    return shlWithOverflowScalar(lhs, rhs, ty, allocator, pt);
-}
-
-pub fn shlWithOverflowScalar(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    allocator: Allocator,
-    pt: Zcu.PerThread,
-) !OverflowArithmeticResult {
-    const zcu = pt.zcu;
-    const info = ty.intInfo(zcu);
-    var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
-    const limbs = try allocator.alloc(
-        std.math.big.Limb,
-        lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
-    );
-    var result_bigint = BigIntMutable{
-        .limbs = limbs,
-        .positive = undefined,
-        .len = undefined,
-    };
-    result_bigint.shiftLeft(lhs_bigint, shift);
-    const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits);
-    if (overflowed) {
-        result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
-    }
-    return OverflowArithmeticResult{
-        .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
-        .wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()),
-    };
-}
-
-pub fn shlSat(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    arena: Allocator,
-    pt: Zcu.PerThread,
-) !Value {
-    if (ty.zigTypeTag(pt.zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
-        const scalar_ty = ty.scalarType(pt.zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return shlSatScalar(lhs, rhs, ty, arena, pt);
-}
-
-pub fn shlSatScalar(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    arena: Allocator,
-    pt: Zcu.PerThread,
-) !Value {
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    const zcu = pt.zcu;
-    const info = ty.intInfo(zcu);
-
-    var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
-    const limbs = try arena.alloc(
-        std.math.big.Limb,
-        std.math.big.int.calcTwosCompLimbCount(info.bits),
-    );
-    var result_bigint = BigIntMutable{
-        .limbs = limbs,
-        .positive = undefined,
-        .len = undefined,
-    };
-    result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
-pub fn shlTrunc(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    arena: Allocator,
-    pt: Zcu.PerThread,
-) !Value {
-    if (ty.zigTypeTag(pt.zcu) == .vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
-        const scalar_ty = ty.scalarType(pt.zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return shlTruncScalar(lhs, rhs, ty, arena, pt);
-}
-
-pub fn shlTruncScalar(
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-    arena: Allocator,
-    pt: Zcu.PerThread,
-) !Value {
-    const shifted = try lhs.shl(rhs, ty, arena, pt);
-    const int_info = ty.intInfo(pt.zcu);
-    const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, pt);
-    return truncated;
-}
-
-pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    if (ty.zigTypeTag(pt.zcu) == .vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
-        const scalar_ty = ty.scalarType(pt.zcu);
-        for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(pt, i);
-            const rhs_elem = try rhs.elemValue(pt, i);
-            scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
-        }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
-    }
-    return shrScalar(lhs, rhs, ty, allocator, pt);
-}
-
-pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    // TODO is this a performance issue? maybe we should try the operation without
-    // resorting to BigInt first.
-    const zcu = pt.zcu;
-    var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
-
-    const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
-    if (result_limbs == 0) {
-        // The shift is enough to remove all the bits from the number, which means the
-        // result is 0 or -1 depending on the sign.
-        if (lhs_bigint.positive) {
-            return pt.intValue(ty, 0);
-        } else {
-            return pt.intValue(ty, -1);
-        }
-    }
-
-    const limbs = try allocator.alloc(
-        std.math.big.Limb,
-        result_limbs,
-    );
-    var result_bigint = BigIntMutable{
-        .limbs = limbs,
-        .positive = undefined,
-        .len = undefined,
-    };
-    result_bigint.shiftRight(lhs_bigint, shift);
-    return pt.intValue_big(ty, result_bigint.toConst());
-}
-
 pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
     if (float_type.zigTypeTag(pt.zcu) == .vector) {
         const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
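One behavior worth noting from the deleted `shrScalar`: when the shift discards every significant bit, the result is 0 for non-negative values and -1 for negative ones, i.e. an arithmetic shift. The same rule is observable with plain runtime integers:

```zig
const std = @import("std");

test "right shift of all significant bits follows the sign" {
    const pos: i8 = 42;
    const neg: i8 = -42;
    try std.testing.expectEqual(@as(i8, 0), pos >> 7);
    try std.testing.expectEqual(@as(i8, -1), neg >> 7);
}
```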
@@ -3672,6 +3672,31 @@ pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) All
     }));
 }
 
+pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) Allocator.Error!Value {
+    for (elems) |elem| {
+        if (!Value.fromInterned(elem).isUndef(pt.zcu)) break;
+    } else { // all-undef
+        return pt.undefValue(ty);
+    }
+    return .fromInterned(try pt.intern(.{ .aggregate = .{
+        .ty = ty.toIntern(),
+        .storage = .{ .elems = elems },
+    } }));
+}
+
+/// Asserts that `ty` is either an array or a vector.
+pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, repeated: Value) Allocator.Error!Value {
+    switch (ty.zigTypeTag(pt.zcu)) {
+        .array, .vector => {},
+        else => unreachable,
+    }
+    if (repeated.isUndef(pt.zcu)) return pt.undefValue(ty);
+    return .fromInterned(try pt.intern(.{ .aggregate = .{
+        .ty = ty.toIntern(),
+        .storage = .{ .repeated_elem = repeated.toIntern() },
+    } }));
+}
+
 /// This function casts the float representation down to the representation of the type, potentially
 /// losing data if the representation wasn't correct.
 pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
@@ -3131,7 +3131,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
     const zcu = pt.zcu;
     assert(!isByRef(ty, zcu, cg.target));
     const ip = &zcu.intern_pool;
-    if (val.isUndefDeep(zcu)) return cg.emitUndefined(ty);
+    if (val.isUndef(zcu)) return cg.emitUndefined(ty);
 
     switch (ip.indexToKey(val.ip_index)) {
         .int_type,
@@ -327,7 +327,7 @@ pub fn generateSymbol(
 
     log.debug("generateSymbol: val = {f}", .{val.fmtValue(pt)});
 
-    if (val.isUndefDeep(zcu)) {
+    if (val.isUndef(zcu)) {
         const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
         try code.appendNTimes(gpa, 0xaa, abi_size);
         return;
@@ -1012,7 +1012,7 @@ pub const DeclGen = struct {
         };
 
         const ty = val.typeOf(zcu);
-        if (val.isUndefDeep(zcu)) return dg.renderUndefValue(w, ty, location);
+        if (val.isUndef(zcu)) return dg.renderUndefValue(w, ty, location);
         const ctype = try dg.ctypeFromType(ty, location.toCTypeKind());
         switch (ip.indexToKey(val.toIntern())) {
             // types, not values
@@ -4216,7 +4216,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const ptr_val = try f.resolveInst(bin_op.lhs);
     const src_ty = f.typeOf(bin_op.rhs);
 
-    const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndefDeep(zcu) else false;
+    const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndef(zcu) else false;
 
     const w = &f.object.code.writer;
     if (val_is_undef) {
@@ -4942,7 +4942,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
     const tag = f.air.instructions.items(.tag)[@intFromEnum(inst)];
     const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
-    const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndefDeep(zcu) else false;
+    const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndef(zcu) else false;
     if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
 
     try reap(f, inst, &.{pl_op.operand});
@@ -7117,7 +7117,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const value = try f.resolveInst(bin_op.rhs);
     const elem_ty = f.typeOf(bin_op.rhs);
     const elem_abi_size = elem_ty.abiSize(zcu);
-    const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
+    const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndef(zcu) else false;
     const w = &f.object.code.writer;
 
     if (val_is_undef) {
@@ -8338,7 +8338,7 @@ fn formatIntLiteral(data: FormatIntLiteralContext, w: *std.io.Writer) std.io.Wri
     defer allocator.free(undef_limbs);
 
     var int_buf: Value.BigIntSpace = undefined;
-    const int = if (data.val.isUndefDeep(zcu)) blk: {
+    const int = if (data.val.isUndef(zcu)) blk: {
         undef_limbs = allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits)) catch return error.WriteFailed;
         @memset(undef_limbs, undefPattern(BigIntLimb));
 
@@ -3575,7 +3575,7 @@ pub const Object = struct {
     const val = Value.fromInterned(arg_val);
     const val_key = ip.indexToKey(val.toIntern());
 
-    if (val.isUndefDeep(zcu)) return o.builder.undefConst(llvm_int_ty);
+    if (val.isUndef(zcu)) return o.builder.undefConst(llvm_int_ty);
 
     const ty = Type.fromInterned(val_key.typeOf());
     switch (val_key) {
@@ -3666,7 +3666,7 @@ pub const Object = struct {
     const val = Value.fromInterned(arg_val);
     const val_key = ip.indexToKey(val.toIntern());
 
-    if (val.isUndefDeep(zcu)) {
+    if (val.isUndef(zcu)) {
         return o.builder.undefConst(try o.lowerType(pt, Type.fromInterned(val_key.typeOf())));
     }
 
@@ -5574,7 +5574,7 @@ pub const FuncGen = struct {
     const ptr_ty = try pt.singleMutPtrType(ret_ty);
 
     const operand = try self.resolveInst(un_op);
-    const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
+    const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndef(zcu) else false;
     if (val_is_undef and safety) undef: {
         const ptr_info = ptr_ty.ptrInfo(zcu);
         const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
@@ -5629,7 +5629,7 @@ pub const FuncGen = struct {
 
     const abi_ret_ty = try lowerFnRetTy(o, pt, fn_info);
     const operand = try self.resolveInst(un_op);
-    const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
+    const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndef(zcu) else false;
     const alignment = ret_ty.abiAlignment(zcu).toLlvm();
 
     if (val_is_undef and safety) {
@@ -9673,7 +9673,7 @@ pub const FuncGen = struct {
     const ptr_ty = self.typeOf(bin_op.lhs);
     const operand_ty = ptr_ty.childType(zcu);
 
-    const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
+    const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndef(zcu) else false;
     if (val_is_undef) {
         const owner_mod = self.ng.ownerModule();
 
@@ -10014,7 +10014,7 @@ pub const FuncGen = struct {
         self.maybeMarkAllowZeroAccess(ptr_ty.ptrInfo(zcu));
 
         if (try self.air.value(bin_op.rhs, pt)) |elem_val| {
-            if (elem_val.isUndefDeep(zcu)) {
+            if (elem_val.isUndef(zcu)) {
                 // Even if safety is disabled, we still emit a memset to undefined since it conveys
                 // extra information to LLVM. However, safety makes the difference between using
                 // 0xaa or actual undefined for the fill byte.
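The 0xaa-versus-undef distinction in that comment is the same one user code sees from `@memset` with an undefined element; roughly (behavior varies by optimize mode, so this is a sketch rather than an assertable test):

test "memset to undefined" {
    var buf: [4]u8 = undefined;
    // With runtime safety on (Debug/ReleaseSafe) every byte becomes 0xaa;
    // with safety off this only informs the optimizer that the memory is
    // undefined and need not emit a real store.
    @memset(&buf, undefined);
    _ = &buf;
}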
@@ -1303,7 +1303,7 @@ fn getNavOutputSection(coff: *Coff, nav_index: InternPool.Nav.Index) u16 {
     const zig_ty = ty.zigTypeTag(zcu);
     const val = Value.fromInterned(nav.status.fully_resolved.val);
     const index: u16 = blk: {
-        if (val.isUndefDeep(zcu)) {
+        if (val.isUndef(zcu)) {
             // TODO in release-fast and release-small, we should put undef in .bss
             break :blk coff.data_section_index.?;
         }
||||||
|
|
@ -1197,7 +1197,7 @@ fn getNavShdrIndex(
|
||||||
self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec);
|
self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec);
|
||||||
return osec;
|
return osec;
|
||||||
}
|
}
|
||||||
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
|
if (nav_init != .none and Value.fromInterned(nav_init).isUndef(zcu))
|
||||||
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
|
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
|
||||||
.Debug, .ReleaseSafe => {
|
.Debug, .ReleaseSafe => {
|
||||||
if (self.data_index) |symbol_index|
|
if (self.data_index) |symbol_index|
|
||||||
|
|
|
||||||
|
|
@ -1175,7 +1175,7 @@ fn getNavOutputSection(
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (is_const) return macho_file.zig_const_sect_index.?;
|
if (is_const) return macho_file.zig_const_sect_index.?;
|
||||||
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
|
if (nav_init != .none and Value.fromInterned(nav_init).isUndef(zcu))
|
||||||
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
|
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
|
||||||
.Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?,
|
.Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?,
|
||||||
.ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?,
|
.ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?,
|
||||||
|
|
|
||||||
|
|
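All three linkers now make the placement decision with the shallow check: a fully undefined global initializer lands in a zero-fill section in ReleaseFast and ReleaseSmall, where it occupies no space in the binary, but stays in a regular data section in Debug and ReleaseSafe, presumably so the undefined bytes can be materialized as the 0xaa fill pattern. The COFF TODO above marks the one backend still missing the zero-fill half of that scheme.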
@@ -65,10 +65,7 @@ pub const MutableValue = union(enum) {
                 .ty = sv.ty,
                 .val = (try sv.child.intern(pt, arena)).toIntern(),
             } }),
-            .repeated => |sv| try pt.intern(.{ .aggregate = .{
-                .ty = sv.ty,
-                .storage = .{ .repeated_elem = (try sv.child.intern(pt, arena)).toIntern() },
-            } }),
+            .repeated => |sv| return pt.aggregateSplatValue(.fromInterned(sv.ty), try sv.child.intern(pt, arena)),
             .bytes => |b| try pt.intern(.{ .aggregate = .{
                 .ty = b.ty,
                 .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) },
@@ -78,10 +75,7 @@ pub const MutableValue = union(enum) {
                 for (a.elems, elems) |mut_elem, *interned_elem| {
                     interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern();
                 }
-                return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = a.ty,
-                    .storage = .{ .elems = elems },
-                } }));
+                return pt.aggregateValue(.fromInterned(a.ty), elems);
             },
             .slice => |s| try pt.intern(.{ .slice = .{
                 .ty = s.ty,
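Both rewritten arms route through `Zcu.PerThread` helpers instead of interning aggregates by hand. The helper bodies are not part of this diff; a plausible sketch of the contract implied by the call sites (hypothetical code, with the signatures and the `undefValue` call assumed):

// Never intern an aggregate whose members are all undef; hand back the
// canonical typed undef instead.
fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, elem: Value) !Value {
    if (elem.isUndef(pt.zcu)) return pt.undefValue(ty);
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .repeated_elem = elem.toIntern() },
    } }));
}

fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) !Value {
    const ip = &pt.zcu.intern_pool;
    for (elems) |elem| {
        if (!ip.isUndef(elem)) break;
    } else return pt.undefValue(ty); // every element undef: collapse
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}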
@@ -154,12 +154,6 @@ test "Saturating Shift Left where lhs is of a computed type" {
     try expect(value.exponent == 0);
 }
 
-comptime {
-    var image: [1]u8 = undefined;
-    _ = &image;
-    _ = @shlExact(@as(u16, image[0]), 8);
-}
-
 test "Saturating Shift Left" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
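The deleted `comptime` block fed a value loaded from an undefined array into `@shlExact`; under this commit's tightened comptime handling of undefined shift operands that construct presumably no longer compiles, so it cannot stay in a behavior test that must pass.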
@@ -202,3 +196,10 @@ test "Saturating Shift Left" {
     try expectEqual(170141183460469231731687303715884105727, S.shlSat(@as(i128, 0x2fe6bc5448c55ce18252e2c9d4477750), 0x31));
     try expectEqual(0, S.shlSat(@as(i128, 0), 127));
 }
+
+test "shift by partially undef vector" {
+    comptime {
+        const a: @Vector(1, u8) = .{undefined};
+        _ = a >> @splat(4);
+    }
+}
|
|
@ -30,7 +30,9 @@ export fn d(rhs: @Vector(3, i32)) void {
|
||||||
//
|
//
|
||||||
// :2:25: error: shift by negative amount '-1'
|
// :2:25: error: shift by negative amount '-1'
|
||||||
// :7:12: error: shift by negative amount '-2'
|
// :7:12: error: shift by negative amount '-2'
|
||||||
// :11:47: error: shift by negative amount '-3' at index '0'
|
// :11:47: error: shift by negative amount '-3'
|
||||||
// :16:27: error: shift by negative amount '-4' at index '1'
|
// :11:47: note: when computing vector element at index '0'
|
||||||
|
// :16:27: error: shift by negative amount '-4'
|
||||||
|
// :16:27: note: when computing vector element at index '1'
|
||||||
// :20:25: error: shift by signed type 'i32'
|
// :20:25: error: shift by signed type 'i32'
|
||||||
// :24:40: error: shift by signed type '@Vector(3, i32)'
|
// :24:40: error: shift by signed type '@Vector(3, i32)'
|
||||||
|
|
|
||||||
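The vector diagnostics are now split into an error on the shift plus a note naming the offending element. A hypothetical reproduction of the shape (made-up function name and values; the actual test source is not shown in this diff):

export fn repro() void {
    const lhs: @Vector(3, i32) = .{ 1, 2, 3 };
    // Expected shape of the diagnostic:
    //   error: shift by negative amount '-3'
    //   note: when computing vector element at index '0'
    _ = lhs << @Vector(3, i32){ -3, 0, 0 };
}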
test/cases/compile_errors/shift_by_larger_than_usize.zig (new file, 10 lines)
@@ -0,0 +1,10 @@
+export fn f() usize {
+    const a = comptime 0 <<| (1 << @bitSizeOf(usize));
+    return a;
+}
+
+// error
+// backend=stage2,llvm
+// target=x86_64-linux
+//
+// :2:30: error: this implementation only supports comptime shift amounts of up to 2^64 - 1 bits
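On the pinned x86_64-linux target, `@bitSizeOf(usize)` is 64, so the requested shift amount evaluates to `1 << 64`, that is 2^64, exactly one past the 2^64 - 1 maximum the error message names; pinning the target keeps that amount independent of the host's pointer width.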
@@ -7,4 +7,4 @@ comptime {
 // backend=stage2
 // target=native
 //
-// :2:15: error: operation caused overflow
+// :2:25: error: overflow of integer type 'u8' with value '340'
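The new message names the result type and the concrete out-of-range value: 340 needs nine bits, while `u8` tops out at 255, which makes the diagnostic far more actionable than the old generic "operation caused overflow".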
(2 file diffs suppressed because they are too large)