Mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 05:44:20 +00:00
Merge pull request #24674 from Justus2308/undef-shift-bitwise

Sema: Improve comptime arithmetic undef handling

Commit a495628862
34 changed files with 13303 additions and 9303 deletions
@@ -6077,7 +6077,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
{#header_close#}
{#header_open|Exact Left Shift Overflow#}
<p>At compile-time:</p>
{#code|test_comptime_shlExact_overwlow.zig#}
{#code|test_comptime_shlExact_overflow.zig#}

<p>At runtime:</p>
{#code|runtime_shlExact_overflow.zig#}
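For reference, a minimal sketch of what the renamed comptime doc example might contain (hypothetical; the actual test_comptime_shlExact_overflow.zig in the repository may differ, though the operands here are chosen so that 85 << 2 = 340 matches the expected error shown in the next hunk):

comptime {
    // 85 (0b0101_0101) shifted left by 2 is exactly 340, which does not fit
    // in a u8, so @shlExact is expected to fail with an overflow error.
    const x = @shlExact(@as(u8, 0b0101_0101), 2);
    _ = x;
}

// test_error=overflow of integer type 'u8' with value '340'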
@@ -3,4 +3,4 @@ comptime {
_ = x;
}

// test_error=operation caused overflow
// test_error=overflow of integer type 'u8' with value '340'
@@ -941,10 +941,7 @@ fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime form:
.lhs = Air.internedToRef(try pt.intern(.{ .ptr = .{
.ty = (try pt.manyConstPtrType(mask_elem_ty)).toIntern(),
.base_addr = .{ .uav = .{
.val = try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems },
} }),
.val = (try pt.aggregateValue(mask_ty, mask_elems)).toIntern(),
.orig_ty = (try pt.singleConstPtrType(mask_ty)).toIntern(),
} },
.byte_offset = 0,
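Most hunks in this pull request follow the pattern visible above: a hand-written `pt.intern(.{ .aggregate = ... })` expression is replaced with a call to `pt.aggregateValue` (or `pt.aggregateSplatValue` for repeated elements). Judging only from the call sites in this diff, the helpers presumably wrap the old code roughly like this sketch (hypothetical signatures; the real definitions live on `Zcu.PerThread` and may also enforce the new all-undef invariant documented in InternPool.zig further down):

pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) !Value {
    // One interned index per element/field.
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}

pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, elem: Value) !Value {
    // Every element is the same value (a splat).
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .repeated_elem = elem.toIntern() },
    } }));
}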
@@ -1023,10 +1020,7 @@ fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime form:
break :operand_b Air.internedToRef(try pt.intern(.{ .ptr = .{
.ty = (try pt.manyConstPtrType(elem_ty)).toIntern(),
.base_addr = .{ .uav = .{
.val = try pt.intern(.{ .aggregate = .{
.ty = ct_elems_ty.toIntern(),
.storage = .{ .elems = ct_elems.keys() },
} }),
.val = (try pt.aggregateValue(ct_elems_ty, ct_elems.keys())).toIntern(),
.orig_ty = (try pt.singleConstPtrType(ct_elems_ty)).toIntern(),
} },
.byte_offset = 0,
@@ -2550,10 +2544,7 @@ fn floatFromBigIntVal(
else => unreachable,
};
if (is_vector) {
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(),
.storage = .{ .repeated_elem = scalar_val.toIntern() },
} }));
return pt.aggregateSplatValue(float_ty, scalar_val);
} else {
return scalar_val;
}
@@ -2036,6 +2036,8 @@ pub const Key = union(enum) {
/// Each element/field stored as an `Index`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
/// so the slice length will be one more than the type's array length.
/// There must be at least one element which is not `undefined`. If all elements are
/// undefined, instead create an undefined value of the aggregate type.
aggregate: Aggregate,
/// An instance of a union.
un: Union,
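The two added doc-comment lines introduce an invariant: an interned aggregate must contain at least one defined element, otherwise the caller should intern an undefined value of the aggregate type instead. A caller-side sketch of honoring that rule could look like the following (illustrative only, not code from this PR; it reuses helpers that appear elsewhere in the diff):

fn internArrayValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) !Value {
    if (elems.len > 0) all_undef: {
        for (elems) |elem| {
            if (!pt.zcu.intern_pool.isUndef(elem)) break :all_undef;
        }
        // Every element is undefined: intern `undefined` of the whole
        // aggregate type rather than an aggregate of all-undef elements.
        return pt.undefValue(ty);
    }
    return pt.aggregateValue(ty, elems);
}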
@@ -8401,24 +8403,33 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
assert(sentinel == .none or elem == sentinel);
},
}
switch (ty_key) {
if (aggregate.storage.values().len > 0) switch (ty_key) {
.array_type, .vector_type => {
var any_defined = false;
for (aggregate.storage.values()) |elem| {
if (!ip.isUndef(elem)) any_defined = true;
assert(ip.typeOf(elem) == child);
}
assert(any_defined); // aggregate fields must not be all undefined
},
.struct_type => {
var any_defined = false;
for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
if (!ip.isUndef(elem)) any_defined = true;
assert(ip.typeOf(elem) == field_ty);
}
assert(any_defined); // aggregate fields must not be all undefined
},
.tuple_type => |tuple_type| {
var any_defined = false;
for (aggregate.storage.values(), tuple_type.types.get(ip)) |elem, ty| {
if (!ip.isUndef(elem)) any_defined = true;
assert(ip.typeOf(elem) == ty);
}
assert(any_defined); // aggregate fields must not be all undefined
},
else => unreachable,
}
};

if (len == 0) {
items.appendAssumeCapacity(.{
src/Sema.zig (1152 changes): file diff suppressed because it is too large.
@@ -120,10 +120,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
.values = values,
},
);
return pt.intern(.{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = values },
} });
return (try pt.aggregateValue(.fromInterned(ty), values)).toIntern();
},
.struct_literal => |init| {
const elems = try self.sema.arena.alloc(InternPool.Index, init.names.len);
@@ -205,10 +202,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
try self.sema.declareDependency(.{ .interned = struct_ty });
try self.sema.addTypeReferenceEntry(self.nodeSrc(node), struct_ty);

return try pt.intern(.{ .aggregate = .{
.ty = struct_ty,
.storage = .{ .elems = elems },
} });
return (try pt.aggregateValue(.fromInterned(struct_ty), elems)).toIntern();
},
}
}
@@ -638,10 +632,7 @@ fn lowerArray(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
elems[elems.len - 1] = sentinel.toIntern();
}

return self.sema.pt.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
.storage = .{ .elems = elems },
} });
return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
}

fn lowerEnum(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -752,10 +743,7 @@ fn lowerTuple(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
}
}

return self.sema.pt.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
.storage = .{ .elems = elems },
} });
return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
}

fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -815,12 +803,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
if (value.* == .none) return self.fail(node, "missing field '{f}'", .{name.fmt(ip)});
}

return self.sema.pt.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
.storage = .{
.elems = field_values,
},
} });
return (try self.sema.pt.aggregateValue(res_ty, field_values)).toIntern();
}

fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -867,16 +850,13 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
elems[elems.len - 1] = ptr_info.sentinel;
}

const array_ty = try self.sema.pt.intern(.{ .array_type = .{
const array_ty = try self.sema.pt.arrayType(.{
.len = elems.len,
.sentinel = ptr_info.sentinel,
.child = ptr_info.child,
} });
});

const array = try self.sema.pt.intern(.{ .aggregate = .{
.ty = array_ty,
.storage = .{ .elems = elems },
} });
const array_val = try self.sema.pt.aggregateValue(array_ty, elems);

const many_item_ptr_type = try self.sema.pt.intern(.{ .ptr_type = .{
.child = ptr_info.child,
@@ -894,8 +874,8 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
.ty = many_item_ptr_type,
.base_addr = .{
.uav = .{
.orig_ty = (try self.sema.pt.singleConstPtrType(.fromInterned(array_ty))).toIntern(),
.val = array,
.orig_ty = (try self.sema.pt.singleConstPtrType(array_ty)).toIntern(),
.val = array_val.toIntern(),
},
},
.byte_offset = 0,
@@ -994,8 +974,5 @@ fn lowerVector(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
elem.* = try self.lowerExprKnownResTy(elem_nodes.at(@intCast(i)), .fromInterned(vector_info.child));
}

return self.sema.pt.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
.storage = .{ .elems = elems },
} });
return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
}
src/Sema/arith.zig (1002 changes): file diff suppressed because it is too large.
@@ -491,10 +491,7 @@ const PackValueBits = struct {
}
},
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
.array => {
// Each element is padded up to its ABI size. The final element does not have trailing padding.
@@ -525,10 +522,7 @@ const PackValueBits = struct {
try pack.padding(elem_ty.bitSize(zcu));
}

return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
.@"struct" => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
@@ -568,10 +562,7 @@ const PackValueBits = struct {
const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
elem.* = val.toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
.@"packed" => {
// All fields are in order with no padding.
@@ -581,10 +572,7 @@ const PackValueBits = struct {
const field_ty = ty.fieldType(i, zcu);
elem.* = (try pack.get(field_ty)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
},
.@"union" => {
@@ -980,13 +980,14 @@ fn unflattenArray(
elems: []const InternPool.Index,
next_idx: *u64,
) Allocator.Error!Value {
const zcu = sema.pt.zcu;
const pt = sema.pt;
const zcu = pt.zcu;
const arena = sema.arena;

if (ty.zigTypeTag(zcu) != .array) {
const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
next_idx.* += 1;
return sema.pt.getCoerced(val, ty);
return pt.getCoerced(val, ty);
}

const elem_ty = ty.childType(zcu);
@@ -998,10 +999,7 @@ fn unflattenArray(
// TODO: validate sentinel
_ = try unflattenArray(sema, elem_ty, elems, next_idx);
}
return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = buf },
} }));
return pt.aggregateValue(ty, buf);
}

/// Given a `MutableValue` representing a potentially-nested array, treats `index` as an index into
src/Type.zig (32 changes)
@@ -2490,15 +2490,11 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {

inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
} }));
if (seq_type.len + @intFromBool(has_sentinel) == 0) {
return try pt.aggregateValue(ty, &.{});
}
if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| {
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
} }));
return try pt.aggregateSplatValue(ty, opv);
}
return null;
},
@@ -2567,10 +2563,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {

// In this case the struct has no runtime-known fields and
// therefore has one possible value.
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} }));
return try pt.aggregateValue(ty, field_vals);
},

.tuple_type => |tuple| {
@@ -2582,10 +2575,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
const duped_values = try zcu.gpa.dupe(InternPool.Index, tuple.values.get(ip));
defer zcu.gpa.free(duped_values);
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = duped_values },
} }));
return try pt.aggregateValue(ty, duped_values);
},

.union_type => {
@@ -2957,10 +2947,7 @@ pub fn getParentNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex
pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
const zcu = pt.zcu;
const scalar = try minIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
} })) else scalar;
return if (ty.zigTypeTag(zcu) == .vector) pt.aggregateSplatValue(dest_ty, scalar) else scalar;
}

/// Asserts that the type is an integer.
@@ -2987,10 +2974,7 @@ pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
const zcu = pt.zcu;
const scalar = try maxIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
} })) else scalar;
return if (ty.zigTypeTag(zcu) == .vector) pt.aggregateSplatValue(dest_ty, scalar) else scalar;
}

/// The returned Value will have type dest_ty.
src/Value.zig (823 changes)
@@ -653,10 +653,7 @@ pub fn readFromMemory(
elem.* = (try readFromMemory(elem_ty, zcu, buffer[offset..], arena)).toIntern();
offset += @intCast(elem_size);
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
.vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
@@ -677,10 +674,7 @@ pub fn readFromMemory(
const sz: usize = @intCast(field_ty.abiSize(zcu));
field_val.* = (try readFromMemory(field_ty, zcu, buffer[off..(off + sz)], arena)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} }));
return pt.aggregateValue(ty, field_vals);
},
.@"packed" => {
const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
@@ -826,10 +820,7 @@ pub fn readFromPackedMemory(
elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
bits += elem_bit_size;
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(ty, elems);
},
.@"struct" => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
@@ -843,10 +834,7 @@ pub fn readFromPackedMemory(
field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
bits += field_bits;
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} }));
return pt.aggregateValue(ty, field_vals);
},
.@"union" => switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
@@ -925,43 +913,6 @@ pub fn popCount(val: Value, ty: Type, zcu: *Zcu) u64 {
return @intCast(bigint.popCount(ty.intInfo(zcu).bits));
}

pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
const zcu = pt.zcu;
const info = ty.intInfo(zcu);

var buffer: Value.BigIntSpace = undefined;
const operand_bigint = val.toBigInt(&buffer, zcu);

const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);

return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
const zcu = pt.zcu;
const info = ty.intInfo(zcu);

// Bit count must be evenly divisible by 8
assert(info.bits % 8 == 0);

var buffer: Value.BigIntSpace = undefined;
const operand_bigint = val.toBigInt(&buffer, zcu);

const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);

return pt.intValue_big(ty, result_bigint.toConst());
}

/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
pub fn intBitCountTwosComp(self: Value, zcu: *Zcu) usize {
@@ -1386,15 +1337,10 @@ pub fn isUndef(val: Value, zcu: *const Zcu) bool {
return zcu.intern_pool.isUndef(val.toIntern());
}

/// TODO: check for cases such as array that is not marked undef but all the element
/// values are marked undef, or struct that is not marked undef but all fields are marked
/// undef, etc.
pub fn isUndefDeep(val: Value, zcu: *const Zcu) bool {
return val.isUndef(zcu);
}

/// `val` must have a numeric or vector type.
/// Returns whether `val` is undefined or contains any undefined elements.
/// Returns the index of the first undefined element it encounters
/// or `null` if no element is undefined.
pub fn anyScalarIsUndef(val: Value, zcu: *const Zcu) bool {
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.undef => return true,
@@ -1530,10 +1476,7 @@ pub fn floatFromIntAdvanced(
const elem_val = try val.elemValue(pt, i);
scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_ty, result_data);
}
return floatFromIntScalar(val, float_ty, pt, strat);
}
@@ -1605,273 +1548,6 @@ pub fn numberMin(lhs: Value, rhs: Value, zcu: *Zcu) Value {
};
}

/// operands must be (vectors of) integers or bools; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(pt, i);
scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return bitwiseNotScalar(val, ty, arena, pt);
}

/// operands must be integers or bools; handles undefined.
pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (val.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());

const info = ty.intInfo(zcu);

if (info.bits == 0) {
return val;
}

// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);

var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
return pt.intValue_big(ty, result_bigint.toConst());
}

/// operands must be (vectors of) integers or bools; handles undefined scalars.
pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return bitwiseAndScalar(lhs, rhs, ty, allocator, pt);
}

/// operands must be integers or bools; handles undefined.
pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
// If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
// still zero out some bits.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
const lhs: Value, const rhs: Value = make_defined: {
const lhs_undef = orig_lhs.isUndef(zcu);
const rhs_undef = orig_rhs.isUndef(zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
0b11 => return pt.undefValue(ty),
};
};

if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());

// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
return pt.intValue_big(ty, result_bigint.toConst());
}

/// Given an integer or boolean type, creates an value of that with the bit pattern 0xAA.
/// This is used to convert undef values into 0xAA when performing e.g. bitwise operations.
fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.toIntern() == .bool_type) return Value.true;
const info = ty.intInfo(zcu);

const buf = try arena.alloc(u8, (info.bits + 7) / 8);
@memset(buf, 0xAA);

const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.readTwosComplement(buf, info.bits, zcu.getTarget().cpu.arch.endian(), info.signedness);
return pt.intValue_big(ty, result_bigint.toConst());
}

/// operands must be (vectors of) integers or bools; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return bitwiseNandScalar(lhs, rhs, ty, arena, pt);
}

/// operands must be integers or bools; handles undefined.
pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));

const anded = try bitwiseAnd(lhs, rhs, ty, arena, pt);
const all_ones = if (ty.isSignedInt(zcu)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
return bitwiseXor(anded, all_ones, ty, arena, pt);
}

/// operands must be (vectors of) integers or bools; handles undefined scalars.
pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return bitwiseOrScalar(lhs, rhs, ty, allocator, pt);
}

/// operands must be integers or bools; handles undefined.
pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
// If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
// still zero out some bits.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
const zcu = pt.zcu;
const lhs: Value, const rhs: Value = make_defined: {
const lhs_undef = orig_lhs.isUndef(zcu);
const rhs_undef = orig_rhs.isUndef(zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
0b11 => return pt.undefValue(ty),
};
};

if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());

// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
return pt.intValue_big(ty, result_bigint.toConst());
}

/// operands must be (vectors of) integers or bools; handles undefined scalars.
pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return bitwiseXorScalar(lhs, rhs, ty, allocator, pt);
}

/// operands must be integers or bools; handles undefined.
pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());

// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const zcu = pt.zcu;
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
return pt.intValue_big(ty, result_r.toConst());
}

/// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
pub fn isNan(val: Value, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
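The `intValueAa` helper removed above models an undefined operand as the alternating bit pattern 0xAA so that the defined operand can still determine some result bits. A tiny self-contained illustration of that idea (not part of the PR; plain Zig with assumed values):

const std = @import("std");

test "0xAA stand-in still lets the defined operand decide bits" {
    const undef_stand_in: u8 = 0xAA; // placeholder for an undefined operand
    const defined: u8 = 0x0F;
    // Zero bits in the defined operand force zeros under AND;
    // one bits in the defined operand force ones under OR.
    try std.testing.expectEqual(@as(u8, 0x0A), undef_stand_in & defined);
    try std.testing.expectEqual(@as(u8, 0xAF), undef_stand_in | defined);
}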
@@ -1892,6 +1568,7 @@ pub fn isInf(val: Value, zcu: *const Zcu) bool {
};
}

/// Returns true if the value is a floating point type and is negative infinite. Returns false otherwise.
pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.float => |float| switch (float.storage) {
@@ -1901,387 +1578,6 @@ pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
};
}

pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return floatRemScalar(lhs, rhs, float_type, pt);
}

pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
16 => .{ .f16 = @rem(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
32 => .{ .f32 = @rem(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
64 => .{ .f64 = @rem(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
80 => .{ .f80 = @rem(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
128 => .{ .f128 = @rem(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
else => unreachable,
};
return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
} }));
}

pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return floatModScalar(lhs, rhs, float_type, pt);
}

pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
const target = zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
16 => .{ .f16 = @mod(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
32 => .{ .f32 = @mod(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
64 => .{ .f64 = @mod(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
80 => .{ .f80 = @mod(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
128 => .{ .f128 = @mod(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
else => unreachable,
};
return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
} }));
}

pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(pt, i);
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return intTruncScalar(val, ty, allocator, signedness, bits, pt);
}

/// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
pub fn intTruncBitsAsValue(
val: Value,
ty: Type,
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: Value,
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(pt, i);
const bits_elem = try bits.elemValue(pt, i);
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(zcu)), pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(zcu)), pt);
}

pub fn intTruncScalar(
val: Value,
ty: Type,
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: u16,
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
if (bits == 0) return pt.intValue(ty, 0);

if (val.isUndef(zcu)) return pt.undefValue(ty);

var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space, zcu);

const limbs = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };

result_bigint.truncate(val_bigint, signedness, bits);
return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return shlScalar(lhs, rhs, ty, allocator, pt);
}

pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const zcu = pt.zcu;
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
.positive = undefined,
.len = undefined,
};
result_bigint.shiftLeft(lhs_bigint, shift);
if (ty.toIntern() != .comptime_int_type) {
const int_info = ty.intInfo(zcu);
result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits);
}

return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn shlWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
allocator: Allocator,
pt: Zcu.PerThread,
) !OverflowArithmeticResult {
if (ty.zigTypeTag(pt.zcu) == .vector) {
const vec_len = ty.vectorLen(pt.zcu);
const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
const result_data = try allocator.alloc(InternPool.Index, vec_len);
const scalar_ty = ty.scalarType(pt.zcu);
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return OverflowArithmeticResult{
.overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
} })),
.wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} })),
};
}
return shlWithOverflowScalar(lhs, rhs, ty, allocator, pt);
}

pub fn shlWithOverflowScalar(
lhs: Value,
rhs: Value,
ty: Type,
allocator: Allocator,
pt: Zcu.PerThread,
) !OverflowArithmeticResult {
const zcu = pt.zcu;
const info = ty.intInfo(zcu);
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
.positive = undefined,
.len = undefined,
};
result_bigint.shiftLeft(lhs_bigint, shift);
const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits);
if (overflowed) {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
.overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
.wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()),
};
}

pub fn shlSat(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return shlSatScalar(lhs, rhs, ty, arena, pt);
}

pub fn shlSatScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const zcu = pt.zcu;
const info = ty.intInfo(zcu);

var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{
.limbs = limbs,
.positive = undefined,
.len = undefined,
};
result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits);
return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn shlTrunc(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return shlTruncScalar(lhs, rhs, ty, arena, pt);
}

pub fn shlTruncScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
const shifted = try lhs.shl(rhs, ty, arena, pt);
const int_info = ty.intInfo(pt.zcu);
const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, pt);
return truncated;
}

pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
}
return shrScalar(lhs, rhs, ty, allocator, pt);
}

pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const zcu = pt.zcu;
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift: usize = @intCast(rhs.toUnsignedInt(zcu));

const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {
// The shift is enough to remove all the bits from the number, which means the
// result is 0 or -1 depending on the sign.
if (lhs_bigint.positive) {
return pt.intValue(ty, 0);
} else {
return pt.intValue(ty, -1);
}
}

const limbs = try allocator.alloc(
std.math.big.Limb,
result_limbs,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
.positive = undefined,
.len = undefined,
};
result_bigint.shiftRight(lhs_bigint, shift);
return pt.intValue_big(ty, result_bigint.toConst());
}

pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
@@ -2290,10 +1586,7 @@ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
const elem_val = try val.elemValue(pt, i);
scalar.* = (try sqrtScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return sqrtScalar(val, float_type, pt);
}
@@ -2324,10 +1617,7 @@ pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
const elem_val = try val.elemValue(pt, i);
scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return sinScalar(val, float_type, pt);
}
@@ -2358,10 +1648,7 @@ pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
const elem_val = try val.elemValue(pt, i);
scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return cosScalar(val, float_type, pt);
}
@@ -2392,10 +1679,7 @@ pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
const elem_val = try val.elemValue(pt, i);
scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return tanScalar(val, float_type, pt);
}
@@ -2426,10 +1710,7 @@ pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
const elem_val = try val.elemValue(pt, i);
scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return expScalar(val, float_type, pt);
}
@@ -2460,10 +1741,7 @@ pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
const elem_val = try val.elemValue(pt, i);
scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return exp2Scalar(val, float_type, pt);
}
@@ -2494,10 +1772,7 @@ pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
const elem_val = try val.elemValue(pt, i);
scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return logScalar(val, float_type, pt);
}
@@ -2528,10 +1803,7 @@ pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
const elem_val = try val.elemValue(pt, i);
scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return log2Scalar(val, float_type, pt);
}
@@ -2562,10 +1834,7 @@ pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
const elem_val = try val.elemValue(pt, i);
scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return log10Scalar(val, float_type, pt);
}
@@ -2596,10 +1865,7 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const elem_val = try val.elemValue(pt, i);
scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(ty, result_data);
}
return absScalar(val, ty, pt, arena);
}
@@ -2649,10 +1915,7 @@ pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
const elem_val = try val.elemValue(pt, i);
scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return floorScalar(val, float_type, pt);
}
@@ -2683,10 +1946,7 @@ pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
const elem_val = try val.elemValue(pt, i);
scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return ceilScalar(val, float_type, pt);
}
@@ -2717,10 +1977,7 @@ pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
const elem_val = try val.elemValue(pt, i);
scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return roundScalar(val, float_type, pt);
}
@@ -2751,10 +2008,7 @@ pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
const elem_val = try val.elemValue(pt, i);
scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return truncScalar(val, float_type, pt);
}
@@ -2794,10 +2048,7 @@ pub fn mulAdd(
const addend_elem = try addend.elemValue(pt, i);
scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, pt)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
} }));
return pt.aggregateValue(float_type, result_data);
}
return mulAddScalar(float_type, mulend1, mulend2, addend, pt);
}
@ -3682,17 +2933,17 @@ pub fn resolveLazy(
}
if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
}
return if (resolved_elems.len == 0) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .elems = resolved_elems },
} }));
return if (resolved_elems.len == 0)
val
else
pt.aggregateValue(.fromInterned(aggregate.ty), resolved_elems);
},
.repeated_elem => |elem| {
const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern();
return if (resolved_elem == elem) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .repeated_elem = resolved_elem },
} }));
const resolved_elem = try Value.fromInterned(elem).resolveLazy(arena, pt);
return if (resolved_elem.toIntern() == elem)
val
else
pt.aggregateSplatValue(.fromInterned(aggregate.ty), resolved_elem);
},
},
.un => |un| {
@ -3909,10 +3160,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
const field_ty = ty.fieldType(field_idx, zcu);
field_val.* = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &field_vals },
} }));
return pt.aggregateValue(ty, &field_vals);
},
.by_name => {
const struct_obj = zcu.typeToStruct(ty) orelse return error.TypeMismatch;
@ -3934,10 +3182,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
field_val.* = default_init;
}
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} }));
return pt.aggregateValue(ty, field_vals);
},
},
};
@ -3327,10 +3327,7 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
.byte_offset = 0,
} }),
};
test_fn_val.* = try pt.intern(.{ .aggregate = .{
.ty = test_fn_ty.toIntern(),
.storage = .{ .elems = &test_fn_fields },
} });
test_fn_val.* = (try pt.aggregateValue(test_fn_ty, &test_fn_fields)).toIntern();
}
const array_ty = try pt.arrayType(.{
@ -3338,13 +3335,9 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
.child = test_fn_ty.toIntern(),
.sentinel = .none,
});
const array_val = try pt.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .elems = test_fn_vals },
} });
break :array .{
.orig_ty = (try pt.singleConstPtrType(array_ty)).toIntern(),
.val = array_val,
.val = (try pt.aggregateValue(array_ty, test_fn_vals)).toIntern(),
};
};
@ -3672,6 +3665,31 @@ pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) All
}));
}
pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) Allocator.Error!Value {
for (elems) |elem| {
if (!Value.fromInterned(elem).isUndef(pt.zcu)) break;
} else if (elems.len > 0) {
return pt.undefValue(ty); // all-undef
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
}
/// Asserts that `ty` is either an array or a vector.
pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, repeated_elem: Value) Allocator.Error!Value {
switch (ty.zigTypeTag(pt.zcu)) {
.array, .vector => {},
else => unreachable,
}
if (repeated_elem.isUndef(pt.zcu)) return pt.undefValue(ty);
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = repeated_elem.toIntern() },
} }));
}
/// This function casts the float representation down to the representation of the type, potentially
/// losing data if the representation wasn't correct.
pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
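The two helpers added above centralize one rule: an aggregate whose elements are all undefined is interned as a plain undefined value of the aggregate type. Below is a minimal, standalone sketch of that normalization rule, for illustration only; Elem and Aggregate are hypothetical stand-in types and are not the compiler's InternPool API.

const std = @import("std");

const Elem = union(enum) {
    undef,
    int: i64,
};

const Aggregate = union(enum) {
    undef,
    elems: []const Elem,
};

// Mirrors the idea behind pt.aggregateValue: if every element is undefined
// (and there is at least one element), collapse to a single undef aggregate.
fn aggregateValue(elems: []const Elem) Aggregate {
    for (elems) |elem| {
        if (elem != .undef) break;
    } else if (elems.len > 0) {
        return .undef;
    }
    return .{ .elems = elems };
}

test "all-undef aggregates collapse to a single undef value" {
    try std.testing.expect(aggregateValue(&.{ .undef, .undef }) == .undef);
    try std.testing.expect(aggregateValue(&.{ .undef, .{ .int = 1 } }) != .undef);
    try std.testing.expect(aggregateValue(&.{}) != .undef);
}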
@ -3131,7 +3131,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const zcu = pt.zcu;
assert(!isByRef(ty, zcu, cg.target));
const ip = &zcu.intern_pool;
if (val.isUndefDeep(zcu)) return cg.emitUndefined(ty);
if (val.isUndef(zcu)) return cg.emitUndefined(ty);
switch (ip.indexToKey(val.ip_index)) {
.int_type,
@ -170058,12 +170058,9 @@ fn airTrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
});
const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));
const splat_val = try pt.intern(.{ .aggregate = .{
.ty = splat_ty.ip_index,
.storage = .{ .repeated_elem = mask_val.ip_index },
} });
const splat_val = try pt.aggregateSplatValue(splat_ty, mask_val);
const splat_mcv = try self.lowerValue(.fromInterned(splat_val));
const splat_mcv = try self.lowerValue(splat_val);
const splat_addr_mcv: MCValue = switch (splat_mcv) {
.memory, .indirect, .load_frame => splat_mcv.address(),
else => .{ .register = try self.copyToTmpRegister(.usize, splat_mcv.address()) },
@ -171693,12 +171690,12 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(shift_lock);
const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type });
const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
const mask_mcv = try self.lowerValue(try pt.aggregateValue(
mask_ty,
&([1]InternPool.Index{
(try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
} ++ [1]InternPool.Index{.zero_u8} ** 15) },
} })));
} ++ [1]InternPool.Index{.zero_u8} ** 15),
));
const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
defer self.register_manager.unlockReg(mask_addr_lock);
@ -181139,10 +181136,7 @@ fn genSetReg(
.child = .u8_type,
});
try self.genSetReg(dst_reg, full_ty, try self.lowerValue(
.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = full_ty.toIntern(),
.storage = .{ .repeated_elem = (try pt.intValue(.u8, 0xaa)).toIntern() },
} })),
try pt.aggregateSplatValue(full_ty, try pt.intValue(.u8, 0xaa)),
), opts);
},
.x87 => try self.genSetReg(dst_reg, .f80, try self.lowerValue(
@ -183565,10 +183559,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
mask_elem_ty,
@as(u8, 1) << @truncate(bit),
)).toIntern();
const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems },
} })));
const mask_mcv = try self.lowerValue(try pt.aggregateValue(mask_ty, mask_elems));
const mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
.mod = .{ .rm = .{ .size = self.memSize(ty) } },
@ -184296,10 +184287,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
else
try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern();
}
const select_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = select_mask_ty.toIntern(),
.storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
} })));
const select_mask_mcv = try self.lowerValue(
try pt.aggregateValue(select_mask_ty, select_mask_elems[0..mask_elems.len]),
);
if (self.hasFeature(.sse4_1)) {
const mir_tag: Mir.Inst.FixedTag = .{
@ -184441,10 +184431,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
})).toIntern();
}
const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const lhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = lhs_mask_ty.toIntern(),
.storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
} })));
const lhs_mask_mcv = try self.lowerValue(
try pt.aggregateValue(lhs_mask_ty, lhs_mask_elems[0..max_abi_size]),
);
const lhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, lhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@ -184472,10 +184461,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
})).toIntern();
}
const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const rhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = rhs_mask_ty.toIntern(),
.storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
} })));
const rhs_mask_mcv = try self.lowerValue(
try pt.aggregateValue(rhs_mask_ty, rhs_mask_elems[0..max_abi_size]),
);
const rhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, rhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@ -192924,36 +192912,30 @@ const Select = struct {
break :res_scalar .{ res_scalar_ty, try pt.intValue_big(res_scalar_ty, res_big_int.toConst()) };
},
};
const res_val: Value = if (res_vector_len) |len| .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{
.len = len,
.child = res_scalar_ty.toIntern(),
})).toIntern(),
.storage = .{ .repeated_elem = res_scalar_val.toIntern() },
} })) else res_scalar_val;
const res_val = if (res_vector_len) |len| try pt.aggregateSplatValue(try pt.vectorType(.{
.len = len,
.child = res_scalar_ty.toIntern(),
}), res_scalar_val) else res_scalar_val;
return .{ try cg.tempMemFromValue(res_val), true };
},
.f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = 2, .child = .f64_type })).toIntern(),
.storage = .{ .elems = &.{
.f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(
try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f64_type }), &.{
(try pt.floatValue(.f64, @as(f64, 0x1p52))).toIntern(),
(try pt.floatValue(.f64, @as(f64, 0x1p84))).toIntern(),
} },
} }))), true },
.u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = 4, .child = .u32_type })).toIntern(),
.storage = .{ .elems = &(.{
}),
), true },
.u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(
try pt.aggregateValue(try pt.vectorType(.{ .len = 4, .child = .u32_type }), &(.{
(try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p52))) >> 32)).toIntern(),
(try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p84))) >> 32)).toIntern(),
} ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2) },
} }))), true },
.f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = 2, .child = .f32_type })).toIntern(),
.storage = .{ .elems = &.{
} ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2)),
), true },
.f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(
try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f32_type }), &.{
(try pt.floatValue(.f32, @as(f32, 0))).toIntern(),
(try pt.floatValue(.f32, @as(f32, 0x1p64))).toIntern(),
} },
} }))), true },
}),
), true },
.pshufb_splat_mem => |splat_spec| {
const zcu = pt.zcu;
assert(spec.type.isVector(zcu) and spec.type.childType(zcu).toIntern() == .u8_type);
@ -193110,13 +193092,10 @@ const Select = struct {
const mem_size = cg.unalignedSize(spec.type);
return .{ try cg.tempMemFromAlignedValue(
if (mem_size < 16) .fromByteUnits(mem_size) else .none,
.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = if (mem_size < 16)
(try pt.arrayType(.{ .len = elems.len, .child = elem_ty.toIntern() })).toIntern()
else
spec.type.toIntern(),
.storage = .{ .elems = elems },
} })),
try pt.aggregateValue(if (mem_size < 16) try pt.arrayType(.{
.len = elems.len,
.child = elem_ty.toIntern(),
}) else spec.type, elems),
), true };
},
.splat_float_mem => |splat_spec| {
@ -193133,10 +193112,7 @@ const Select = struct {
.zero => 0.0,
}))).toIntern());
@memset(elems[inside_len..], (try pt.floatValue(elem_ty, splat_spec.outside)).toIntern());
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
.storage = .{ .elems = elems },
} }))), true };
return .{ try cg.tempMemFromValue(try pt.aggregateValue(spec.type, elems)), true };
},
.frame => |frame_index| .{ try cg.tempInit(spec.type, .{ .load_frame = .{
.index = frame_index,
@ -327,7 +327,7 @@ pub fn generateSymbol(
log.debug("generateSymbol: val = {f}", .{val.fmtValue(pt)});
if (val.isUndefDeep(zcu)) {
if (val.isUndef(zcu)) {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0xaa, abi_size);
return;
@ -1012,7 +1012,7 @@ pub const DeclGen = struct {
};
const ty = val.typeOf(zcu);
if (val.isUndefDeep(zcu)) return dg.renderUndefValue(w, ty, location);
if (val.isUndef(zcu)) return dg.renderUndefValue(w, ty, location);
const ctype = try dg.ctypeFromType(ty, location.toCTypeKind());
switch (ip.indexToKey(val.toIntern())) {
// types, not values
@ -4216,7 +4216,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndefDeep(zcu) else false;
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndef(zcu) else false;
const w = &f.object.code.writer;
if (val_is_undef) {
@ -4942,7 +4942,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
const tag = f.air.instructions.items(.tag)[@intFromEnum(inst)];
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndefDeep(zcu) else false;
const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndef(zcu) else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@ -7117,7 +7117,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs);
const elem_abi_size = elem_ty.abiSize(zcu);
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndef(zcu) else false;
const w = &f.object.code.writer;
if (val_is_undef) {
@ -8338,7 +8338,7 @@ fn formatIntLiteral(data: FormatIntLiteralContext, w: *std.io.Writer) std.io.Wri
defer allocator.free(undef_limbs);
var int_buf: Value.BigIntSpace = undefined;
const int = if (data.val.isUndefDeep(zcu)) blk: {
const int = if (data.val.isUndef(zcu)) blk: {
undef_limbs = allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits)) catch return error.WriteFailed;
@memset(undef_limbs, undefPattern(BigIntLimb));
@ -3575,7 +3575,7 @@ pub const Object = struct {
const val = Value.fromInterned(arg_val);
const val_key = ip.indexToKey(val.toIntern());
if (val.isUndefDeep(zcu)) return o.builder.undefConst(llvm_int_ty);
if (val.isUndef(zcu)) return o.builder.undefConst(llvm_int_ty);
const ty = Type.fromInterned(val_key.typeOf());
switch (val_key) {
@ -3666,7 +3666,7 @@ pub const Object = struct {
const val = Value.fromInterned(arg_val);
const val_key = ip.indexToKey(val.toIntern());
if (val.isUndefDeep(zcu)) {
if (val.isUndef(zcu)) {
return o.builder.undefConst(try o.lowerType(pt, Type.fromInterned(val_key.typeOf())));
}
@ -5574,7 +5574,7 @@ pub const FuncGen = struct {
const ptr_ty = try pt.singleMutPtrType(ret_ty);
const operand = try self.resolveInst(un_op);
const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndef(zcu) else false;
if (val_is_undef and safety) undef: {
const ptr_info = ptr_ty.ptrInfo(zcu);
const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
@ -5629,7 +5629,7 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(o, pt, fn_info);
const operand = try self.resolveInst(un_op);
const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndef(zcu) else false;
const alignment = ret_ty.abiAlignment(zcu).toLlvm();
if (val_is_undef and safety) {
@ -9680,7 +9680,7 @@ pub const FuncGen = struct {
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(zcu);
const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndef(zcu) else false;
if (val_is_undef) {
const owner_mod = self.ng.ownerModule();
@ -10021,7 +10021,7 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(ptr_ty.ptrInfo(zcu));
if (try self.air.value(bin_op.rhs, pt)) |elem_val| {
if (elem_val.isUndefDeep(zcu)) {
if (elem_val.isUndef(zcu)) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
@ -779,7 +779,7 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
const ip = &zcu.intern_pool;
log.debug("lowering constant: ty = {f}, val = {f}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
if (val.isUndefDeep(zcu)) {
if (val.isUndef(zcu)) {
return cg.module.constUndef(result_ty_id);
}
@ -1303,7 +1303,7 @@ fn getNavOutputSection(coff: *Coff, nav_index: InternPool.Nav.Index) u16 {
const zig_ty = ty.zigTypeTag(zcu);
const val = Value.fromInterned(nav.status.fully_resolved.val);
const index: u16 = blk: {
if (val.isUndefDeep(zcu)) {
if (val.isUndef(zcu)) {
// TODO in release-fast and release-small, we should put undef in .bss
break :blk coff.data_section_index.?;
}
@ -1197,7 +1197,7 @@ fn getNavShdrIndex(
self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec);
return osec;
}
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
if (nav_init != .none and Value.fromInterned(nav_init).isUndef(zcu))
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => {
if (self.data_index) |symbol_index|
@ -1175,7 +1175,7 @@ fn getNavOutputSection(
);
}
if (is_const) return macho_file.zig_const_sect_index.?;
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
if (nav_init != .none and Value.fromInterned(nav_init).isUndef(zcu))
return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?,
.ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?,
@ -65,10 +65,7 @@ pub const MutableValue = union(enum) {
.ty = sv.ty,
.val = (try sv.child.intern(pt, arena)).toIntern(),
} }),
.repeated => |sv| try pt.intern(.{ .aggregate = .{
.ty = sv.ty,
.storage = .{ .repeated_elem = (try sv.child.intern(pt, arena)).toIntern() },
} }),
.repeated => |sv| return pt.aggregateSplatValue(.fromInterned(sv.ty), try sv.child.intern(pt, arena)),
.bytes => |b| try pt.intern(.{ .aggregate = .{
.ty = b.ty,
.storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) },
@ -78,10 +75,7 @@ pub const MutableValue = union(enum) {
for (a.elems, elems) |mut_elem, *interned_elem| {
interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern();
}
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = a.ty,
.storage = .{ .elems = elems },
} }));
return pt.aggregateValue(.fromInterned(a.ty), elems);
},
.slice => |s| try pt.intern(.{ .slice = .{
.ty = s.ty,
@ -154,12 +154,6 @@ test "Saturating Shift Left where lhs is of a computed type" {
try expect(value.exponent == 0);
}
comptime {
var image: [1]u8 = undefined;
_ = &image;
_ = @shlExact(@as(u16, image[0]), 8);
}
test "Saturating Shift Left" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -30,7 +30,9 @@ export fn d(rhs: @Vector(3, i32)) void {
//
// :2:25: error: shift by negative amount '-1'
// :7:12: error: shift by negative amount '-2'
// :11:47: error: shift by negative amount '-3' at index '0'
// :16:27: error: shift by negative amount '-4' at index '1'
// :11:47: error: shift by negative amount '-3'
// :11:47: note: when computing vector element at index '0'
// :16:27: error: shift by negative amount '-4'
// :16:27: note: when computing vector element at index '1'
// :20:25: error: shift by signed type 'i32'
// :24:40: error: shift by signed type '@Vector(3, i32)'
test/cases/compile_errors/shift_by_larger_than_usize.zig (Normal file, +9)
@ -0,0 +1,9 @@
export fn f() usize {
const a = comptime 0 <<| (1 << @bitSizeOf(usize));
return a;
}
// error
// target=x86_64-linux
//
// :2:30: error: this implementation only supports comptime shift amounts of up to 2^64 - 1 bits
@ -7,4 +7,4 @@ comptime {
// backend=stage2
// target=native
//
// :2:15: error: operation caused overflow
// :2:15: error: overflow of integer type 'u8' with value '340'
test/cases/compile_errors/shl_exact_on_undefined_value.zig (Normal file, +11)
@ -0,0 +1,11 @@
comptime {
var a: i64 = undefined;
var b: u6 = undefined;
_ = &a;
_ = &b;
_ = @shlExact(a, b);
}
// error
//
// :6:19: error: use of undefined value here causes illegal behavior
test/cases/compile_errors/shl_on_undefined_value.zig (Normal file, +11)
@ -0,0 +1,11 @@
comptime {
var a: i64 = undefined;
var b: u6 = undefined;
_ = &a;
_ = &b;
_ = a << b;
}
// error
//
// :6:9: error: use of undefined value here causes illegal behavior
@ -0,0 +1,11 @@
comptime {
var a: i64 = undefined;
var b: u6 = undefined;
_ = &a;
_ = &b;
_ = @shlWithOverflow(a, b);
}
// error
//
// :6:26: error: use of undefined value here causes illegal behavior
test/cases/compile_errors/shr_exact_on_undefined_value.zig (Normal file, +11)
@ -0,0 +1,11 @@
comptime {
var a: i64 = undefined;
var b: u6 = undefined;
_ = &a;
_ = &b;
_ = @shrExact(a, b);
}
// error
//
// :6:19: error: use of undefined value here causes illegal behavior
test/cases/compile_errors/shr_on_undefined_value.zig (Normal file, +11)
@ -0,0 +1,11 @@
comptime {
var a: i64 = undefined;
var b: u6 = undefined;
_ = &a;
_ = &b;
_ = a >> b;
}
// error
//
// :6:9: error: use of undefined value here causes illegal behavior
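The new test cases above all pin down the failure path: a comptime shift whose operand is undefined is rejected. For contrast, here is a minimal standalone sketch (not part of this patch) showing that the same shift forms still evaluate at comptime when both operands are defined:

const std = @import("std");

test "comptime shifts on defined operands still work" {
    const r = comptime blk: {
        const a: i64 = 1;
        const b: u6 = 3;
        // Both operands are defined, so <<, >>, @shlExact and @shrExact
        // all evaluate normally at comptime: 8 + 8 + 1 + 1 = 18.
        break :blk @shlExact(a, b) + (a << b) + ((a << b) >> b) + @shrExact(a << b, b);
    };
    try std.testing.expectEqual(@as(i64, 18), r);
}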
File diff suppressed because it is too large
test/cases/compile_errors/undef_shifts_are_illegal.zig (Normal file, +3004)
File diff suppressed because it is too large