Mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 13:54:21 +00:00

commit 2d5bc01469 (parent c8b0d4d149)
behavior: get more test cases passing with llvm

10 changed files with 753 additions and 803 deletions

@@ -621,8 +621,7 @@ pub const Key = union(enum) {

     pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void {
         const KeyTag = @typeInfo(Key).Union.tag_type.?;
-        const key_tag: KeyTag = key;
-        std.hash.autoHash(hasher, key_tag);
+        std.hash.autoHash(hasher, @as(KeyTag, key));
         switch (key) {
             inline .int_type,
             .ptr_type,

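Aside: a minimal standalone sketch (not from the commit) of the tag-hashing idiom the "+" line above uses. Casting a tagged union to its generated tag type yields the active tag, so only the variant, not its payload, feeds the hasher; the Key union below is a made-up stand-in, not the compiler's:

    const std = @import("std");

    const Key = union(enum) { int_type: u32, ptr_type: u8 };

    test "hash only the active tag" {
        const KeyTag = @typeInfo(Key).Union.tag_type.?;
        var hasher = std.hash.Wyhash.init(0);
        const key: Key = .{ .int_type = 32 };
        // Same expression shape as the diff: @as(Tag, union_value).
        std.hash.autoHash(&hasher, @as(KeyTag, key));
        _ = hasher.final();
    }
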
@@ -710,39 +709,58 @@ pub const Key = union(enum) {

             .aggregate => |aggregate| {
                 std.hash.autoHash(hasher, aggregate.ty);
-                switch (ip.indexToKey(aggregate.ty)) {
-                    .array_type => |array_type| if (array_type.child == .u8_type) {
-                        switch (aggregate.storage) {
-                            .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
-                            .elems => |elems| {
-                                var buffer: Key.Int.Storage.BigIntSpace = undefined;
-                                for (elems) |elem| std.hash.autoHash(
-                                    hasher,
-                                    ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
-                                        unreachable,
-                                );
-                            },
-                            .repeated_elem => |elem| {
-                                const len = ip.aggregateTypeLen(aggregate.ty);
-                                var buffer: Key.Int.Storage.BigIntSpace = undefined;
-                                const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
-                                    unreachable;
-                                var i: u64 = 0;
-                                while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
-                            },
-                        }
-                        return;
-                    },
-                    else => {},
+                const len = ip.aggregateTypeLen(aggregate.ty);
+                const child = switch (ip.indexToKey(aggregate.ty)) {
+                    .array_type => |array_type| array_type.child,
+                    .vector_type => |vector_type| vector_type.child,
+                    .anon_struct_type, .struct_type => .none,
+                    else => unreachable,
+                };
+
+                if (child == .u8_type) {
+                    switch (aggregate.storage) {
+                        .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| {
+                            std.hash.autoHash(hasher, KeyTag.int);
+                            std.hash.autoHash(hasher, byte);
+                        },
+                        .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| {
+                            const elem_key = ip.indexToKey(elem);
+                            std.hash.autoHash(hasher, @as(KeyTag, elem_key));
+                            switch (elem_key) {
+                                .undef => {},
+                                .int => |int| std.hash.autoHash(
+                                    hasher,
+                                    @intCast(u8, int.storage.u64),
+                                ),
+                                else => unreachable,
+                            }
+                        },
+                        .repeated_elem => |elem| {
+                            const elem_key = ip.indexToKey(elem);
+                            var remaining = len;
+                            while (remaining > 0) : (remaining -= 1) {
+                                std.hash.autoHash(hasher, @as(KeyTag, elem_key));
+                                switch (elem_key) {
+                                    .undef => {},
+                                    .int => |int| std.hash.autoHash(
+                                        hasher,
+                                        @intCast(u8, int.storage.u64),
+                                    ),
+                                    else => unreachable,
+                                }
+                            }
+                        },
+                    }
+                    return;
                 }

                 switch (aggregate.storage) {
                     .bytes => unreachable,
-                    .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem),
+                    .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem|
+                        std.hash.autoHash(hasher, elem),
                     .repeated_elem => |elem| {
-                        const len = ip.aggregateTypeLen(aggregate.ty);
-                        var i: u64 = 0;
-                        while (i < len) : (i += 1) std.hash.autoHash(hasher, elem);
+                        var remaining = len;
+                        while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem);
                     },
                 }
             },

@@ -960,9 +978,10 @@ pub const Key = union(enum) {
             const b_info = b.aggregate;
             if (a_info.ty != b_info.ty) return false;

+            const len = ip.aggregateTypeLen(a_info.ty);
             const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
             if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
-                for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| {
+                for (0..@intCast(usize, len)) |elem_index| {
                     const a_elem = switch (a_info.storage) {
                         .bytes => |bytes| ip.getIfExists(.{ .int = .{
                             .ty = .u8_type,

@@ -987,11 +1006,19 @@ pub const Key = union(enum) {
             switch (a_info.storage) {
                 .bytes => |a_bytes| {
                     const b_bytes = b_info.storage.bytes;
-                    return std.mem.eql(u8, a_bytes, b_bytes);
+                    return std.mem.eql(
+                        u8,
+                        a_bytes[0..@intCast(usize, len)],
+                        b_bytes[0..@intCast(usize, len)],
+                    );
                 },
                 .elems => |a_elems| {
                     const b_elems = b_info.storage.elems;
-                    return std.mem.eql(Index, a_elems, b_elems);
+                    return std.mem.eql(
+                        Index,
+                        a_elems[0..@intCast(usize, len)],
+                        b_elems[0..@intCast(usize, len)],
+                    );
                 },
                 .repeated_elem => |a_elem| {
                     const b_elem = b_info.storage.repeated_elem;

@@ -2691,7 +2718,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .bytes => {
             const extra = ip.extraData(Bytes, data);
-            const len = @intCast(u32, ip.aggregateTypeLen(extra.ty));
+            const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty));
             return .{ .aggregate = .{
                 .ty = extra.ty,
                 .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] },

@@ -2699,7 +2726,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .aggregate => {
             const extra = ip.extraDataTrail(Aggregate, data);
-            const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty));
+            const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
             const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
             return .{ .aggregate = .{
                 .ty = extra.data.ty,

@@ -3145,7 +3172,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             }),
         }),
         .int => |int| {
-            assert(int != .none);
+            assert(ip.typeOf(int) == .usize_type);
             ip.items.appendAssumeCapacity(.{
                 .tag = .ptr_int,
                 .data = try ip.addExtra(gpa, PtrAddr{

@@ -3452,7 +3479,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {

         .enum_tag => |enum_tag| {
             assert(ip.isEnumType(enum_tag.ty));
-            assert(ip.indexToKey(enum_tag.int) == .int);
+            switch (ip.indexToKey(enum_tag.ty)) {
+                .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
+                .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty),
+                else => unreachable,
+            }
             ip.items.appendAssumeCapacity(.{
                 .tag = .enum_tag,
                 .data = try ip.addExtra(gpa, enum_tag),

@@ -3501,21 +3532,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {

         .aggregate => |aggregate| {
             const ty_key = ip.indexToKey(aggregate.ty);
-            const aggregate_len = ip.aggregateTypeLen(aggregate.ty);
+            const len = ip.aggregateTypeLen(aggregate.ty);
+            const child = switch (ty_key) {
+                .array_type => |array_type| array_type.child,
+                .vector_type => |vector_type| vector_type.child,
+                .anon_struct_type, .struct_type => .none,
+                else => unreachable,
+            };
+            const sentinel = switch (ty_key) {
+                .array_type => |array_type| array_type.sentinel,
+                .vector_type, .anon_struct_type, .struct_type => .none,
+                else => unreachable,
+            };
+            const len_including_sentinel = len + @boolToInt(sentinel != .none);
             switch (aggregate.storage) {
                 .bytes => |bytes| {
-                    assert(ty_key.array_type.child == .u8_type);
-                    assert(bytes.len == aggregate_len);
+                    assert(child == .u8_type);
+                    if (bytes.len != len) {
+                        assert(bytes.len == len_including_sentinel);
+                        assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64);
+                        unreachable;
+                    }
                 },
                 .elems => |elems| {
-                    assert(elems.len == aggregate_len);
+                    if (elems.len != len) {
+                        assert(elems.len == len_including_sentinel);
+                        assert(elems[len] == sentinel);
+                        unreachable;
+                    }
+                },
+                .repeated_elem => |elem| {
+                    assert(sentinel == .none or elem == sentinel);
                 },
-                .repeated_elem => {},
             }
             switch (ty_key) {
-                inline .array_type, .vector_type => |seq_type| {
+                .array_type, .vector_type => {
                     for (aggregate.storage.values()) |elem| {
-                        assert(ip.typeOf(elem) == seq_type.child);
+                        assert(ip.typeOf(elem) == child);
                     }
                 },
                 .struct_type => |struct_type| {

@@ -3534,7 +3587,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 else => unreachable,
             }

-            if (aggregate_len == 0) {
+            if (len == 0) {
                 ip.items.appendAssumeCapacity(.{
                     .tag = .only_possible_value,
                     .data = @enumToInt(aggregate.ty),

@@ -3543,41 +3596,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
            }

            switch (ty_key) {
-                .anon_struct_type => |anon_struct_type| {
-                    if (switch (aggregate.storage) {
+                .anon_struct_type => |anon_struct_type| opv: {
+                    switch (aggregate.storage) {
                        .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| {
                            if (value != ip.getIfExists(.{ .int = .{
                                .ty = .u8_type,
                                .storage = .{ .u64 = byte },
-                            } })) break false;
-                        } else true,
-                        .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems),
+                            } })) break :opv;
+                        },
+                        .elems => |elems| if (!std.mem.eql(
+                            Index,
+                            anon_struct_type.values,
+                            elems,
+                        )) break :opv,
                        .repeated_elem => |elem| for (anon_struct_type.values) |value| {
-                            if (value != elem) break false;
-                        } else true,
-                    }) {
-                        // This encoding works thanks to the fact that, as we just verified,
-                        // the type itself contains a slice of values that can be provided
-                        // in the aggregate fields.
-                        ip.items.appendAssumeCapacity(.{
-                            .tag = .only_possible_value,
-                            .data = @enumToInt(aggregate.ty),
-                        });
-                        return @intToEnum(Index, ip.items.len - 1);
+                            if (value != elem) break :opv;
+                        },
                    }
+                    // This encoding works thanks to the fact that, as we just verified,
+                    // the type itself contains a slice of values that can be provided
+                    // in the aggregate fields.
+                    ip.items.appendAssumeCapacity(.{
+                        .tag = .only_possible_value,
+                        .data = @enumToInt(aggregate.ty),
+                    });
+                    return @intToEnum(Index, ip.items.len - 1);
                },
                else => {},
            }

-            if (switch (aggregate.storage) {
-                .bytes => |bytes| for (bytes[1..]) |byte| {
-                    if (byte != bytes[0]) break false;
-                } else true,
-                .elems => |elems| for (elems[1..]) |elem| {
-                    if (elem != elems[0]) break false;
-                } else true,
-                .repeated_elem => true,
-            }) {
+            repeated: {
+                switch (aggregate.storage) {
+                    .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte|
+                        if (byte != bytes[0]) break :repeated,
+                    .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem|
+                        if (elem != elems[0]) break :repeated,
+                    .repeated_elem => {},
+                }
                const elem = switch (aggregate.storage) {
                    .bytes => |bytes| elem: {
                        _ = ip.map.pop();

@@ -3607,42 +3662,48 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                return @intToEnum(Index, ip.items.len - 1);
            }

-            switch (ty_key) {
-                .array_type => |array_type| if (array_type.child == .u8_type) {
-                    const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none);
-                    try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
-                    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
-                    var buffer: Key.Int.Storage.BigIntSpace = undefined;
-                    switch (aggregate.storage) {
-                        .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
-                        .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity(
-                            ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
-                        ),
-                        .repeated_elem => |elem| @memset(
-                            ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len),
-                            ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
-                        ),
-                    }
-                    if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity(
-                        ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch
-                            unreachable,
-                    );
-                    const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
-                    ip.items.appendAssumeCapacity(.{
-                        .tag = .bytes,
-                        .data = ip.addExtraAssumeCapacity(Bytes{
-                            .ty = aggregate.ty,
-                            .bytes = bytes.toString(),
-                        }),
-                    });
-                    return @intToEnum(Index, ip.items.len - 1);
-                },
-                else => {},
+            if (child == .u8_type) bytes: {
+                const string_bytes_index = ip.string_bytes.items.len;
+                try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
+                try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
+                switch (aggregate.storage) {
+                    .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
+                    .elems => |elems| for (elems) |elem| switch (ip.indexToKey(elem)) {
+                        .undef => {
+                            ip.string_bytes.shrinkRetainingCapacity(string_bytes_index);
+                            break :bytes;
+                        },
+                        .int => |int| ip.string_bytes.appendAssumeCapacity(
+                            @intCast(u8, int.storage.u64),
+                        ),
+                        else => unreachable,
+                    },
+                    .repeated_elem => |elem| switch (ip.indexToKey(elem)) {
+                        .undef => break :bytes,
+                        .int => |int| @memset(
+                            ip.string_bytes.addManyAsSliceAssumeCapacity(len),
+                            @intCast(u8, int.storage.u64),
+                        ),
+                        else => unreachable,
+                    },
+                }
+                if (sentinel != .none) ip.string_bytes.appendAssumeCapacity(
+                    @intCast(u8, ip.indexToKey(sentinel).int.storage.u64),
+                );
+                const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
+                ip.items.appendAssumeCapacity(.{
+                    .tag = .bytes,
+                    .data = ip.addExtraAssumeCapacity(Bytes{
+                        .ty = aggregate.ty,
+                        .bytes = bytes.toString(),
+                    }),
+                });
+                return @intToEnum(Index, ip.items.len - 1);
            }

            try ip.extra.ensureUnusedCapacity(
                gpa,
-                @typeInfo(Aggregate).Struct.fields.len + aggregate_len,
+                @typeInfo(Aggregate).Struct.fields.len + len_including_sentinel,
            );
            ip.items.appendAssumeCapacity(.{
                .tag = .aggregate,

@@ -3651,6 +3712,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                }),
            });
            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems));
+            if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel));
        },

        .un => |un| {

@@ -4183,10 +4245,12 @@ pub fn sliceLen(ip: InternPool, i: Index) Index {
 /// Given an existing value, returns the same value but with the supplied type.
 /// Only some combinations are allowed:
 /// * identity coercion
+/// * undef => any
 /// * int <=> int
 /// * int <=> enum
 /// * enum_literal => enum
 /// * ptr <=> ptr
+/// * int => ptr
 /// * null_value => opt
 /// * payload => opt
 /// * error set <=> error set

@@ -4194,68 +4258,93 @@ pub fn sliceLen(ip: InternPool, i: Index) Index {
 /// * error set => error union
 /// * payload => error union
 /// * fn <=> fn
+/// * array <=> array
+/// * array <=> vector
+/// * vector <=> vector
 pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
     const old_ty = ip.typeOf(val);
     if (old_ty == new_ty) return val;
-    switch (ip.indexToKey(val)) {
-        .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
-            return ip.get(gpa, .{ .extern_func = .{
-                .ty = new_ty,
-                .decl = extern_func.decl,
-                .lib_name = extern_func.lib_name,
-            } }),
-        .func => |func| if (ip.isFunctionType(new_ty))
-            return ip.get(gpa, .{ .func = .{
-                .ty = new_ty,
-                .index = func.index,
-            } }),
-        .int => |int| if (ip.isIntegerType(new_ty))
-            return getCoercedInts(ip, gpa, int, new_ty)
-        else if (ip.isEnumType(new_ty))
-            return ip.get(gpa, .{ .enum_tag = .{
-                .ty = new_ty,
-                .int = val,
-            } }),
-        .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
-            return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
-        .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
-            .enum_type => |enum_type| {
-                const index = enum_type.nameIndex(ip, enum_literal).?;
-                return ip.get(gpa, .{ .enum_tag = .{
-                    .ty = new_ty,
-                    .int = if (enum_type.values.len != 0)
-                        enum_type.values[index]
-                    else
-                        try ip.get(gpa, .{ .int = .{
-                            .ty = enum_type.tag_ty,
-                            .storage = .{ .u64 = index },
-                        } }),
-                } });
-            },
-            else => {},
-        },
-        .ptr => |ptr| if (ip.isPointerType(new_ty))
-            return ip.get(gpa, .{ .ptr = .{
-                .ty = new_ty,
-                .addr = ptr.addr,
-                .len = ptr.len,
-            } }),
-        .err => |err| if (ip.isErrorSetType(new_ty))
-            return ip.get(gpa, .{ .err = .{
-                .ty = new_ty,
-                .name = err.name,
-            } })
-        else if (ip.isErrorUnionType(new_ty))
-            return ip.get(gpa, .{ .error_union = .{
-                .ty = new_ty,
-                .val = .{ .err_name = err.name },
-            } }),
-        .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
-            return ip.get(gpa, .{ .error_union = .{
-                .ty = new_ty,
-                .val = error_union.val,
-            } }),
-        else => {},
+    switch (val) {
+        .undef => return ip.get(gpa, .{ .undef = new_ty }),
+        .null_value => if (ip.isOptionalType(new_ty))
+            return ip.get(gpa, .{ .opt = .{
+                .ty = new_ty,
+                .val = .none,
+            } }),
+        else => switch (ip.indexToKey(val)) {
+            .undef => return ip.get(gpa, .{ .undef = new_ty }),
+            .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
+                return ip.get(gpa, .{ .extern_func = .{
+                    .ty = new_ty,
+                    .decl = extern_func.decl,
+                    .lib_name = extern_func.lib_name,
+                } }),
+            .func => |func| if (ip.isFunctionType(new_ty))
+                return ip.get(gpa, .{ .func = .{
+                    .ty = new_ty,
+                    .index = func.index,
+                } }),
+            .int => |int| if (ip.isIntegerType(new_ty))
+                return getCoercedInts(ip, gpa, int, new_ty)
+            else if (ip.isEnumType(new_ty))
+                return ip.get(gpa, .{ .enum_tag = .{
+                    .ty = new_ty,
+                    .int = val,
+                } })
+            else if (ip.isPointerType(new_ty))
+                return ip.get(gpa, .{ .ptr = .{
+                    .ty = new_ty,
+                    .addr = .{ .int = val },
+                } }),
+            .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
+                return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
+            .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
+                .enum_type => |enum_type| {
+                    const index = enum_type.nameIndex(ip, enum_literal).?;
+                    return ip.get(gpa, .{ .enum_tag = .{
+                        .ty = new_ty,
+                        .int = if (enum_type.values.len != 0)
+                            enum_type.values[index]
+                        else
+                            try ip.get(gpa, .{ .int = .{
+                                .ty = enum_type.tag_ty,
+                                .storage = .{ .u64 = index },
+                            } }),
+                    } });
+                },
+                else => {},
+            },
+            .ptr => |ptr| if (ip.isPointerType(new_ty))
+                return ip.get(gpa, .{ .ptr = .{
+                    .ty = new_ty,
+                    .addr = ptr.addr,
+                    .len = ptr.len,
+                } }),
+            .err => |err| if (ip.isErrorSetType(new_ty))
+                return ip.get(gpa, .{ .err = .{
+                    .ty = new_ty,
+                    .name = err.name,
+                } })
+            else if (ip.isErrorUnionType(new_ty))
+                return ip.get(gpa, .{ .error_union = .{
+                    .ty = new_ty,
+                    .val = .{ .err_name = err.name },
+                } }),
+            .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
+                return ip.get(gpa, .{ .error_union = .{
+                    .ty = new_ty,
+                    .val = error_union.val,
+                } }),
+            .aggregate => |aggregate| return ip.get(gpa, .{ .aggregate = .{
+                .ty = new_ty,
+                .storage = switch (aggregate.storage) {
+                    .bytes => |bytes| .{ .bytes = bytes[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] },
+                    .elems => |elems| .{ .elems = elems[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] },
+                    .repeated_elem => |elem| .{ .repeated_elem = elem },
+                },
+            } }),
+            else => {},
+        },
     }
     switch (ip.indexToKey(new_ty)) {
         .opt_type => |child_type| switch (val) {

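Aside: the new "int => ptr" arm above re-types an address-sized integer as a pointer value. A sketch of the same idea with this era's runtime builtins (unrelated to the InternPool machinery):

    const std = @import("std");

    test "an address-sized integer round-trips through a pointer" {
        var addr: usize = 0x1000;
        const p = @intToPtr(*u8, addr); // never dereferenced
        try std.testing.expectEqual(addr, @ptrToInt(p));
    }
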
@@ -4527,7 +4616,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {

         .type_function => b: {
             const info = ip.extraData(TypeFunction, data);
-            break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len);
+            break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len);
         },

         .undef => 0,

@@ -4570,14 +4659,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {

         .bytes => b: {
             const info = ip.extraData(Bytes, data);
-            const len = @intCast(u32, ip.aggregateTypeLen(info.ty));
+            const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
             break :b @sizeOf(Bytes) + len +
                 @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0);
         },
         .aggregate => b: {
             const info = ip.extraData(Aggregate, data);
-            const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
-            break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len);
+            const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
+            break :b @sizeOf(Aggregate) + (@sizeOf(Index) * fields_len);
         },
         .repeated => @sizeOf(Repeated),

@@ -4889,6 +4978,16 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
     };
 }

+pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 {
+    return switch (ip.indexToKey(ty)) {
+        .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
+        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
+        .array_type => |array_type| array_type.len + @boolToInt(array_type.sentinel != .none),
+        .vector_type => |vector_type| vector_type.len,
+        else => unreachable,
+    };
+}
+
 pub fn isNoReturn(ip: InternPool, ty: Index) bool {
     return switch (ty) {
         .noreturn_type => true,

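Aside: a worked example of what the new helper computes, as a standalone sketch (the function name is illustrative; the real code switches on the interned type as shown above):

    const std = @import("std");

    fn lenIncludingSentinel(len: u64, has_sentinel: bool) u64 {
        // Mirrors `array_type.len + @boolToInt(array_type.sentinel != .none)`.
        return len + @boolToInt(has_sentinel);
    }

    test "a sentinel adds one trailing element" {
        try std.testing.expectEqual(@as(u64, 5), lenIncludingSentinel(4, true)); // like [4:0]u8
        try std.testing.expectEqual(@as(u64, 4), lenIncludingSentinel(4, false)); // like [4]u8
    }
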
@@ -99,6 +99,7 @@ monomorphed_funcs: MonomorphedFuncsSet = .{},
 /// The set of all comptime function calls that have been cached so that future calls
 /// with the same parameters will get the same return value.
 memoized_calls: MemoizedCallSet = .{},
+memoized_call_args: MemoizedCall.Args = .{},
 /// Contains the values from `@setAlignStack`. A sparse table is used here
 /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
 /// functions are many.

@@ -230,46 +231,30 @@ pub const MemoizedCallSet = std.HashMapUnmanaged(
 );

 pub const MemoizedCall = struct {
-    module: *Module,
+    args: *const Args,

+    pub const Args = std.ArrayListUnmanaged(InternPool.Index);
+
     pub const Key = struct {
         func: Fn.Index,
-        args: []TypedValue,
-    };
-
-    pub const Result = struct {
-        val: Value,
-        arena: std.heap.ArenaAllocator.State,
-    };
-
-    pub fn eql(ctx: @This(), a: Key, b: Key) bool {
-        if (a.func != b.func) return false;
-
-        assert(a.args.len == b.args.len);
-        for (a.args, 0..) |a_arg, arg_i| {
-            const b_arg = b.args[arg_i];
-            if (!a_arg.eql(b_arg, ctx.module)) {
-                return false;
-            }
+        args_index: u32,
+        args_count: u32,
+
+        pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index {
+            return ctx.args.items[key.args_index..][0..key.args_count];
         }
+    };

-        return true;
+    pub const Result = InternPool.Index;
+
+    pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool {
+        return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx));
     }

-    /// Must match `Sema.GenericCallAdapter.hash`.
-    pub fn hash(ctx: @This(), key: Key) u64 {
+    pub fn hash(ctx: MemoizedCall, key: Key) u64 {
         var hasher = std.hash.Wyhash.init(0);
-
-        // The generic function Decl is guaranteed to be the first dependency
-        // of each of its instantiations.
         std.hash.autoHash(&hasher, key.func);
-
-        // This logic must be kept in sync with the logic in `analyzeCall` that
-        // computes the hash.
-        for (key.args) |arg| {
-            arg.hash(&hasher, ctx.module);
-        }
-
+        std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep);
         return hasher.final();
     }
 };

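Aside: a minimal sketch of the new key layout, assuming the same offset+count scheme shown above (the u32 element type and names are illustrative; the real keys slice into the module's shared memoized_call_args list):

    const std = @import("std");

    const Key = struct {
        args_index: u32,
        args_count: u32,

        fn args(key: Key, all: []const u32) []const u32 {
            return all[key.args_index..][0..key.args_count];
        }
    };

    test "keys are views into one shared argument array" {
        const all = [_]u32{ 10, 11, 12, 13 };
        const a = Key{ .args_index = 0, .args_count = 2 };
        const b = Key{ .args_index = 2, .args_count = 2 };
        try std.testing.expectEqualSlices(u32, &[_]u32{ 10, 11 }, a.args(&all));
        try std.testing.expectEqualSlices(u32, &[_]u32{ 12, 13 }, b.args(&all));
    }
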
@@ -883,6 +868,10 @@ pub const Decl = struct {
             return decl.ty.abiAlignment(mod);
         }
     }
+
+    pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
+        decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+    }
 };

 /// This state is attached to every Decl when Module emit_h is non-null.

@@ -3325,15 +3314,8 @@ pub fn deinit(mod: *Module) void {
     mod.test_functions.deinit(gpa);
     mod.align_stack_fns.deinit(gpa);
     mod.monomorphed_funcs.deinit(gpa);
-    {
-        var it = mod.memoized_calls.iterator();
-        while (it.next()) |entry| {
-            gpa.free(entry.key_ptr.args);
-            entry.value_ptr.arena.promote(gpa).deinit();
-        }
-        mod.memoized_calls.deinit(gpa);
-    }
+    mod.memoized_call_args.deinit(gpa);
+    mod.memoized_calls.deinit(gpa);

     mod.decls_free_list.deinit(gpa);
     mod.allocated_decls.deinit(gpa);

@@ -5894,6 +5876,7 @@ pub fn initNewAnonDecl(
     typed_value: TypedValue,
     name: [:0]u8,
 ) !void {
+    assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern()));
     errdefer mod.gpa.free(name);

     const new_decl = mod.declPtr(new_decl_index);

@@ -6645,7 +6628,7 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void {
     if (decl.alive) return;
     decl.alive = true;

-    decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+    try decl.intern(mod);

     // This is the first time we are marking this Decl alive. We must
     // therefore recurse into its value and mark any Decl it references

@@ -6749,15 +6732,19 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
         }
     }

-    // Canonicalize host_size. If it matches the bit size of the pointee type,
-    // we change it to 0 here. If this causes an assertion trip, the pointee type
-    // needs to be resolved before calling this ptr() function.
-    if (info.host_size != 0) {
-        const elem_bit_size = info.elem_type.toType().bitSize(mod);
-        assert(info.bit_offset + elem_bit_size <= info.host_size * 8);
-        if (info.host_size * 8 == elem_bit_size) {
-            canon_info.host_size = 0;
-        }
+    switch (info.vector_index) {
+        // Canonicalize host_size. If it matches the bit size of the pointee type,
+        // we change it to 0 here. If this causes an assertion trip, the pointee type
+        // needs to be resolved before calling this ptr() function.
+        .none => if (info.host_size != 0) {
+            const elem_bit_size = info.elem_type.toType().bitSize(mod);
+            assert(info.bit_offset + elem_bit_size <= info.host_size * 8);
+            if (info.host_size * 8 == elem_bit_size) {
+                canon_info.host_size = 0;
+            }
+        },
+        .runtime => {},
+        _ => assert(@enumToInt(info.vector_index) < info.host_size),
     }

     return (try intern(mod, .{ .ptr_type = canon_info })).toType();

@@ -1,18 +1,18 @@
 const std = @import("std");
+const assert = std.debug.assert;
 const Order = std.math.Order;

-const RangeSet = @This();
+const InternPool = @import("InternPool.zig");
 const Module = @import("Module.zig");
+const RangeSet = @This();
 const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
-const Type = @import("type.zig").Type;
-const Value = @import("value.zig").Value;

 ranges: std.ArrayList(Range),
 module: *Module,

 pub const Range = struct {
-    first: Value,
-    last: Value,
+    first: InternPool.Index,
+    last: InternPool.Index,
     src: SwitchProngSrc,
 };

@@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void {

 pub fn add(
     self: *RangeSet,
-    first: Value,
-    last: Value,
-    ty: Type,
+    first: InternPool.Index,
+    last: InternPool.Index,
     src: SwitchProngSrc,
 ) !?SwitchProngSrc {
+    const mod = self.module;
+    const ip = &mod.intern_pool;
+
+    const ty = ip.typeOf(first);
+    assert(ty == ip.typeOf(last));
+
     for (self.ranges.items) |range| {
-        if (last.compareScalar(.gte, range.first, ty, self.module) and
-            first.compareScalar(.lte, range.last, ty, self.module))
+        assert(ty == ip.typeOf(range.first));
+        assert(ty == ip.typeOf(range.last));
+
+        if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and
+            first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod))
         {
             return range.src; // They overlap.
         }
     }

     try self.ranges.append(.{
         .first = first,
         .last = last,

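Aside: the overlap condition in `add` is the standard closed-interval test; a standalone sketch on plain integers (the real code compares interned values through compareScalar):

    const std = @import("std");

    fn overlaps(first: i64, last: i64, r_first: i64, r_last: i64) bool {
        // [first, last] and [r_first, r_last] overlap exactly when
        // neither range ends before the other begins.
        return last >= r_first and first <= r_last;
    }

    test "overlap is inclusive at the endpoints" {
        try std.testing.expect(overlaps(0, 5, 5, 9)); // share the point 5
        try std.testing.expect(!overlaps(0, 4, 5, 9)); // disjoint
    }
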
@@ -49,30 +58,29 @@ pub fn add(
     return null;
 }

-const LessThanContext = struct { ty: Type, module: *Module };
-
 /// Assumes a and b do not overlap
-fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
-    return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module);
+fn lessThan(mod: *Module, a: Range, b: Range) bool {
+    const ty = mod.intern_pool.typeOf(a.first).toType();
+    return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod);
 }

-pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
+pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
+    const mod = self.module;
+    const ip = &mod.intern_pool;
+    assert(ip.typeOf(first) == ip.typeOf(last));
+
     if (self.ranges.items.len == 0)
         return false;

-    const mod = self.module;
-    std.mem.sort(Range, self.ranges.items, LessThanContext{
-        .ty = ty,
-        .module = mod,
-    }, lessThan);
+    std.mem.sort(Range, self.ranges.items, mod, lessThan);

-    if (!self.ranges.items[0].first.eql(first, ty, mod) or
-        !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod))
+    if (self.ranges.items[0].first != first or
+        self.ranges.items[self.ranges.items.len - 1].last != last)
     {
         return false;
     }

-    var space: Value.BigIntSpace = undefined;
+    var space: InternPool.Key.Int.Storage.BigIntSpace = undefined;

     var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
     defer counter.deinit();

@@ -83,10 +91,10 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
         const prev = self.ranges.items[i];

         // prev.last + 1 == cur.first
-        try counter.copy(prev.last.toBigInt(&space, mod));
+        try counter.copy(prev.last.toValue().toBigInt(&space, mod));
         try counter.addScalar(&counter, 1);

-        const cur_start_int = cur.first.toBigInt(&space, mod);
+        const cur_start_int = cur.first.toValue().toBigInt(&space, mod);
         if (!cur_start_int.eq(counter.toConst())) {
             return false;
         }

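Aside: `spans` accepts a set of sorted, non-overlapping ranges only when consecutive ranges are exactly adjacent, the `prev.last + 1 == cur.first` check above; a sketch of that rule on plain integers:

    const std = @import("std");

    fn adjacent(prev_last: i64, cur_first: i64) bool {
        return prev_last + 1 == cur_first;
    }

    test "ranges span an interval only when back-to-back" {
        try std.testing.expect(adjacent(5, 6)); // [.., 5] then [6, ..]
        try std.testing.expect(!adjacent(5, 7)); // value 6 is missing
    }
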
src/Sema.zig: 675 changes (file diff suppressed because it is too large).

@@ -957,7 +957,7 @@ pub fn genTypedValue(
             }
         },
         .Bool => {
-            return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool(mod)) });
+            return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
         },
         .Optional => {
             if (typed_value.ty.isPtrLikeOptional(mod)) {

@@ -2003,7 +2003,7 @@ pub const Object = struct {
                 mod.intern_pool.stringToSlice(tuple.names[i])
             else
                 try std.fmt.allocPrintZ(gpa, "{d}", .{i});
-            defer gpa.free(field_name);
+            defer if (tuple.names.len == 0) gpa.free(field_name);

             try di_fields.append(gpa, dib.createMemberType(
                 fwd_decl.toScope(),

@@ -2461,13 +2461,13 @@ pub const DeclGen = struct {
         if (decl.@"linksection") |section| global.setSection(section);
         assert(decl.has_tv);
         const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
-            break :init_val variable.init.toValue();
+            break :init_val variable.init;
         } else init_val: {
             global.setGlobalConstant(.True);
-            break :init_val decl.val;
+            break :init_val decl.val.toIntern();
         };
-        if (init_val.toIntern() != .unreachable_value) {
-            const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
+        if (init_val != .none) {
+            const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() });
             if (global.globalGetValueType() == llvm_init.typeOf()) {
                 global.setInitializer(llvm_init);
             } else {

@@ -2748,7 +2748,7 @@ pub const DeclGen = struct {
         if (std.debug.runtime_safety and false) check: {
             if (t.zigTypeTag(mod) == .Opaque) break :check;
             if (!t.hasRuntimeBits(mod)) break :check;
-            if (!llvm_ty.isSized().toBool(mod)) break :check;
+            if (!llvm_ty.isSized().toBool()) break :check;

             const zig_size = t.abiSize(mod);
             const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty);

@@ -3239,7 +3239,7 @@ pub const DeclGen = struct {
             => unreachable, // non-runtime values
             .false, .true => {
                 const llvm_type = try dg.lowerType(tv.ty);
-                return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull();
+                return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
             },
         },
         .variable,

@@ -3522,15 +3522,19 @@ pub const DeclGen = struct {
             const elem_ty = vector_type.child.toType();
             const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len);
             defer dg.gpa.free(llvm_elems);
+            const llvm_i8 = dg.context.intType(8);
             for (llvm_elems, 0..) |*llvm_elem, i| {
-                llvm_elem.* = try dg.lowerValue(.{
-                    .ty = elem_ty,
-                    .val = switch (aggregate.storage) {
-                        .bytes => unreachable,
-                        .elems => |elems| elems[i],
-                        .repeated_elem => |elem| elem,
-                    }.toValue(),
-                });
+                llvm_elem.* = switch (aggregate.storage) {
+                    .bytes => |bytes| llvm_i8.constInt(bytes[i], .False),
+                    .elems => |elems| try dg.lowerValue(.{
+                        .ty = elem_ty,
+                        .val = elems[i].toValue(),
+                    }),
+                    .repeated_elem => |elem| try dg.lowerValue(.{
+                        .ty = elem_ty,
+                        .val = elem.toValue(),
+                    }),
+                };
             }
             return llvm.constVector(
                 llvm_elems.ptr,

@@ -654,7 +654,7 @@ pub const DeclGen = struct {
             .@"unreachable",
             .generic_poison,
             => unreachable, // non-runtime values
-            .false, .true => try self.addConstBool(val.toBool(mod)),
+            .false, .true => try self.addConstBool(val.toBool()),
         },
         .variable,
         .extern_func,

@@ -974,7 +974,6 @@ pub const DeclGen = struct {
     /// This function should only be called during function code generation.
     fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
         const mod = self.module;
-        const target = self.getTarget();
         const result_ty_ref = try self.resolveType(ty, repr);

         log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });

@@ -991,51 +990,8 @@ pub const DeclGen = struct {
                     return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
                 }
             },
-            .Bool => switch (repr) {
-                .direct => return try self.spv.constBool(result_ty_ref, val.toBool(mod)),
-                .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
-            },
-            .Float => return switch (ty.floatBits(target)) {
-                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
-                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
-                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
-                80, 128 => unreachable, // TODO
-                else => unreachable,
-            },
-            .ErrorSet => {
-                const value = switch (val.tag()) {
-                    .@"error" => blk: {
-                        const err_name = val.castTag(.@"error").?.data.name;
-                        const kv = try self.module.getErrorValue(err_name);
-                        break :blk @intCast(u16, kv.value);
-                    },
-                    .zero => 0,
-                    else => unreachable,
-                };
-
-                return try self.spv.constInt(result_ty_ref, value);
-            },
-            .ErrorUnion => {
-                const payload_ty = ty.errorUnionPayload();
-                const is_pl = val.errorUnionIsPayload();
-                const error_val = if (!is_pl) val else Value.initTag(.zero);
-
-                const eu_layout = self.errorUnionLayout(payload_ty);
-                if (!eu_layout.payload_has_bits) {
-                    return try self.constant(Type.anyerror, error_val, repr);
-                }
-
-                const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
-
-                var members: [2]IdRef = undefined;
-                if (eu_layout.error_first) {
-                    members[0] = try self.constant(Type.anyerror, error_val, .indirect);
-                    members[1] = try self.constant(payload_ty, payload_val, .indirect);
-                } else {
-                    members[0] = try self.constant(payload_ty, payload_val, .indirect);
-                    members[1] = try self.constant(Type.anyerror, error_val, .indirect);
-                }
-                return try self.spv.constComposite(result_ty_ref, &members);
+            .Bool => {
+                @compileError("TODO merge conflict failure");
             },
             // TODO: We can handle most pointers here (decl refs etc), because now they emit an extra
             // OpVariable that is not really required.

src/type.zig: 37 changes

@@ -2481,25 +2481,32 @@ pub const Type = struct {
             .struct_type => |struct_type| {
                 if (mod.structPtrUnwrap(struct_type.index)) |s| {
                     assert(s.haveFieldTypes());
-                    for (s.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if ((try field.ty.onePossibleValue(mod)) != null) continue;
-                        return null;
+                    const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
+                    defer mod.gpa.free(field_vals);
+                    for (field_vals, s.fields.values()) |*field_val, field| {
+                        if (field.is_comptime) {
+                            field_val.* = try field.default_val.intern(field.ty, mod);
+                            continue;
+                        }
+                        if (try field.ty.onePossibleValue(mod)) |field_opv| {
+                            field_val.* = try field_opv.intern(field.ty, mod);
+                        } else return null;
                     }
-                }
-                // In this case the struct has no runtime-known fields and
-                // therefore has one possible value.

-                // TODO: this is incorrect for structs with comptime fields, I think
-                // we should use a temporary allocator to construct an aggregate that
-                // is populated with the comptime values and then intern that value here.
-                // This TODO is repeated in the redundant implementation of
-                // one-possible-value logic in Sema.zig.
-                const empty = try mod.intern(.{ .aggregate = .{
+                    // In this case the struct has no runtime-known fields and
+                    // therefore has one possible value.
+                    return (try mod.intern(.{ .aggregate = .{
+                        .ty = ty.toIntern(),
+                        .storage = .{ .elems = field_vals },
+                    } })).toValue();
+                }
+
+                // In this case the struct has no fields at all and
+                // therefore has one possible value.
+                return (try mod.intern(.{ .aggregate = .{
                     .ty = ty.toIntern(),
                     .storage = .{ .elems = &.{} },
-                } });
-                return empty.toValue();
+                } })).toValue();
             },

             .anon_struct_type => |tuple| {

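Aside: the property `onePossibleValue` detects can be seen in ordinary Zig; a struct whose every field type has exactly one value is itself single-valued (a sketch, independent of the compiler internals above):

    const std = @import("std");

    const Opv = struct { a: void, b: u0 };

    test "a zero-bit struct has exactly one value" {
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(Opv));
        const x = Opv{ .a = {}, .b = 0 };
        const y = Opv{ .a = {}, .b = 0 };
        try std.testing.expectEqual(x, y);
    }
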
src/value.zig: 200 changes

@@ -385,7 +385,7 @@ pub const Value = struct {
                 } });
             },
             .aggregate => {
-                const old_elems = val.castTag(.aggregate).?.data;
+                const old_elems = val.castTag(.aggregate).?.data[0..ty.arrayLen(mod)];
                 const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
                 defer mod.gpa.free(new_elems);
                 const ty_key = mod.intern_pool.indexToKey(ty.toIntern());

@@ -656,7 +656,7 @@ pub const Value = struct {
         };
     }

-    pub fn toBool(val: Value, _: *const Module) bool {
+    pub fn toBool(val: Value) bool {
         return switch (val.toIntern()) {
             .bool_true => true,
             .bool_false => false,

@@ -697,7 +697,7 @@ pub const Value = struct {
         switch (ty.zigTypeTag(mod)) {
             .Void => {},
             .Bool => {
-                buffer[0] = @boolToInt(val.toBool(mod));
+                buffer[0] = @boolToInt(val.toBool());
             },
             .Int, .Enum => {
                 const int_info = ty.intInfo(mod);

@@ -736,13 +736,20 @@ pub const Value = struct {
             },
             .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
-                .Extern => {
-                    const fields = ty.structFields(mod).values();
-                    const field_vals = val.castTag(.aggregate).?.data;
-                    for (fields, 0..) |field, i| {
-                        const off = @intCast(usize, ty.structFieldOffset(i, mod));
-                        try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
-                    }
+                .Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
+                    const off = @intCast(usize, ty.structFieldOffset(i, mod));
+                    const field_val = switch (val.ip_index) {
+                        .none => val.castTag(.aggregate).?.data[i],
+                        else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
+                            .bytes => |bytes| {
+                                buffer[off] = bytes[i];
+                                continue;
+                            },
+                            .elems => |elems| elems[i],
+                            .repeated_elem => |elem| elem,
+                        }.toValue(),
+                    };
+                    try writeToMemory(field_val, field.ty, mod, buffer[off..]);
                 },
                 .Packed => {
                     const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;

@@ -812,7 +819,7 @@ pub const Value = struct {
                 .Little => bit_offset / 8,
                 .Big => buffer.len - bit_offset / 8 - 1,
             };
-            if (val.toBool(mod)) {
+            if (val.toBool()) {
                 buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8));
             } else {
                 buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8));

@@ -1331,24 +1338,7 @@ pub const Value = struct {
             .gt => {},
         }

-        const lhs_float = lhs.isFloat(mod);
-        const rhs_float = rhs.isFloat(mod);
-        if (lhs_float and rhs_float) {
-            const lhs_tag = lhs.tag();
-            const rhs_tag = rhs.tag();
-            if (lhs_tag == rhs_tag) {
-                const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage;
-                const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage;
-                const lhs128: f128 = switch (lhs_storage) {
-                    inline else => |x| x,
-                };
-                const rhs128: f128 = switch (rhs_storage) {
-                    inline else => |x| x,
-                };
-                return std.math.order(lhs128, rhs128);
-            }
-        }
-        if (lhs_float or rhs_float) {
+        if (lhs.isFloat(mod) or rhs.isFloat(mod)) {
             const lhs_f128 = lhs.toFloat(f128, mod);
             const rhs_f128 = rhs.toFloat(f128, mod);
             return std.math.order(lhs_f128, rhs_f128);

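Aside: the simplified ordering path above widens both operands to f128 before comparing; a sketch of that rule (std.math.order is the same helper the compiler calls):

    const std = @import("std");

    test "mixed-width floats compare after widening to f128" {
        const lhs: f32 = 1.5;
        const rhs: f64 = 1.25;
        const order = std.math.order(@as(f128, lhs), @as(f128, rhs));
        try std.testing.expectEqual(std.math.Order.gt, order);
    }
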
@@ -1669,86 +1659,6 @@ pub const Value = struct {
         return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
     }

-    /// This function is used by hash maps and so treats floating-point NaNs as equal
-    /// to each other, and not equal to other floating-point values.
-    pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
-        if (val.ip_index != .none) {
-            // The InternPool data structure hashes based on Key to make interned objects
-            // unique. An Index can be treated simply as u32 value for the
-            // purpose of Type/Value hashing and equality.
-            std.hash.autoHash(hasher, val.toIntern());
-            return;
-        }
-        const zig_ty_tag = ty.zigTypeTag(mod);
-        std.hash.autoHash(hasher, zig_ty_tag);
-        if (val.isUndef(mod)) return;
-        // The value is runtime-known and shouldn't affect the hash.
-        if (val.isRuntimeValue(mod)) return;
-
-        switch (zig_ty_tag) {
-            .Opaque => unreachable, // Cannot hash opaque types
-
-            .Void,
-            .NoReturn,
-            .Undefined,
-            .Null,
-            => {},
-
-            .Type,
-            .Float,
-            .ComptimeFloat,
-            .Bool,
-            .Int,
-            .ComptimeInt,
-            .Pointer,
-            .Optional,
-            .ErrorUnion,
-            .ErrorSet,
-            .Enum,
-            .EnumLiteral,
-            .Fn,
-            => unreachable, // handled via ip_index check above
-            .Array, .Vector => {
-                const len = ty.arrayLen(mod);
-                const elem_ty = ty.childType(mod);
-                var index: usize = 0;
-                while (index < len) : (index += 1) {
-                    const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
-                        // Will be solved when arrays and vectors get migrated to the intern pool.
-                        error.OutOfMemory => @panic("OOM"),
-                    };
-                    elem_val.hash(elem_ty, hasher, mod);
-                }
-            },
-            .Struct => {
-                switch (val.tag()) {
-                    .aggregate => {
-                        const field_values = val.castTag(.aggregate).?.data;
-                        for (field_values, 0..) |field_val, i| {
-                            const field_ty = ty.structFieldType(i, mod);
-                            field_val.hash(field_ty, hasher, mod);
-                        }
-                    },
-                    else => unreachable,
-                }
-            },
-            .Union => {
-                const union_obj = val.cast(Payload.Union).?.data;
-                if (ty.unionTagType(mod)) |tag_ty| {
-                    union_obj.tag.hash(tag_ty, hasher, mod);
-                }
-                const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
-                union_obj.val.hash(active_field_ty, hasher, mod);
-            },
-            .Frame => {
-                @panic("TODO implement hashing frame values");
-            },
-            .AnyFrame => {
-                @panic("TODO implement hashing anyframe values");
-            },
-        }
-    }
-
     /// This is a more conservative hash function that produces equal hashes for values
     /// that can coerce into each other.
     /// This function is used by hash maps and so treats floating-point NaNs as equal
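With every value interned, the per-type hash routine becomes unnecessary: two values are structurally equal exactly when their 32-bit InternPool indices are equal, so hashing the index alone suffices. A minimal sketch of that property, with a hypothetical Index enum standing in for InternPool.Index:

    const std = @import("std");

    // Hypothetical stand-in for InternPool.Index: a 32-bit handle where
    // equal handles mean structurally equal (interned) values.
    const Index = enum(u32) { _ };

    fn hashIndex(index: Index) u64 {
        var hasher = std.hash.Wyhash.init(0);
        // Hashing the handle itself is sufficient: interning guarantees
        // equal values share one handle.
        std.hash.autoHash(&hasher, index);
        return hasher.final();
    }

    test "equal handles hash equally" {
        const a = @intToEnum(Index, 42);
        const b = @intToEnum(Index, 42);
        try std.testing.expectEqual(hashIndex(a), hashIndex(b));
    }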
@@ -1820,35 +1730,6 @@ pub const Value = struct {
         }
     }

-    pub const ArrayHashContext = struct {
-        ty: Type,
-        mod: *Module,
-
-        pub fn hash(self: @This(), val: Value) u32 {
-            const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
-            return @truncate(u32, other_context.hash(val));
-        }
-        pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
-            _ = b_index;
-            return a.eql(b, self.ty, self.mod);
-        }
-    };
-
-    pub const HashContext = struct {
-        ty: Type,
-        mod: *Module,
-
-        pub fn hash(self: @This(), val: Value) u64 {
-            var hasher = std.hash.Wyhash.init(0);
-            val.hash(self.ty, &hasher, self.mod);
-            return hasher.final();
-        }
-
-        pub fn eql(self: @This(), a: Value, b: Value) bool {
-            return a.eql(b, self.ty, self.mod);
-        }
-    };
-
     pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool {
         return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .ptr => |ptr| switch (ptr.addr) {
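The deleted contexts followed the standard pattern for std hash maps: a context struct whose hash feeds the key into a Wyhash and whose eql decides key equality. A self-contained sketch of that pattern with plain u32 keys (IndexContext is an illustrative name, not part of the compiler):

    const std = @import("std");

    // Illustrative context: a hash map keyed by 32-bit handles.
    const IndexContext = struct {
        pub fn hash(self: @This(), key: u32) u64 {
            _ = self;
            var hasher = std.hash.Wyhash.init(0);
            std.hash.autoHash(&hasher, key);
            return hasher.final();
        }
        pub fn eql(self: @This(), a: u32, b: u32) bool {
            _ = self;
            return a == b;
        }
    };

    test "custom context drives a std.HashMap" {
        var map = std.HashMap(u32, []const u8, IndexContext, std.hash_map.default_max_load_percentage).init(std.testing.allocator);
        defer map.deinit();
        try map.put(7, "seven");
        try std.testing.expectEqualStrings("seven", map.get(7).?);
    }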
@@ -1919,14 +1800,25 @@ pub const Value = struct {
     }

     pub fn sliceLen(val: Value, mod: *Module) u64 {
-        return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod);
+        const ptr = mod.intern_pool.indexToKey(val.toIntern()).ptr;
+        return switch (ptr.len) {
+            .none => switch (mod.intern_pool.indexToKey(switch (ptr.addr) {
+                .decl => |decl| mod.declPtr(decl).ty.toIntern(),
+                .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).ty.toIntern(),
+                .comptime_field => |comptime_field| mod.intern_pool.typeOf(comptime_field),
+                else => unreachable,
+            })) {
+                .array_type => |array_type| array_type.len,
+                else => 1,
+            },
+            else => ptr.len.toValue().toUnsignedInt(mod),
+        };
     }

     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
     pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
         return switch (val.ip_index) {
-            .undef => Value.undef,
             .none => switch (val.tag()) {
                 .repeated => val.castTag(.repeated).?.data,
                 .aggregate => val.castTag(.aggregate).?.data[index],
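The new sliceLen no longer delegates to an intern-pool helper: if the pointer carries an explicit length it is used directly; otherwise the length is derived from the pointee's type, with an array contributing its len and any other single item counting as 1. A standalone sketch of that fallback rule (the PointeeInfo shape is an illustrative model, not the compiler's representation):

    const std = @import("std");

    // Illustrative model: a slice-like value either stores its length or
    // must derive it from what it points at.
    const PointeeInfo = union(enum) {
        array: u64, // pointer to an array: length is the array length
        single, // pointer to a single item: length 1
    };

    fn sliceLen(explicit_len: ?u64, pointee: PointeeInfo) u64 {
        if (explicit_len) |len| return len;
        return switch (pointee) {
            .array => |len| len,
            .single => 1,
        };
    }

    test "length falls back to the pointee type" {
        try std.testing.expectEqual(@as(u64, 5), sliceLen(5, .single));
        try std.testing.expectEqual(@as(u64, 8), sliceLen(null, .{ .array = 8 }));
        try std.testing.expectEqual(@as(u64, 1), sliceLen(null, .single));
    }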
@@ -1934,6 +1826,9 @@ pub const Value = struct {
                 else => unreachable,
             },
             else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .undef => |ty| (try mod.intern(.{
+                    .undef = ty.toType().elemType2(mod).toIntern(),
+                })).toValue(),
                 .ptr => |ptr| switch (ptr.addr) {
                     .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
                     .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index),
@@ -2492,7 +2387,7 @@ pub const Value = struct {
         }

         return OverflowArithmeticResult{
-            .overflow_bit = boolToInt(overflowed),
+            .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
             .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
         };
     }
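Both overflow-arithmetic sites now build the u1 overflow bit with @boolToInt plus an interned integer value, replacing the removed boolToInt helper. A standalone sketch of the result shape, with plain machine integers instead of Values (addOverflow is an illustrative name):

    const std = @import("std");

    // Wrapping add that also reports overflow as a u1, mirroring the
    // OverflowArithmeticResult shape.
    fn addOverflow(a: u8, b: u8) struct { overflow_bit: u1, wrapped_result: u8 } {
        const wrapped = a +% b;
        const overflowed = wrapped < a; // for unsigned add, wraparound means overflow
        return .{ .overflow_bit = @boolToInt(overflowed), .wrapped_result = wrapped };
    }

    test "overflow bit is set exactly on wraparound" {
        const no = addOverflow(10, 20);
        try std.testing.expectEqual(@as(u1, 0), no.overflow_bit);
        const yes = addOverflow(250, 10);
        try std.testing.expectEqual(@as(u1, 1), yes.overflow_bit);
        try std.testing.expectEqual(@as(u8, 4), yes.wrapped_result);
    }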
@@ -2645,7 +2540,8 @@ pub const Value = struct {

     /// operands must be integers; handles undefined.
     pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (val.isUndef(mod)) return Value.undef;
+        if (val.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());

         const info = ty.intInfo(mod);

@@ -2687,7 +2583,8 @@ pub const Value = struct {

     /// operands must be integers; handles undefined.
     pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());

         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.

@@ -2725,7 +2622,8 @@ pub const Value = struct {

     /// operands must be integers; handles undefined.
     pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));

         const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
         const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);

@@ -2752,7 +2650,8 @@ pub const Value = struct {

     /// operands must be integers; handles undefined.
     pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());

         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.

@@ -2789,7 +2688,8 @@ pub const Value = struct {

     /// operands must be integers; handles undefined.
     pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());

         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
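Each scalar bitwise op now short-circuits for bool operands, mapping the bit operation onto Zig's boolean operators: not becomes !, and becomes and, nand becomes !(a and b), or becomes or, and xor becomes !=. A quick standalone check of those identities against the single-bit integer equivalents:

    const std = @import("std");

    test "bool operators match single-bit bitwise ops" {
        const cases = [_][2]bool{
            .{ false, false }, .{ false, true },
            .{ true, false },  .{ true, true },
        };
        for (cases) |c| {
            const a = c[0];
            const b = c[1];
            const ai = @boolToInt(a);
            const bi = @boolToInt(b);
            try std.testing.expectEqual(ai & bi, @boolToInt(a and b)); // and
            try std.testing.expectEqual(ai | bi, @boolToInt(a or b)); // or
            try std.testing.expectEqual(ai ^ bi, @boolToInt(a != b)); // xor
            try std.testing.expectEqual((ai & bi) ^ 1, @boolToInt(!(a and b))); // nand
        }
    }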
@@ -3233,7 +3133,7 @@ pub const Value = struct {
             result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
         }
         return OverflowArithmeticResult{
-            .overflow_bit = boolToInt(overflowed),
+            .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
             .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
         };
     }
@@ -4267,12 +4167,6 @@ pub const Value = struct {
         return if (x) Value.true else Value.false;
     }

-    pub fn boolToInt(x: bool) Value {
-        const zero: Value = .{ .ip_index = .zero, .legacy = undefined };
-        const one: Value = .{ .ip_index = .one, .legacy = undefined };
-        return if (x) one else zero;
-    }
-
     pub const RuntimeIndex = InternPool.RuntimeIndex;

     /// This function is used in the debugger pretty formatters in tools/ to fetch the
@@ -354,8 +354,8 @@ def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None):

 def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None):
     members = value.type.enum_members
-    # ignore .none
-    return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 1 - len(members))
+    # ignore .var_args_param_type and .none
+    return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members))

 class Module_Decl__Module_Decl_Index_SynthProvider:
     def __init__(self, value, _=None): self.value = value
@@ -365,7 +365,7 @@ class Module_Decl__Module_Decl_Index_SynthProvider:
                 mod = frame.FindVariable('mod') or frame.FindVariable('module')
                 if mod: break
             else: return
-            self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).Clone('decl')
+            self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).address_of.Clone('decl')
         except: pass
     def has_children(self): return True
     def num_children(self): return 1