mirror of https://codeberg.org/ziglang/zig.git
Merge pull request #19562 from Snektron/spirv-remove-cache

spirv: remove cache

Commit f45ba7d0c1: 16 changed files with 1103 additions and 2092 deletions
File diff suppressed because it is too large
@@ -9,10 +9,9 @@ const Opcode = spec.Opcode;
 const Word = spec.Word;
 const IdRef = spec.IdRef;
 const IdResult = spec.IdResult;
+const StorageClass = spec.StorageClass;
 
 const SpvModule = @import("Module.zig");
-const CacheRef = SpvModule.CacheRef;
-const CacheKey = SpvModule.CacheKey;
 
 /// Represents a token in the assembly template.
 const Token = struct {
@@ -127,16 +126,16 @@ const AsmValue = union(enum) {
     value: IdRef,
 
     /// This result-value represents a type registered into the module's type system.
-    ty: CacheRef,
+    ty: IdRef,
 
     /// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
     /// is of a variant that allows the result to be obtained (not an unresolved
     /// forward declaration, not in the process of being declared, etc).
-    pub fn resultId(self: AsmValue, spv: *const SpvModule) IdRef {
+    pub fn resultId(self: AsmValue) IdRef {
         return switch (self) {
             .just_declared, .unresolved_forward_reference => unreachable,
             .value => |result| result,
-            .ty => |ref| spv.resultId(ref),
+            .ty => |result| result,
         };
     }
 };
@@ -292,9 +291,10 @@ fn processInstruction(self: *Assembler) !void {
 /// refers to the result.
 fn processTypeInstruction(self: *Assembler) !AsmValue {
     const operands = self.inst.operands.items;
-    const ref = switch (self.inst.opcode) {
-        .OpTypeVoid => try self.spv.resolve(.void_type),
-        .OpTypeBool => try self.spv.resolve(.bool_type),
+    const section = &self.spv.sections.types_globals_constants;
+    const id = switch (self.inst.opcode) {
+        .OpTypeVoid => try self.spv.voidType(),
+        .OpTypeBool => try self.spv.boolType(),
         .OpTypeInt => blk: {
             const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
                 0 => .unsigned,
@@ -317,43 +317,49 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
                     return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
                 },
             }
-            break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(bits) } });
+            break :blk try self.spv.floatType(@intCast(bits));
         },
-        .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
-            .component_type = try self.resolveTypeRef(operands[1].ref_id),
-            .component_count = operands[2].literal32,
-        } }),
+        .OpTypeVector => blk: {
+            const child_type = try self.resolveRefId(operands[1].ref_id);
+            break :blk try self.spv.vectorType(operands[2].literal32, child_type);
+        },
         .OpTypeArray => {
             // TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
             // and so some consideration must be taken when entering this in the type system.
             return self.todo("process OpTypeArray", .{});
         },
         .OpTypePointer => blk: {
-            break :blk try self.spv.resolve(.{
-                .ptr_type = .{
-                    .storage_class = @enumFromInt(operands[1].value),
-                    .child_type = try self.resolveTypeRef(operands[2].ref_id),
-                    // TODO: This should be a proper reference resolved via OpTypeForwardPointer
-                    .fwd = @enumFromInt(std.math.maxInt(u32)),
-                },
+            const storage_class: StorageClass = @enumFromInt(operands[1].value);
+            const child_type = try self.resolveRefId(operands[2].ref_id);
+            const result_id = self.spv.allocId();
+            try section.emit(self.spv.gpa, .OpTypePointer, .{
+                .id_result = result_id,
+                .storage_class = storage_class,
+                .type = child_type,
             });
+            break :blk result_id;
         },
         .OpTypeFunction => blk: {
             const param_operands = operands[2..];
-            const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len);
+            const return_type = try self.resolveRefId(operands[1].ref_id);
+
+            const param_types = try self.spv.gpa.alloc(IdRef, param_operands.len);
             defer self.spv.gpa.free(param_types);
-            for (param_types, 0..) |*param, i| {
-                param.* = try self.resolveTypeRef(param_operands[i].ref_id);
+            for (param_types, param_operands) |*param, operand| {
+                param.* = try self.resolveRefId(operand.ref_id);
             }
-            break :blk try self.spv.resolve(.{ .function_type = .{
-                .return_type = try self.resolveTypeRef(operands[1].ref_id),
-                .parameters = param_types,
-            } });
+            const result_id = self.spv.allocId();
+            try section.emit(self.spv.gpa, .OpTypeFunction, .{
+                .id_result = result_id,
+                .return_type = return_type,
+                .id_ref_2 = param_types,
+            });
+            break :blk result_id;
         },
         else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}),
    };
 
-    return AsmValue{ .ty = ref };
+    return AsmValue{ .ty = id };
 }
 
 /// Emit `self.inst` into `self.spv` and `self.func`, and return the AsmValue
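The OpTypeArray branch above remains a TODO because the array length is given by a constant instruction (possibly a spec constant) rather than a literal. Purely as an illustration of the new cache-free emission style, a hypothetical shape it could take; the operand layout and emit field names are assumptions modeled on the OpTypePointer case, not the commit's code:

        .OpTypeArray => blk: {
            // Hypothetical sketch, not from the commit: assumes operands[1] is the
            // element type and operands[2] names an already-emitted constant.
            const element_type = try self.resolveRefId(operands[1].ref_id);
            const len_id = try self.resolveRefId(operands[2].ref_id);
            const result_id = self.spv.allocId();
            try section.emit(self.spv.gpa, .OpTypeArray, .{
                .id_result = result_id,
                .element_type = element_type,
                .length = len_id,
            });
            break :blk result_id;
        },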
@@ -410,7 +416,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
             .ref_id => |index| {
                 const result = try self.resolveRef(index);
                 try section.ensureUnusedCapacity(self.spv.gpa, 1);
-                section.writeOperand(spec.IdRef, result.resultId(self.spv));
+                section.writeOperand(spec.IdRef, result.resultId());
             },
             .string => |offset| {
                 const text = std.mem.sliceTo(self.inst.string_bytes.items[offset..], 0);
@@ -459,18 +465,9 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
     }
 }
 
-/// Resolve a value reference as type.
-fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !CacheRef {
+fn resolveRefId(self: *Assembler, ref: AsmValue.Ref) !IdRef {
     const value = try self.resolveRef(ref);
-    switch (value) {
-        .just_declared, .unresolved_forward_reference => unreachable,
-        .ty => |ty_ref| return ty_ref,
-        else => {
-            const name = self.value_map.keys()[ref];
-            // TODO: Improve source location.
-            return self.fail(0, "expected operand %{s} to refer to a type", .{name});
-        },
-    }
+    return value.resultId();
 }
 
 /// Attempt to parse an instruction into `self.inst`.
@@ -709,22 +706,41 @@ fn parseContextDependentNumber(self: *Assembler) !void {
     assert(self.inst.opcode == .OpConstant or self.inst.opcode == .OpSpecConstant);
 
     const tok = self.currentToken();
-    const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id);
-    const result_type = self.spv.cache.lookup(result_type_ref);
-    switch (result_type) {
-        .int_type => |int| {
-            try self.parseContextDependentInt(int.signedness, int.bits);
-        },
-        .float_type => |float| {
-            switch (float.bits) {
+    const result = try self.resolveRef(self.inst.operands.items[0].ref_id);
+    const result_id = result.resultId();
+    // We are going to cheat a little bit: The types we are interested in, int and float,
+    // are added to the module and cached via self.spv.intType and self.spv.floatType. Therefore,
+    // we can determine the width of these types by directly checking the cache.
+    // This only works if the Assembler and codegen both use spv.intType and spv.floatType though.
+    // We don't expect there to be many of these types, so just look it up every time.
+    // TODO: Could be improved to be a little bit more efficient.
+
+    {
+        var it = self.spv.cache.int_types.iterator();
+        while (it.next()) |entry| {
+            const id = entry.value_ptr.*;
+            if (id != result_id) continue;
+            const info = entry.key_ptr.*;
+            return try self.parseContextDependentInt(info.signedness, info.bits);
+        }
+    }
+
+    {
+        var it = self.spv.cache.float_types.iterator();
+        while (it.next()) |entry| {
+            const id = entry.value_ptr.*;
+            if (id != result_id) continue;
+            const info = entry.key_ptr.*;
+            switch (info.bits) {
                 16 => try self.parseContextDependentFloat(16),
                 32 => try self.parseContextDependentFloat(32),
                 64 => try self.parseContextDependentFloat(64),
-                else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{float.bits}),
+                else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{info.bits}),
             }
-        },
-        else => return self.fail(tok.start, "cannot parse literal constant", .{}),
-    }
+        }
+    }
+
+    return self.fail(tok.start, "cannot parse literal constant", .{});
 }
 
 fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness, width: u32) !void {
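The "cheat" comment above depends on an invariant worth spelling out: intType and floatType return a stable result-id per key, so scanning the cache by value recovers the type info for a result-id. A minimal sketch of that invariant as a hypothetical test (not part of this diff; assumes it lives in Module.zig, where `Module` is `@This()`):

test "intType returns a stable id per key" {
    // Hypothetical test, not from the commit.
    var spv = Module.init(std.testing.allocator);
    defer spv.deinit();

    const a = try spv.intType(.signed, 32);
    const b = try spv.intType(.signed, 32);
    try std.testing.expectEqual(a, b); // cache hit: same result-id

    // Reverse lookup, as parseContextDependentNumber does:
    var it = spv.cache.int_types.iterator();
    while (it.next()) |entry| {
        if (entry.value_ptr.* != a) continue;
        try std.testing.expectEqual(@as(u16, 32), entry.key_ptr.bits);
    }
}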
File diff suppressed because it is too large
@@ -20,11 +20,6 @@ const IdResultType = spec.IdResultType;
 
 const Section = @import("Section.zig");
 
-const Cache = @import("Cache.zig");
-pub const CacheKey = Cache.Key;
-pub const CacheRef = Cache.Ref;
-pub const CacheString = Cache.String;
-
 /// This structure represents a function that is in-progress of being emitted.
 /// Commonly, the contents of this structure will be merged with the appropriate
 /// sections of the module and re-used. Note that the SPIR-V module system makes
@@ -98,7 +93,7 @@ pub const EntryPoint = struct {
     /// The declaration that should be exported.
     decl_index: Decl.Index,
     /// The name of the kernel to be exported.
-    name: CacheString,
+    name: []const u8,
     /// Calling Convention
     execution_model: spec.ExecutionModel,
 };
@@ -106,6 +101,9 @@ pub const EntryPoint = struct {
 /// A general-purpose allocator which may be used to allocate resources for this module
 gpa: Allocator,
 
+/// Arena for things that need to live for the length of this program.
+arena: std.heap.ArenaAllocator,
+
 /// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
 sections: struct {
     /// Capability instructions
@@ -143,14 +141,21 @@ sections: struct {
 /// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these.
 next_result_id: Word,
 
-/// Cache for results of OpString instructions for module file names fed to OpSource.
-/// Since OpString is pretty much only used for those, we don't need to keep track of all strings,
-/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
-source_file_names: std.AutoArrayHashMapUnmanaged(CacheString, IdRef) = .{},
+/// Cache for results of OpString instructions.
+strings: std.StringArrayHashMapUnmanaged(IdRef) = .{},
 
-/// SPIR-V type- and constant cache. This structure is used to store information about these in a more
-/// efficient manner.
-cache: Cache = .{},
+/// Some types shouldn't be emitted more than one time, but cannot be caught by
+/// the `intern_map` during codegen. Sometimes, IDs are compared to check if
+/// types are the same, so we can't delay until the dedup pass. Therefore,
+/// this is an ad-hoc structure to cache types where required.
+/// According to the SPIR-V specification, section 2.8, this includes all non-aggregate
+/// non-pointer types.
+cache: struct {
+    bool_type: ?IdRef = null,
+    void_type: ?IdRef = null,
+    int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .{},
+    float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .{},
+} = .{},
 
 /// Set of Decls, referred to by Decl.Index.
 decls: std.ArrayListUnmanaged(Decl) = .{},
@@ -168,6 +173,7 @@ extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) =
 pub fn init(gpa: Allocator) Module {
     return .{
         .gpa = gpa,
+        .arena = std.heap.ArenaAllocator.init(gpa),
         .next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1.
     };
 }
@@ -184,8 +190,10 @@ pub fn deinit(self: *Module) void {
     self.sections.types_globals_constants.deinit(self.gpa);
     self.sections.functions.deinit(self.gpa);
 
-    self.source_file_names.deinit(self.gpa);
-    self.cache.deinit(self);
+    self.strings.deinit(self.gpa);
+
+    self.cache.int_types.deinit(self.gpa);
+    self.cache.float_types.deinit(self.gpa);
 
     self.decls.deinit(self.gpa);
     self.decl_deps.deinit(self.gpa);
@@ -193,40 +201,37 @@ pub fn deinit(self: *Module) void {
     self.entry_points.deinit(self.gpa);
 
     self.extended_instruction_set.deinit(self.gpa);
+    self.arena.deinit();
 
     self.* = undefined;
 }
 
-pub fn allocId(self: *Module) spec.IdResult {
-    defer self.next_result_id += 1;
-    return @enumFromInt(self.next_result_id);
+pub const IdRange = struct {
+    base: u32,
+    len: u32,
+
+    pub fn at(range: IdRange, i: usize) IdResult {
+        assert(i < range.len);
+        return @enumFromInt(range.base + i);
+    }
+};
+
+pub fn allocIds(self: *Module, n: u32) IdRange {
+    defer self.next_result_id += n;
+    return .{
+        .base = self.next_result_id,
+        .len = n,
+    };
 }
 
-pub fn allocIds(self: *Module, n: u32) spec.IdResult {
-    defer self.next_result_id += n;
-    return @enumFromInt(self.next_result_id);
+pub fn allocId(self: *Module) IdResult {
+    return self.allocIds(1).at(0);
 }
 
 pub fn idBound(self: Module) Word {
     return self.next_result_id;
 }
 
-pub fn resolve(self: *Module, key: CacheKey) !CacheRef {
-    return self.cache.resolve(self, key);
-}
-
-pub fn resultId(self: *const Module, ref: CacheRef) IdResult {
-    return self.cache.resultId(ref);
-}
-
-pub fn resolveId(self: *Module, key: CacheKey) !IdResult {
-    return self.resultId(try self.resolve(key));
-}
-
-pub fn resolveString(self: *Module, str: []const u8) !CacheString {
-    return try self.cache.addString(self, str);
-}
-
 fn addEntryPointDeps(
     self: *Module,
     decl_index: Decl.Index,
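In passing, a usage sketch of the new allocIds/IdRange pair (hypothetical caller code, not part of this diff; only allocIds, IdRange.at, and the assert inside it come from the commit):

const ids = spv.allocIds(3);
// Reserve three consecutive result-ids with one bump of next_result_id,
// then hand them out individually.
const a_id = ids.at(0);
const b_id = ids.at(1);
const c_id = ids.at(2);
// ids.at(3) would trip the assert(i < range.len) inside IdRange.at.

Consecutive ranges like this let a caller compute related ids by offset instead of storing each one separately.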
@@ -271,7 +276,7 @@ fn entryPoints(self: *Module) !Section {
         try entry_points.emit(self.gpa, .OpEntryPoint, .{
             .execution_model = entry_point.execution_model,
             .entry_point = entry_point_id,
-            .name = self.cache.getString(entry_point.name).?,
+            .name = entry_point.name,
             .interface = interface.items,
         });
     }
@@ -286,9 +291,6 @@ pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word {
     var entry_points = try self.entryPoints();
     defer entry_points.deinit(self.gpa);
 
-    var types_constants = try self.cache.materialize(self);
-    defer types_constants.deinit(self.gpa);
-
     const header = [_]Word{
         spec.magic_number,
         // TODO: From cpu features
@@ -331,7 +333,6 @@ pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word {
         self.sections.debug_strings.toWords(),
         self.sections.debug_names.toWords(),
         self.sections.annotations.toWords(),
-        types_constants.toWords(),
         self.sections.types_globals_constants.toWords(),
         self.sections.functions.toWords(),
     };
@@ -376,83 +377,126 @@ pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
     return result_id;
 }
 
-/// Fetch the result-id of an OpString instruction that encodes the path of the source
-/// file of the decl. This function may also emit an OpSource with source-level information regarding
-/// the decl.
-pub fn resolveSourceFileName(self: *Module, path: []const u8) !IdRef {
-    const path_ref = try self.resolveString(path);
-    const result = try self.source_file_names.getOrPut(self.gpa, path_ref);
-    if (!result.found_existing) {
-        const file_result_id = self.allocId();
-        result.value_ptr.* = file_result_id;
-        try self.sections.debug_strings.emit(self.gpa, .OpString, .{
-            .id_result = file_result_id,
-            .string = path,
-        });
+/// Fetch the result-id of an instruction corresponding to a string.
+pub fn resolveString(self: *Module, string: []const u8) !IdRef {
+    if (self.strings.get(string)) |id| {
+        return id;
     }
 
-    return result.value_ptr.*;
+    const id = self.allocId();
+    try self.strings.put(self.gpa, try self.arena.allocator().dupe(u8, string), id);
+
+    try self.sections.debug_strings.emit(self.gpa, .OpString, .{
+        .id_result = id,
+        .string = string,
+    });
+
+    return id;
 }
 
-pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
-    return try self.resolve(.{ .int_type = .{
-        .signedness = signedness,
-        .bits = bits,
-    } });
-}
-
-pub fn vectorType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
-    return try self.resolve(.{ .vector_type = .{
-        .component_type = elem_ty_ref,
-        .component_count = len,
-    } });
-}
-
-pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
-    const len_ty_ref = try self.resolve(.{ .int_type = .{
-        .signedness = .unsigned,
-        .bits = 32,
-    } });
-    const len_ref = try self.resolve(.{ .int = .{
-        .ty = len_ty_ref,
-        .value = .{ .uint64 = len },
-    } });
-    return try self.resolve(.{ .array_type = .{
-        .element_type = elem_ty_ref,
-        .length = len_ref,
-    } });
-}
-
-pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
-    const ty = self.cache.lookup(ty_ref).int_type;
-    const Value = Cache.Key.Int.Value;
-    return try self.resolveId(.{ .int = .{
-        .ty = ty_ref,
-        .value = switch (ty.signedness) {
-            .signed => Value{ .int64 = @intCast(value) },
-            .unsigned => Value{ .uint64 = @intCast(value) },
-        },
-    } });
-}
-
-pub fn constUndef(self: *Module, ty_ref: CacheRef) !IdRef {
-    return try self.resolveId(.{ .undef = .{ .ty = ty_ref } });
-}
-
-pub fn constNull(self: *Module, ty_ref: CacheRef) !IdRef {
-    return try self.resolveId(.{ .null = .{ .ty = ty_ref } });
-}
-
-pub fn constBool(self: *Module, ty_ref: CacheRef, value: bool) !IdRef {
-    return try self.resolveId(.{ .bool = .{ .ty = ty_ref, .value = value } });
-}
-
-pub fn constComposite(self: *Module, ty_ref: CacheRef, members: []const IdRef) !IdRef {
+pub fn structType(self: *Module, types: []const IdRef, maybe_names: ?[]const []const u8) !IdRef {
     const result_id = self.allocId();
-    try self.sections.types_globals_constants.emit(self.gpa, .OpSpecConstantComposite, .{
-        .id_result_type = self.resultId(ty_ref),
+
+    try self.sections.types_globals_constants.emit(self.gpa, .OpTypeStruct, .{
         .id_result = result_id,
+        .id_ref = types,
+    });
+
+    if (maybe_names) |names| {
+        assert(names.len == types.len);
+        for (names, 0..) |name, i| {
+            try self.memberDebugName(result_id, @intCast(i), name);
+        }
+    }
+
+    return result_id;
+}
+
+pub fn boolType(self: *Module) !IdRef {
+    if (self.cache.bool_type) |id| return id;
+
+    const result_id = self.allocId();
+    try self.sections.types_globals_constants.emit(self.gpa, .OpTypeBool, .{
+        .id_result = result_id,
+    });
+    self.cache.bool_type = result_id;
+    return result_id;
+}
+
+pub fn voidType(self: *Module) !IdRef {
+    if (self.cache.void_type) |id| return id;
+
+    const result_id = self.allocId();
+    try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVoid, .{
+        .id_result = result_id,
+    });
+    self.cache.void_type = result_id;
+    try self.debugName(result_id, "void");
+    return result_id;
+}
+
+pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !IdRef {
+    assert(bits > 0);
+    const entry = try self.cache.int_types.getOrPut(self.gpa, .{ .signedness = signedness, .bits = bits });
+    if (!entry.found_existing) {
+        const result_id = self.allocId();
+        entry.value_ptr.* = result_id;
+        try self.sections.types_globals_constants.emit(self.gpa, .OpTypeInt, .{
+            .id_result = result_id,
+            .width = bits,
+            .signedness = switch (signedness) {
+                .signed => 1,
+                .unsigned => 0,
+            },
+        });
+
+        switch (signedness) {
+            .signed => try self.debugNameFmt(result_id, "i{}", .{bits}),
+            .unsigned => try self.debugNameFmt(result_id, "u{}", .{bits}),
+        }
+    }
+    return entry.value_ptr.*;
+}
+
+pub fn floatType(self: *Module, bits: u16) !IdRef {
+    assert(bits > 0);
+    const entry = try self.cache.float_types.getOrPut(self.gpa, .{ .bits = bits });
+    if (!entry.found_existing) {
+        const result_id = self.allocId();
+        entry.value_ptr.* = result_id;
+        try self.sections.types_globals_constants.emit(self.gpa, .OpTypeFloat, .{
+            .id_result = result_id,
+            .width = bits,
+        });
+        try self.debugNameFmt(result_id, "f{}", .{bits});
+    }
+    return entry.value_ptr.*;
+}
+
+pub fn vectorType(self: *Module, len: u32, child_id: IdRef) !IdRef {
+    const result_id = self.allocId();
+    try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVector, .{
+        .id_result = result_id,
+        .component_type = child_id,
+        .component_count = len,
+    });
+    return result_id;
+}
+
+pub fn constUndef(self: *Module, ty_id: IdRef) !IdRef {
+    const result_id = self.allocId();
+    try self.sections.types_globals_constants.emit(self.gpa, .OpUndef, .{
+        .id_result_type = ty_id,
+        .id_result = result_id,
+    });
+    return result_id;
+}
+
+pub fn constNull(self: *Module, ty_id: IdRef) !IdRef {
+    const result_id = self.allocId();
+    try self.sections.types_globals_constants.emit(self.gpa, .OpConstantNull, .{
+        .id_result_type = ty_id,
+        .id_result = result_id,
-        .constituents = members,
     });
     return result_id;
 }
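Taken together, the new helpers hand back result-ids directly, where the old API returned CacheRefs that had to be resolved separately. A hedged usage sketch (hypothetical caller; the member names are invented):

// All of these return IdRef directly. int/float/bool/void types are
// deduplicated through Module.cache; the rest emit a fresh instruction.
const u32_id = try spv.intType(.unsigned, 32);
const f16_id = try spv.floatType(16);
const vec4h_id = try spv.vectorType(4, f16_id);
const struct_id = try spv.structType(
    &.{ u32_id, vec4h_id },
    &.{ "len", "color" }, // optional member debug names
);
const undef_id = try spv.constUndef(struct_id);
_ = undef_id;

Note that vectorType and structType are not cached, which matches the comment on Module.cache: only non-aggregate, non-pointer types need the ad-hoc cache.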
@@ -520,7 +564,7 @@ pub fn declareEntryPoint(
 ) !void {
     try self.entry_points.append(self.gpa, .{
         .decl_index = decl_index,
-        .name = try self.resolveString(name),
+        .name = try self.arena.allocator().dupe(u8, name),
         .execution_model = execution_model,
     });
 }
@@ -245,7 +245,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
     const module = try spv.finalize(arena, target);
     errdefer arena.free(module);
 
-    const linked_module = self.linkModule(arena, module) catch |err| switch (err) {
+    const linked_module = self.linkModule(arena, module, &sub_prog_node) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => |other| {
             log.err("error while linking: {s}\n", .{@errorName(other)});
@@ -256,7 +256,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
     try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module));
 }
 
-fn linkModule(self: *SpirV, a: Allocator, module: []Word) ![]Word {
+fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: *std.Progress.Node) ![]Word {
     _ = self;
 
     const lower_invocation_globals = @import("SpirV/lower_invocation_globals.zig");
@@ -267,9 +267,9 @@ fn linkModule(self: *SpirV, a: Allocator, module: []Word) ![]Word {
     defer parser.deinit();
     var binary = try parser.parse(module);
 
-    try lower_invocation_globals.run(&parser, &binary);
-    try prune_unused.run(&parser, &binary);
-    try dedup.run(&parser, &binary);
+    try lower_invocation_globals.run(&parser, &binary, progress);
+    try prune_unused.run(&parser, &binary, progress);
+    try dedup.run(&parser, &binary, progress);
 
     return binary.finalize(a);
 }
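All three passes follow the same progress-reporting shape. For reference, a sketch of the skeleton a hypothetical fourth pass would use; every std.Progress.Node call here appears elsewhere in this diff:

pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void {
    // Hypothetical pass skeleton, not from the commit.
    var sub_node = progress.start("my pass", 0);
    sub_node.activate();
    defer sub_node.end();

    _ = parser;
    sub_node.setEstimatedTotalItems(binary.instructions.len);
    var it = binary.iterateInstructions();
    while (it.next()) |inst| {
        defer sub_node.setCompletedItems(inst.offset);
        // ... per-instruction work ...
    }
}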
@@ -116,7 +116,8 @@ pub const Instruction = struct {
         const instruction_len = self.words[self.offset] >> 16;
         defer self.offset += instruction_len;
         defer self.index += 1;
-        assert(instruction_len != 0 and self.offset < self.words.len); // Verified in BinaryModule.parse.
+        assert(instruction_len != 0);
+        assert(self.offset < self.words.len);
 
         return Instruction{
             .opcode = @enumFromInt(self.words[self.offset] & 0xFFFF),
@@ -47,6 +47,10 @@ const ModuleInfo = struct {
         result_id_index: u16,
         /// The first decoration in `self.decorations`.
         first_decoration: u32,
+
+        fn operands(self: Entity, binary: *const BinaryModule) []const Word {
+            return binary.instructions[self.first_operand..][0..self.num_operands];
+        }
     };
 
     /// Maps result-id to Entity's
@@ -210,10 +214,41 @@ const EntityContext = struct {
 
        const entity = self.info.entities.values()[index];
 
-        // If the current pointer is recursive, don't immediately add it to the map. This is to ensure that
-        // if the current pointer is already recursive, it gets the same hash as a pointer that points to the
-        // same child but has a different result-id.
        if (entity.kind == .OpTypePointer) {
-            // This may be either a pointer that is forward-referenced in the future,
-            // or a forward reference to a pointer.
-            const entry = try self.ptr_map_a.getOrPut(self.a, id);
+            // Note: We use the **struct** here instead of the pointer itself, to avoid an edge case like this:
+            //
+            //   A - C*'
+            //        \
+            //         C - C*'
+            //        /
+            //   B - C*"
+            //
+            // In this case, hashing A goes like
+            //   A -> C*' -> C -> C*' recursion
+            // And hashing B goes like
+            //   B -> C*" -> C -> C*' -> C -> C*' recursion
+            // There are several calls to ptrType in codegen that may cause C*' and C*" to be generated as separate
+            // types. This is not a problem for C itself though - this can only be generated through resolveType()
+            // and so ensures equality by Zig's type system. Technically the above problem is still present, but it
+            // would only be present in a structure such as
+            //
+            //   A - C*' - C'
+            //        \
+            //         C*" - C - C*
+            //        /
+            //   B
+            //
+            // where there is a duplicate definition of struct C. Resolving this requires a much more time consuming
+            // algorithm though, and because we don't expect any correctness issues with it, we leave that for now.
+
+            // TODO: Do we need to mind the storage class here? It's going to be recursive regardless, right?
+            const struct_id: ResultId = @enumFromInt(entity.operands(self.binary)[2]);
+            const entry = try self.ptr_map_a.getOrPut(self.a, struct_id);
            if (entry.found_existing) {
                // Pointer already seen. Hash the index instead of recursing into its children.
                std.hash.autoHash(hasher, entry.index);
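The getOrPut/swapRemove dance above is the standard trick for hashing a graph that may contain cycles: mark a node as on-path before descending, hash a repeat visit by its map index instead of recursing, and unmark on the way out. A self-contained sketch of the idea (a simplified stand-in, not the dedup pass itself):

const std = @import("std");

// Nodes form a graph that may contain cycles; a node already on the current
// path is hashed by its position on that path instead of being recursed into.
const Node = struct { id: u32, edges: []const *const Node };

fn hashNode(
    hasher: *std.hash.Wyhash,
    seen: *std.AutoArrayHashMap(u32, void),
    node: *const Node,
) !void {
    const entry = try seen.getOrPut(node.id);
    if (entry.found_existing) {
        // Back-reference: hash the path index, like the pointer map above.
        std.hash.autoHash(hasher, entry.index);
        return;
    }
    std.hash.autoHash(hasher, node.edges.len);
    for (node.edges) |child| try hashNode(hasher, seen, child);
    std.debug.assert(seen.swapRemove(node.id)); // unmark on the way out
}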
@@ -228,12 +263,17 @@ const EntityContext = struct {
         for (decorations) |decoration| {
             try self.hashEntity(hasher, decoration);
         }
+
+        if (entity.kind == .OpTypePointer) {
+            const struct_id: ResultId = @enumFromInt(entity.operands(self.binary)[2]);
+            assert(self.ptr_map_a.swapRemove(struct_id));
+        }
     }
 
     fn hashEntity(self: *EntityContext, hasher: *std.hash.Wyhash, entity: ModuleInfo.Entity) !void {
         std.hash.autoHash(hasher, entity.kind);
         // Process operands
-        const operands = self.binary.instructions[entity.first_operand..][0..entity.num_operands];
+        const operands = entity.operands(self.binary);
         for (operands, 0..) |operand, i| {
             if (i == entity.result_id_index) {
                 // Not relevant, skip...
@@ -273,12 +313,19 @@ const EntityContext = struct {
         const entity_a = self.info.entities.values()[index_a];
         const entity_b = self.info.entities.values()[index_b];
 
+        if (entity_a.kind != entity_b.kind) {
+            return false;
+        }
+
         if (entity_a.kind == .OpTypePointer) {
             // May be a forward reference, or should be saved as a potential
             // forward reference in the future. Whatever the case, it should
             // be the same for both a and b.
-            const entry_a = try self.ptr_map_a.getOrPut(self.a, id_a);
-            const entry_b = try self.ptr_map_b.getOrPut(self.a, id_b);
+            const struct_id_a: ResultId = @enumFromInt(entity_a.operands(self.binary)[2]);
+            const struct_id_b: ResultId = @enumFromInt(entity_b.operands(self.binary)[2]);
+
+            const entry_a = try self.ptr_map_a.getOrPut(self.a, struct_id_a);
+            const entry_b = try self.ptr_map_b.getOrPut(self.a, struct_id_b);
 
             if (entry_a.found_existing != entry_b.found_existing) return false;
             if (entry_a.index != entry_b.index) return false;
@@ -306,6 +353,14 @@ const EntityContext = struct {
             }
         }
 
+        if (entity_a.kind == .OpTypePointer) {
+            const struct_id_a: ResultId = @enumFromInt(entity_a.operands(self.binary)[2]);
+            const struct_id_b: ResultId = @enumFromInt(entity_b.operands(self.binary)[2]);
+
+            assert(self.ptr_map_a.swapRemove(struct_id_a));
+            assert(self.ptr_map_b.swapRemove(struct_id_b));
+        }
+
         return true;
     }
 
@@ -316,8 +371,8 @@ const EntityContext = struct {
             return false;
         }
 
-        const operands_a = self.binary.instructions[entity_a.first_operand..][0..entity_a.num_operands];
-        const operands_b = self.binary.instructions[entity_b.first_operand..][0..entity_b.num_operands];
+        const operands_a = entity_a.operands(self.binary);
+        const operands_b = entity_b.operands(self.binary);
 
         // Note: returns false for operands that have explicit defaults in optional operands... oh well
         if (operands_a.len != operands_b.len) {
@@ -363,7 +418,11 @@ const EntityHashContext = struct {
     }
 };
 
-pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void {
+pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void {
+    var sub_node = progress.start("deduplicate", 0);
+    sub_node.activate();
+    defer sub_node.end();
+
     var arena = std.heap.ArenaAllocator.init(parser.a);
     defer arena.deinit();
     const a = arena.allocator();
@@ -376,6 +435,7 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
         .info = &info,
         .binary = binary,
     };
+
     for (info.entities.keys()) |id| {
         _ = try ctx.hash(id);
     }
@@ -395,6 +455,8 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
         }
     }
 
+    sub_node.setEstimatedTotalItems(binary.instructions.len);
+
     // Now process the module, and replace instructions where needed.
     var section = Section{};
     var it = binary.iterateInstructions();
@@ -402,6 +464,8 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
     var new_operands = std.ArrayList(u32).init(a);
     var emitted_ptrs = std.AutoHashMap(ResultId, void).init(a);
     while (it.next()) |inst| {
+        defer sub_node.setCompletedItems(inst.offset);
+
         // Result-id can only be the first or second operand
         const inst_spec = parser.getInstSpec(inst.opcode).?;
 
@@ -454,7 +518,7 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
             if (entity.kind == .OpTypePointer and !emitted_ptrs.contains(id)) {
                 // Grab the pointer's storage class from its operands in the original
                 // module.
-                const storage_class: spec.StorageClass = @enumFromInt(binary.instructions[entity.first_operand + 1]);
+                const storage_class: spec.StorageClass = @enumFromInt(entity.operands(binary)[1]);
                 try section.emit(a, .OpTypeForwardPointer, .{
                     .pointer_type = id,
                     .storage_class = storage_class,
@@ -682,7 +682,11 @@ const ModuleBuilder = struct {
     }
 };
 
-pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void {
+pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void {
+    var sub_node = progress.start("Lower invocation globals", 6);
+    sub_node.activate();
+    defer sub_node.end();
+
     var arena = std.heap.ArenaAllocator.init(parser.a);
     defer arena.deinit();
     const a = arena.allocator();
@@ -691,10 +695,16 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
     try info.resolve(a);
 
     var builder = try ModuleBuilder.init(a, binary.*, info);
+    sub_node.completeOne();
     try builder.deriveNewFnInfo(info);
+    sub_node.completeOne();
     try builder.processPreamble(binary.*, info);
+    sub_node.completeOne();
     try builder.emitFunctionTypes(info);
+    sub_node.completeOne();
     try builder.rewriteFunctions(parser, binary.*, info);
+    sub_node.completeOne();
     try builder.emitNewEntryPoints(info);
+    sub_node.completeOne();
     try builder.finalize(parser.a, binary);
 }
@@ -255,7 +255,11 @@ fn removeIdsFromMap(a: Allocator, map: anytype, info: ModuleInfo, alive_marker:
     }
 }
 
-pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void {
+pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void {
+    var sub_node = progress.start("Prune unused IDs", 0);
+    sub_node.activate();
+    defer sub_node.end();
+
     var arena = std.heap.ArenaAllocator.init(parser.a);
     defer arena.deinit();
     const a = arena.allocator();
@@ -285,9 +289,13 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.P
 
     var section = Section{};
 
+    sub_node.setEstimatedTotalItems(binary.instructions.len);
+
     var new_functions_section: ?usize = null;
     var it = binary.iterateInstructions();
     skip: while (it.next()) |inst| {
+        defer sub_node.setCompletedItems(inst.offset);
+
         const inst_spec = parser.getInstSpec(inst.opcode).?;
 
         reemit: {
@@ -23,8 +23,6 @@ test "simple destructure" {
 }
 
 test "destructure with comptime syntax" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             {
@@ -181,7 +181,6 @@ test "function with complex callconv and return type expressions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     try expect(fComplexCallconvRet(3).x == 9);
 }
@@ -447,7 +447,6 @@ test "return type of generic function is function pointer" {
 
 test "coerced function body has inequal value with its uncoerced body" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const S = struct {
         const A = B(i32, c);
@@ -12,7 +12,6 @@ const math = std.math;
 test "assignment operators" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var i: u32 = 0;
     i += 5;
@@ -188,7 +187,6 @@ test "@ctz vectors" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
         // This regressed with LLVM 14:
@@ -850,8 +850,6 @@ test "inline switch range that includes the maximum value of the switched type"
 }
 
 test "nested break ignores switch conditions and breaks instead" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const S = struct {
         fn register_to_address(ident: []const u8) !u8 {
             const reg: u8 = if (std.mem.eql(u8, ident, "zero")) 0x00 else blk: {
@@ -1750,7 +1750,6 @@ test "reinterpret extern union" {
         // https://github.com/ziglang/zig/issues/19389
         return error.SkipZigTest;
     }
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const U = extern union {
         foo: u8,
@@ -76,7 +76,6 @@ test "vector int operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1037,7 +1036,6 @@ test "multiplication-assignment operator with an array operand" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {