spirv: ziggify and remove unknown spirv features

`OpCapability` and `OpExtension` can now also be emitted from inline assembly.
Ali Cheraghi 2025-02-15 00:08:05 +03:30
parent 85169bbba2
commit d5e1cb3ea2
No known key found for this signature in database
GPG key ID: 7AD1F6DBB6B5A0DA
9 changed files with 154 additions and 3215 deletions
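As a rough illustration of the description above (this snippet is not part of the diff, and the capability and extension names are arbitrary examples), a kernel can now request a capability or an extension directly from inline SPIR-V assembly:

export fn example() callconv(.Kernel) void {
    // These module-level instructions are picked up by the assembler and routed
    // through Module.addCapability / Module.addExtension (see the changes below).
    asm volatile (
        \\OpCapability Int64
        \\OpExtension "SPV_KHR_physical_storage_buffer"
    );
}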


@@ -1184,7 +1184,7 @@ pub const Cpu = struct {
pub const Set = struct {
ints: [usize_count]usize,
-pub const needed_bit_count = 398;
+pub const needed_bit_count = 288;
pub const byte_count = (needed_bit_count + 7) / 8;
pub const usize_count = (byte_count + (@sizeOf(usize) - 1)) / @sizeOf(usize);
pub const Index = std.math.Log2Int(std.meta.Int(.unsigned, usize_count * @bitSizeOf(usize)));
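With the new count, and assuming an 8-byte usize, byte_count = (288 + 7) / 8 = 36 and usize_count = (36 + 7) / 8 = 5, down from 50 bytes and 7 words for the old needed_bit_count of 398.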

File diff suppressed because it is too large


@@ -552,7 +552,7 @@ const NavGen = struct {
}
fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
-if (self.spv.hasFeature(.Kernel)) {
+if (self.spv.hasFeature(.kernel)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
.id_result_type = type_id,
@@ -591,10 +591,10 @@ const NavGen = struct {
// 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
-.{ .bits = 8, .feature = .Int8 },
+.{ .bits = 8, .feature = .int8 },
-.{ .bits = 16, .feature = .Int16 },
+.{ .bits = 16, .feature = .int16 },
.{ .bits = 32, .feature = null },
-.{ .bits = 64, .feature = .Int64 },
+.{ .bits = 64, .feature = .int64 },
};
for (ints) |int| {
@@ -612,7 +612,7 @@ const NavGen = struct {
/// is no way of knowing whether those are actually supported.
/// TODO: Maybe this should be cached?
fn largestSupportedIntBits(self: *NavGen) u16 {
-return if (self.spv.hasFeature(.Int64)) 64 else 32;
+return if (self.spv.hasFeature(.int64)) 64 else 32;
}
/// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by
@@ -644,7 +644,7 @@ const NavGen = struct {
if (elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type) {
if (len > 1 and len <= 4) return true;
-if (self.spv.hasFeature(.Vector16)) return (len == 8 or len == 16);
+if (self.spv.hasFeature(.vector16)) return (len == 8 or len == 16);
}
return false;
@@ -1241,7 +1241,7 @@ const NavGen = struct {
};
// Kernel only supports unsigned ints.
-if (self.spv.hasFeature(.Kernel)) {
+if (self.spv.hasFeature(.kernel)) {
return self.spv.intType(.unsigned, backing_bits);
}
@@ -1465,10 +1465,10 @@ const NavGen = struct {
// so if the float is not supported, just return an error.
const bits = ty.floatBits(target);
const supported = switch (bits) {
-16 => Target.spirv.featureSetHas(target.cpu.features, .Float16),
+16 => self.spv.hasFeature(.float16),
// 32-bit floats are always supported (see spec, 2.16.1, Data rules).
32 => true,
-64 => Target.spirv.featureSetHas(target.cpu.features, .Float64),
+64 => self.spv.hasFeature(.float64),
else => false,
};
@@ -1511,7 +1511,7 @@ const NavGen = struct {
return try self.arrayType(1, elem_ty_id);
} else {
const result_id = try self.arrayType(total_len, elem_ty_id);
-if (self.spv.hasFeature(.Shader)) {
+if (self.spv.hasFeature(.shader)) {
try self.spv.decorate(result_id, .{ .ArrayStride = .{
.array_stride = @intCast(elem_ty.abiSize(zcu)),
} });
@@ -1645,7 +1645,7 @@ const NavGen = struct {
continue;
}
-if (self.spv.hasFeature(.Shader)) {
+if (self.spv.hasFeature(.shader)) {
try self.spv.decorateMember(result_id, index, .{ .Offset = .{
.byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
} });
@@ -1748,10 +1748,10 @@ const NavGen = struct {
fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
return switch (as) {
-.generic => if (self.spv.hasFeature(.GenericPointer)) .Generic else .Function,
+.generic => if (self.spv.hasFeature(.generic_pointer)) .Generic else .Function,
.shared => .Workgroup,
.local => .Function,
-.global => if (self.spv.hasFeature(.Shader)) .PhysicalStorageBuffer else .CrossWorkgroup,
+.global => if (self.spv.hasFeature(.shader)) .PhysicalStorageBuffer else .CrossWorkgroup,
.constant => .UniformConstant,
.push_constant => .PushConstant,
.input => .Input,
@@ -2461,7 +2461,7 @@ const NavGen = struct {
// TODO: These instructions don't seem to be working
// properly for LLVM-based backends on OpenCL for 8- and
// 16-component vectors.
-.i_abs => if (self.spv.hasFeature(.Vector16) and v.components() >= 8) v.unroll() else v,
+.i_abs => if (self.spv.hasFeature(.vector16) and v.components() >= 8) v.unroll() else v,
else => v,
};
};
@@ -3650,7 +3650,7 @@ const NavGen = struct {
// depending on the result type. Do that when
// bitCast is implemented for vectors.
// This is only relevant for Vulkan
-assert(self.spv.hasFeature(.Kernel)); // TODO
+assert(self.spv.hasFeature(.kernel)); // TODO
return try self.normalize(abs_value, self.arithmeticTypeInfo(result_ty));
},
@@ -3968,7 +3968,7 @@ const NavGen = struct {
.float, .bool => unreachable,
}
-assert(self.spv.hasFeature(.Kernel)); // TODO
+assert(self.spv.hasFeature(.kernel)); // TODO
const count = try self.buildUnary(op, operand);
@@ -4204,7 +4204,7 @@ const NavGen = struct {
defer self.gpa.free(ids);
const result_id = self.spv.allocId();
-if (self.spv.hasFeature(.Kernel)) {
+if (self.spv.hasFeature(.kernel)) {
try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
.id_result_type = result_ty_id,
.id_result = result_id,
@@ -5290,7 +5290,7 @@ const NavGen = struct {
.initializer = options.initializer,
});
-if (self.spv.hasFeature(.Shader)) return var_id;
+if (self.spv.hasFeature(.shader)) return var_id;
switch (options.storage_class) {
.Generic => {


@@ -274,6 +274,16 @@ fn processInstruction(self: *Assembler) !void {
.OpEntryPoint => {
return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.Kernel)", .{});
},
.OpCapability => {
try self.spv.addCapability(@enumFromInt(self.inst.operands.items[0].value));
return;
},
.OpExtension => {
const ext_name_offset = self.inst.operands.items[0].string;
const ext_name = std.mem.sliceTo(self.inst.string_bytes.items[ext_name_offset..], 0);
try self.spv.addExtension(ext_name);
return;
},
.OpExtInstImport => blk: {
const set_name_offset = self.inst.operands.items[1].string;
const set_name = std.mem.sliceTo(self.inst.string_bytes.items[set_name_offset..], 0);
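For orientation only: the Operand type itself is not shown in this diff, but from the accesses above an operand evidently carries either a raw enumerant value or an offset into the instruction's NUL-delimited string_bytes buffer, roughly:

// Rough sketch inferred from the new cases; the real type likely has more
// variants (result ids, literals, ...) and possibly different field types.
const Operand = union(enum) {
    value: u32, // raw enumerant, e.g. the Capability operand of OpCapability
    string: u32, // byte offset of a 0-terminated name within string_bytes
};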


@@ -183,8 +183,11 @@ cache: struct {
array_types: std.AutoHashMapUnmanaged(struct { IdRef, IdRef }, IdRef) = .empty,
function_types: DeepHashMap(struct { IdRef, []const IdRef }, IdRef) = .empty,
-builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
+capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
+extensions: std.StringHashMapUnmanaged(void) = .empty,
+extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
decorations: std.AutoHashMapUnmanaged(struct { IdRef, spec.Decoration }, void) = .empty,
+builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
bool_const: [2]?IdRef = .{ null, null },
} = .{},
@@ -199,9 +202,6 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.ArrayListUnmanaged(EntryPoint) = .empty,
-/// The list of extended instruction sets that should be imported.
-extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
pub fn init(gpa: Allocator, target: std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
@@ -242,15 +242,16 @@ pub fn deinit(self: *Module) void {
self.cache.vector_types.deinit(self.gpa);
self.cache.array_types.deinit(self.gpa);
self.cache.function_types.deinit(self.gpa);
-self.cache.builtins.deinit(self.gpa);
+self.cache.capabilities.deinit(self.gpa);
+self.cache.extensions.deinit(self.gpa);
+self.cache.extended_instruction_set.deinit(self.gpa);
self.cache.decorations.deinit(self.gpa);
+self.cache.builtins.deinit(self.gpa);
self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
self.entry_points.deinit(self.gpa);
-self.extended_instruction_set.deinit(self.gpa);
self.arena.deinit();
self.* = undefined;
@@ -339,9 +340,61 @@ fn entryPoints(self: *Module) !Section {
}
pub fn finalize(self: *Module, a: Allocator) ![]Word {
// Emit capabilities and extensions
for (std.Target.spirv.all_features) |feature| {
if (self.target.cpu.features.isEnabled(feature.index)) {
const feature_tag: std.Target.spirv.Feature = @enumFromInt(feature.index);
switch (feature_tag) {
.v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
.int8 => try self.addCapability(.Int8),
.int16 => try self.addCapability(.Int16),
.int64 => try self.addCapability(.Int64),
.float16 => try self.addCapability(.Float16),
.float64 => try self.addCapability(.Float64),
.addresses => if (self.hasFeature(.shader)) {
try self.addCapability(.PhysicalStorageBufferAddresses);
try self.addExtension("SPV_KHR_physical_storage_buffer");
} else {
try self.addCapability(.Addresses);
},
.matrix => try self.addCapability(.Matrix),
.kernel => try self.addCapability(.Kernel),
.generic_pointer => try self.addCapability(.GenericPointer),
.vector16 => try self.addCapability(.Vector16),
.shader => try self.addCapability(.Shader),
}
}
}
// Emit memory model
const addressing_model: spec.AddressingModel = blk: {
if (self.hasFeature(.shader)) {
break :blk switch (self.target.cpu.arch) {
.spirv32 => .Logical, // TODO: I don't think this will ever be implemented.
.spirv64 => .PhysicalStorageBuffer64,
else => unreachable,
};
} else if (self.hasFeature(.kernel)) {
break :blk switch (self.target.cpu.arch) {
.spirv32 => .Physical32,
.spirv64 => .Physical64,
else => unreachable,
};
}
unreachable;
};
try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
.addressing_model = addressing_model,
.memory_model = switch (self.target.os.tag) {
.opencl => .OpenCL,
.vulkan, .opengl => .GLSL450,
else => unreachable,
},
});
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
// TODO: Audit calls to allocId() in this function to make it idempotent.
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);
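The hasFeature calls used above and throughout the codegen changes are presumably a thin wrapper over the generated std.Target.spirv feature set, roughly equivalent to the featureSetHas call sites they replace (the helper itself is not part of this diff):

pub fn hasFeature(self: *const Module, feature: std.Target.spirv.Feature) bool {
    // Same check the old code spelled out inline, e.g.
    // Target.spirv.featureSetHas(target.cpu.features, .Float16).
    return std.Target.spirv.featureSetHas(self.target.cpu.features, feature);
}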
@@ -405,11 +458,23 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
try self.declareDeclDeps(decl_index, func.decl_deps.keys());
}
pub fn addCapability(self: *Module, cap: spec.Capability) !void {
const entry = try self.cache.capabilities.getOrPut(self.gpa, cap);
if (entry.found_existing) return;
try self.sections.capabilities.emit(self.gpa, .OpCapability, .{ .capability = cap });
}
pub fn addExtension(self: *Module, ext: []const u8) !void {
const entry = try self.cache.extensions.getOrPut(self.gpa, ext);
if (entry.found_existing) return;
try self.sections.extensions.emit(self.gpa, .OpExtension, .{ .name = ext });
}
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
assert(set != .core);
-const gop = try self.extended_instruction_set.getOrPut(self.gpa, set);
+const gop = try self.cache.extended_instruction_set.getOrPut(self.gpa, set);
if (gop.found_existing) return gop.value_ptr.*;
const result_id = self.allocId();
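Both new helpers de-duplicate through the cache via getOrPut, so repeated requests, whether from codegen, finalize, or inline assembly, emit each OpCapability and OpExtension at most once. Illustrative usage (not part of the diff):

try spv.addCapability(.Int8);
try spv.addCapability(.Int8); // already in cache.capabilities; nothing new is emitted
try spv.addExtension("SPV_KHR_physical_storage_buffer"); // one OpExtension per unique name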


@@ -232,13 +232,9 @@ pub fn flushModule(
const diags = &comp.link_diags;
const gpa = comp.gpa;
-try writeCapabilities(spv);
-try writeMemoryModel(spv);
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
// nonsemantic instruction. For now, just put it in OpSourceExtension with a special name.
var error_info = std.ArrayList(u8).init(self.object.gpa);
defer error_info.deinit();
@@ -297,65 +293,3 @@ fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress
return binary.finalize(a);
}
fn writeCapabilities(spv: *SpvModule) !void {
var caps: std.ArrayList(spec.Capability) = .init(spv.gpa);
var extensions: std.ArrayList([]const u8) = .init(spv.gpa);
defer {
caps.deinit();
extensions.deinit();
}
// Currently all spirv target features name are mapped to a Capability or an Extension.
// Except for versions which we ignore.
for (std.Target.spirv.all_features, 0..) |_, i| {
if (spv.target.cpu.features.isEnabled(@intCast(i))) {
const feature: std.Target.spirv.Feature = @enumFromInt(i);
const name = @tagName(feature);
if (std.meta.stringToEnum(spec.Capability, name)) |cap| {
try caps.append(cap);
} else if (std.mem.startsWith(u8, name, "SPV_")) {
try extensions.append(name);
}
}
}
for (caps.items) |cap| {
try spv.sections.capabilities.emit(spv.gpa, .OpCapability, .{
.capability = cap,
});
}
for (extensions.items) |ext| {
try spv.sections.extensions.emit(spv.gpa, .OpExtension, .{ .name = ext });
}
}
fn writeMemoryModel(spv: *SpvModule) !void {
const addressing_model: spec.AddressingModel = blk: {
if (spv.hasFeature(.Shader)) {
break :blk switch (spv.target.cpu.arch) {
.spirv32 => .Logical, // TODO: I don't think this will ever be implemented.
.spirv64 => .PhysicalStorageBuffer64,
else => unreachable,
};
} else if (spv.hasFeature(.Kernel)) {
break :blk switch (spv.target.cpu.arch) {
.spirv32 => .Physical32,
.spirv64 => .Physical64,
else => unreachable,
};
}
unreachable;
};
const memory_model: spec.MemoryModel = switch (spv.target.os.tag) {
.opencl => .OpenCL,
.vulkan, .opengl => .GLSL450,
else => unreachable,
};
try spv.sections.memory_model.emit(spv.gpa, .OpMemoryModel, .{
.addressing_model = addressing_model,
.memory_model = memory_model,
});
}


@@ -43,7 +43,6 @@ pub fn build(b: *std.Build) void {
"../../tools/update_clang_options.zig",
"../../tools/update_cpu_features.zig",
"../../tools/update_glibc.zig",
-"../../tools/update_spirv_features.zig",
}) |tool_src_path| {
const tool = b.addTest(.{
.name = std.fs.path.stem(tool_src_path),


@@ -1072,13 +1072,6 @@ const targets = [_]ArchTarget{
.td_name = "Sparc",
},
},
-// TODO: merge tools/update_spirv_features.zig into this script
-//.{
-// .zig_name = "spirv",
-// .llvm = .{
-// .name = "SPIRV",
-// },
-//},
.{
.zig_name = "s390x",
.llvm = .{


@@ -1,361 +0,0 @@
//! This tool generates SPIR-V features from the grammar files in the SPIRV-Headers
//! (https://github.com/KhronosGroup/SPIRV-Headers/) and SPIRV-Registry (https://github.com/KhronosGroup/SPIRV-Registry/)
//! repositories. Currently it only generates a basic feature set definition consisting of versions, extensions and capabilities.
//! There is a lot left to be desired, as currently dependencies of extensions and dependencies on extensions aren't generated.
//! This is because there are some peculiarities in the SPIR-V registries:
//! - Capabilities may depend on multiple extensions, which cannot be modelled yet by std.Target.
//! - Extension dependencies are not documented in a machine-readable manner.
//! - Note that the grammar spec also contains definitions from extensions which aren't actually official. Most of these seem to be
//! from an intel project (https://github.com/intel/llvm/, https://github.com/intel/llvm/tree/sycl/sycl/doc/extensions/SPIRV),
//! and so ONLY extensions in the SPIRV-Registry should be included.
const std = @import("std");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const g = @import("spirv/grammar.zig");
const Version = struct {
major: u32,
minor: u32,
fn parse(str: []const u8) !Version {
var it = std.mem.splitScalar(u8, str, '.');
const major = it.first();
const minor = it.next() orelse return error.InvalidVersion;
if (it.next() != null) return error.InvalidVersion;
return Version{
.major = std.fmt.parseInt(u32, major, 10) catch return error.InvalidVersion,
.minor = std.fmt.parseInt(u32, minor, 10) catch return error.InvalidVersion,
};
}
fn eql(a: Version, b: Version) bool {
return a.major == b.major and a.minor == b.minor;
}
fn lessThan(ctx: void, a: Version, b: Version) bool {
_ = ctx;
return if (a.major == b.major)
a.minor < b.minor
else
a.major < b.major;
}
};
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
if (std.mem.eql(u8, args[1], "--help")) {
usageAndExit(std.io.getStdErr(), args[0], 0);
}
if (args.len != 3) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const spirv_headers_root = args[1];
const spirv_registry_root = args[2];
if (std.mem.startsWith(u8, spirv_headers_root, "-") or std.mem.startsWith(u8, spirv_registry_root, "-")) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
// Required for json parsing.
@setEvalBranchQuota(10000);
const registry_path = try fs.path.join(allocator, &.{ spirv_headers_root, "include", "spirv", "unified1", "spirv.core.grammar.json" });
const registry_json = try std.fs.cwd().readFileAlloc(allocator, registry_path, std.math.maxInt(usize));
var scanner = std.json.Scanner.initCompleteInput(allocator, registry_json);
var diagnostics = std.json.Diagnostics{};
scanner.enableDiagnostics(&diagnostics);
const registry = std.json.parseFromTokenSourceLeaky(g.CoreRegistry, allocator, &scanner, .{ .ignore_unknown_fields = true }) catch |err| {
std.debug.print("line,col: {},{}\n", .{ diagnostics.getLine(), diagnostics.getColumn() });
return err;
};
const capabilities = for (registry.operand_kinds) |opkind| {
if (std.mem.eql(u8, opkind.kind, "Capability"))
break opkind.enumerants orelse return error.InvalidRegistry;
} else return error.InvalidRegistry;
const extensions = try gather_extensions(allocator, spirv_registry_root);
const versions = try gatherVersions(allocator, registry);
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
const w = bw.writer();
try w.writeAll(
\\//! This file is auto-generated by tools/update_spirv_features.zig.
\\//! TODO: Dependencies of capabilities on extensions.
\\//! TODO: Dependencies of extensions on extensions.
\\//! TODO: Dependencies of extensions on versions.
\\
\\const std = @import("../std.zig");
\\const CpuFeature = std.Target.Cpu.Feature;
\\const CpuModel = std.Target.Cpu.Model;
\\
\\pub const Feature = enum {
\\
);
for (versions) |ver| {
try w.print(" v{}_{},\n", .{ ver.major, ver.minor });
}
for (extensions) |ext| {
try w.print(" {p},\n", .{std.zig.fmtId(ext)});
}
for (capabilities) |cap| {
try w.print(" {p},\n", .{std.zig.fmtId(cap.enumerant)});
}
try w.writeAll(
\\};
\\
\\pub const featureSet = CpuFeature.FeatureSetFns(Feature).featureSet;
\\pub const featureSetHas = CpuFeature.FeatureSetFns(Feature).featureSetHas;
\\pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
\\pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
\\
\\pub const all_features = blk: {
\\ @setEvalBranchQuota(2000);
\\ const len = @typeInfo(Feature).@"enum".fields.len;
\\ std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
\\ var result: [len]CpuFeature = undefined;
\\
);
for (versions, 0..) |ver, i| {
try w.print(
\\ result[@intFromEnum(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V version {0}.{1}",
\\
, .{ ver.major, ver.minor });
if (i == 0) {
try w.writeAll(
\\ .dependencies = featureSet(&[_]Feature{}),
\\ };
\\
);
} else {
try w.print(
\\ .dependencies = featureSet(&[_]Feature{{
\\ .v{}_{},
\\ }}),
\\ }};
\\
, .{ versions[i - 1].major, versions[i - 1].minor });
}
}
// TODO: Extension dependencies.
for (extensions) |ext| {
try w.print(
\\ result[@intFromEnum(Feature.{p_})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V extension {s}",
\\ .dependencies = featureSet(&[_]Feature{{}}),
\\ }};
\\
, .{
std.zig.fmtId(ext),
ext,
});
}
// TODO: Capability extension dependencies.
for (capabilities) |cap| {
try w.print(
\\ result[@intFromEnum(Feature.{p_})] = .{{
\\ .llvm_name = null,
\\ .description = "Enable SPIR-V capability {s}",
\\ .dependencies = featureSet(&[_]Feature{{
\\
, .{
std.zig.fmtId(cap.enumerant),
cap.enumerant,
});
if (cap.version) |ver_str| blk: {
if (std.mem.eql(u8, ver_str, "None")) break :blk;
const ver = try Version.parse(ver_str);
try w.print(" .v{}_{},\n", .{ ver.major, ver.minor });
}
for (cap.capabilities) |cap_dep| {
try w.print(" .{p_},\n", .{std.zig.fmtId(cap_dep)});
}
try w.writeAll(
\\ }),
\\ };
\\
);
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
\\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.@"enum".fields[i].name;
\\ }
\\ break :blk result;
\\};
\\
\\pub const cpu = struct {
\\ pub const generic: CpuModel = .{
\\ .name = "generic",
\\ .llvm_name = "generic",
\\ .features = featureSet(&[_]Feature{ .v1_0 }),
\\ };
\\
\\ pub const vulkan_v1_2: CpuModel = .{
\\ .name = "vulkan_v1_2",
\\ .llvm_name = null,
\\ .features = featureSet(&[_]Feature{
\\ .v1_5,
\\ .Shader,
\\ .PhysicalStorageBufferAddresses,
\\ .VariablePointers,
\\ .VariablePointersStorageBuffer,
\\ .SPV_KHR_physical_storage_buffer,
\\ }),
\\ };
\\
\\ pub const opencl_v2: CpuModel = .{
\\ .name = "opencl_v2",
\\ .llvm_name = null,
\\ .features = featureSet(&[_]Feature{
\\ .v1_2,
\\ .Kernel,
\\ .Addresses,
\\ .GenericPointer,
\\ }),
\\ };
\\};
);
try bw.flush();
}
/// SPIRV-Registry should hold all extensions currently registered for SPIR-V.
/// The *.grammar.json in SPIRV-Headers should have most of these as well, but with this we're sure to get only the actually
/// registered ones.
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
defer extensions_dir.close();
var extensions = std.ArrayList([]const u8).init(allocator);
var vendor_it = extensions_dir.iterate();
while (try vendor_it.next()) |vendor_entry| {
std.debug.assert(vendor_entry.kind == .directory); // If this fails, the structure of SPIRV-Registry has changed.
const vendor_dir = try extensions_dir.openDir(vendor_entry.name, .{ .iterate = true });
var ext_it = vendor_dir.iterate();
while (try ext_it.next()) |ext_entry| {
// There is both a HTML and asciidoc version of every spec (as well as some other directories),
// we need just the name, but to avoid duplicates here we will just skip anything thats not asciidoc.
if (!std.mem.endsWith(u8, ext_entry.name, ".asciidoc"))
continue;
// Unfortunately, some extension filenames are incorrect, so we need to look for the string in the 'Name Strings' section.
// This has the following format:
// ```
// Name Strings
// ------------
//
// SPV_EXT_name
// ```
// OR
// ```
// == Name Strings
//
// SPV_EXT_name
// ```
const ext_spec = try vendor_dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
const name_strings = "Name Strings";
const name_strings_offset = std.mem.indexOf(u8, ext_spec, name_strings) orelse return error.InvalidRegistry;
// As the specs are inconsistent on this next part, just skip any newlines/minuses
var ext_start = name_strings_offset + name_strings.len + 1;
while (std.ascii.isWhitespace(ext_spec[ext_start]) or ext_spec[ext_start] == '-') {
ext_start += 1;
}
const ext_end = std.mem.indexOfScalarPos(u8, ext_spec, ext_start, '\n') orelse return error.InvalidRegistry;
const ext = std.mem.trim(u8, ext_spec[ext_start..ext_end], &std.ascii.whitespace);
// Ignore invalid/incomplete extensions
if (std.mem.eql(u8, ext, "{extension_name}")) continue;
std.debug.assert(std.mem.startsWith(u8, ext, "SPV_")); // Sanity check, all extensions should have a name like SPV_VENDOR_extension.
try extensions.append(try allocator.dupe(u8, ext));
}
}
return extensions.items;
}
fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void {
const ver_str = version orelse return;
if (std.mem.eql(u8, ver_str, "None")) return;
const ver = try Version.parse(ver_str);
for (versions.items) |existing_ver| {
if (ver.eql(existing_ver)) return;
}
try versions.append(ver);
}
fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Version {
// Expected number of versions is small
var versions = std.ArrayList(Version).init(allocator);
for (registry.instructions) |inst| {
try insertVersion(&versions, inst.version);
}
for (registry.operand_kinds) |opkind| {
const enumerants = opkind.enumerants orelse continue;
for (enumerants) |enumerant| {
try insertVersion(&versions, enumerant.version);
}
}
std.mem.sort(Version, versions.items, {}, Version.lessThan);
return versions.items;
}
fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
file.writer().print(
\\Usage: {s} /path/git/SPIRV-Headers /path/git/SPIRV-Registry
\\
\\Prints to stdout Zig code which can be used to replace the file lib/std/target/spirv.zig.
\\
\\SPIRV-Headers can be cloned from https://github.com/KhronosGroup/SPIRV-Headers,
\\SPIRV-Registry can be cloned from https://github.com/KhronosGroup/SPIRV-Registry.
\\
, .{arg0}) catch std.process.exit(1);
std.process.exit(code);
}