mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-06 05:44:20 +00:00
* Introduce `-Ddebug-extensions` for enabling compiler debug helpers * Replace safety mode checks with `std.debug.runtime_safety` * Replace debugger helper checks with `!builtin.strip_debug_info` Sometimes, you just have to debug optimized compilers...
38911 lines
1.6 MiB
38911 lines
1.6 MiB
//! Semantic analysis of ZIR instructions.
|
|
//! Shared to every Block. Stored on the stack.
|
|
//! State used for compiling a ZIR into AIR.
|
|
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
|
|
//! Does type checking, comptime control flow, and safety-check generation.
|
|
//! This is the heart of the Zig compiler.
|
|
|
|
/// The `Module` (zcu) this analysis belongs to.
mod: *Module,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
/// The ZIR whose instructions are being analyzed.
code: Zir,
/// The AIR instructions produced so far by this analysis.
air_instructions: std.MultiArrayList(Air.Inst) = .{},
/// Trailing extra data referenced by AIR instruction payloads.
air_extra: std.ArrayListUnmanaged(u32) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, owner_decl is the Decl of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
/// Index form of `owner_decl`.
owner_decl_index: InternPool.DeclIndex,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
/// This could be `none`, a `func_decl`, or a `func_instance`.
owner_func_index: InternPool.Index,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func_index` and then diverges in the case of
/// an inline or comptime function call.
/// This could be `none`, a `func_decl`, or a `func_instance`.
func_index: InternPool.Index,
/// Whether the type of func_index has a calling convention of `.Naked`.
func_is_naked: bool,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// Source locations accumulated for the comptime error return trace.
comptime_err_ret_trace: *std.ArrayList(Module.SrcLoc),
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
/// In case of the return type being an error union with an inferred error
/// set, this is the inferred error set. `null` otherwise. Allocated with
/// `Sema.arena`.
fn_ret_ty_ies: ?*InferredErrorSet,
/// Budget for comptime branches; see `default_branch_quota`.
branch_quota: u32 = default_branch_quota,
/// Comptime branches taken so far (compared against `branch_quota`).
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
/// Memoizes the `Air.Inst.Ref` produced for each referenced decl.
decl_val_table: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a value
/// for each parameter of the generic owner. `none` for non-comptime parameters.
/// This is a separate array from `block.params` so that it can be passed
/// directly to `comptime_args` when calling `InternPool.getFuncInstance`.
/// This memory is allocated by a parent `Sema` in the temporary arena, and is
/// used only to add a `func_instance` into the `InternPool`.
comptime_args: []InternPool.Index = &.{},
/// Used to communicate from a generic function instantiation to the logic that
/// creates a generic function instantiation value in `funcCommon`.
generic_owner: InternPool.Index = .none,
/// When `generic_owner` is not none, this contains the generic function
/// instantiation callsite so that compile errors on the parameter types of the
/// instantiation can point back to the instantiation site in addition to the
/// declaration site.
generic_call_src: LazySrcLoc = .unneeded,
/// Corresponds to `generic_call_src`.
generic_call_decl: InternPool.OptionalDeclIndex = .none,
/// The key is types that must be fully resolved prior to machine code
/// generation pass. Types are added to this set when resolving them
/// immediately could cause a dependency loop, but they do need to be resolved
/// before machine code generation passes process the AIR.
/// It would work fine if this were an array list instead of an array hash map.
/// I chose array hash map with the intention to save time by omitting
/// duplicates.
types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
/// These are lazily created runtime blocks from block_inline instructions.
/// They are created when a break_inline passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
/// breaking from a block.
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
/// Set to true when analyzing a func type instruction so that nested generic
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,

/// The temporary arena is used for the memory of the `InferredAlloc` values
/// here so the values can be dropped without any cleanup.
unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},

/// Indices of comptime-mutable decls created by this Sema. These decls' values
/// should be interned after analysis completes, as they may refer to memory in
/// the Sema arena.
/// TODO: this is a workaround for memory bugs triggered by the removal of
/// Decl.value_arena. A better solution needs to be found. Probably this will
/// involve transitioning comptime-mutable memory away from using Decls at all.
comptime_mutable_decls: *std.ArrayList(InternPool.DeclIndex),

/// This is populated when `@setAlignStack` occurs so that if there is a duplicate
/// one encountered, the conflicting source location can be shown.
prev_stack_alignment_src: ?LazySrcLoc = null,

/// While analyzing a type which has a special InternPool index, this is set to the index at which
/// the struct/enum/union type created should be placed. Otherwise, it is `.none`.
builtin_type_target_index: InternPool.Index = .none,

/// Links every pointer derived from a base `alloc` back to that `alloc`. Used
/// to detect comptime-known `const`s.
/// TODO: ZIR liveness analysis would allow us to remove elements from this map.
base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{},

/// Runtime `alloc`s are placed in this map to track all comptime-known writes
/// before the corresponding `make_ptr_const` instruction.
/// If any store to the alloc depends on a runtime condition or stores a runtime
/// value, the corresponding element in this map is erased, to indicate that the
/// alloc is not comptime-known.
/// If the alloc remains in this map when `make_ptr_const` is reached, its value
/// is comptime-known, and all stores to the pointer must be applied at comptime
/// to determine the comptime value.
/// Backed by gpa.
maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .{},

/// Tracking state for one entry of `maybe_comptime_allocs`.
const MaybeComptimeAlloc = struct {
    /// The runtime index of the `alloc` instruction.
    runtime_index: Value.RuntimeIndex,
    /// Backed by sema.arena. Tracks all comptime-known stores to this `alloc`. Due to
    /// RLS, a single comptime-known allocation may have arbitrarily many stores.
    /// This may also contain `set_union_tag` instructions.
    stores: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
    /// Backed by sema.arena. Contains instructions such as `optional_payload_ptr_set`
    /// which have side effects so will not be elided by Liveness: we must rewrite these
    /// instructions to be nops instead of relying on Liveness.
    non_elideable_pointers: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
};
|
|
|
|
const std = @import("std");
|
|
const math = std.math;
|
|
const mem = std.mem;
|
|
const Allocator = mem.Allocator;
|
|
const assert = std.debug.assert;
|
|
const log = std.log.scoped(.sema);
|
|
|
|
const Sema = @This();
|
|
const Value = @import("Value.zig");
|
|
const Type = @import("type.zig").Type;
|
|
const TypedValue = @import("TypedValue.zig");
|
|
const Air = @import("Air.zig");
|
|
const Zir = std.zig.Zir;
|
|
const Module = @import("Module.zig");
|
|
const trace = @import("tracy.zig").trace;
|
|
const Namespace = Module.Namespace;
|
|
const CompileError = Module.CompileError;
|
|
const SemaError = Module.SemaError;
|
|
const Decl = Module.Decl;
|
|
const CaptureScope = Module.CaptureScope;
|
|
const LazySrcLoc = std.zig.LazySrcLoc;
|
|
const RangeSet = @import("RangeSet.zig");
|
|
const target_util = @import("target.zig");
|
|
const Package = @import("Package.zig");
|
|
const crash_report = @import("crash_report.zig");
|
|
const build_options = @import("build_options");
|
|
const Compilation = @import("Compilation.zig");
|
|
const InternPool = @import("InternPool.zig");
|
|
const Alignment = InternPool.Alignment;
|
|
|
|
/// Initial value of `Sema.branch_quota`.
pub const default_branch_quota = 1000;

/// Default number of frames shown in an error reference trace.
pub const default_reference_trace_len = 2;
|
|
|
|
pub const InferredErrorSet = struct {
    /// The function body from which this error set originates.
    /// This is `none` in the case of a comptime/inline function call, corresponding to
    /// `InternPool.Index.adhoc_inferred_error_set_type`.
    /// The function's resolved error set is not set until analysis of the
    /// function body completes.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
    /// The regular error set created by resolving this inferred error set.
    resolved: InternPool.Index = .none,

    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

    /// Folds `err_set_ty` into this inferred error set: `anyerror` marks the
    /// whole set resolved to `anyerror`, concrete error sets contribute their
    /// names, and other inferred sets are recorded for later resolution.
    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        arena: Allocator,
    ) !void {
        const ty_index = err_set_ty.toIntern();
        switch (ty_index) {
            .anyerror_type => {
                self.resolved = .anyerror_type;
                return;
            },
            // Adding an inferred error set to itself is a no-op.
            .adhoc_inferred_error_set_type => return,
            else => {},
        }
        switch (ip.indexToKey(ty_index)) {
            .error_set_type => |error_set_type| {
                for (error_set_type.names.get(ip)) |err_name| {
                    try self.errors.put(arena, err_name, {});
                }
            },
            .inferred_error_set_type => {
                try self.inferred_error_sets.put(arena, ty_index, {});
            },
            else => unreachable,
        }
    }
};
|
|
|
|
/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve
|
|
/// instructions during analysis.
|
|
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
|
|
/// zir instruction index and a start offset. An index is not present in the map if the value
|
|
/// at the index is `Air.Inst.Ref.none`.
|
|
/// `ensureSpaceForInstructions` can be called to force InstMap to have a mapped range that
|
|
/// includes all instructions in a slice. After calling this function, `putAssumeCapacity*` can
|
|
/// be called safely for any of the instructions passed in.
|
|
pub const InstMap = struct {
    /// Backing storage; the `Air.Inst.Ref.none` sentinel marks absent entries.
    items: []Air.Inst.Ref = &[_]Air.Inst.Ref{},
    /// The ZIR instruction index that `items[0]` corresponds to.
    start: Zir.Inst.Index = @enumFromInt(0),

    /// Frees the backing slice; the map must not be used afterwards.
    pub fn deinit(map: InstMap, allocator: mem.Allocator) void {
        allocator.free(map.items);
    }

    /// Returns the AIR ref mapped to `key`, or null if no mapping exists.
    /// `key` must lie within the currently mapped range.
    pub fn get(map: InstMap, key: Zir.Inst.Index) ?Air.Inst.Ref {
        if (!map.contains(key)) return null;
        return map.items[@intFromEnum(key) - @intFromEnum(map.start)];
    }

    /// Overwrites the mapping for `key`. `key` must lie within the mapped
    /// range (see `ensureSpaceForInstructions`).
    pub fn putAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        map.items[@intFromEnum(key) - @intFromEnum(map.start)] = ref;
    }

    /// Like `putAssumeCapacity`, but asserts that `key` is not already mapped.
    pub fn putAssumeCapacityNoClobber(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        assert(!map.contains(key));
        map.putAssumeCapacity(key, ref);
    }

    pub const GetOrPutResult = struct {
        value_ptr: *Air.Inst.Ref,
        found_existing: bool,
    };

    /// Returns a pointer to the slot for `key`; `found_existing` is true when
    /// the slot already holds a mapping (i.e. is not `.none`).
    pub fn getOrPutAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
    ) GetOrPutResult {
        const index = @intFromEnum(key) - @intFromEnum(map.start);
        return GetOrPutResult{
            .value_ptr = &map.items[index],
            .found_existing = map.items[index] != .none,
        };
    }

    /// Clears the mapping for `key`, returning whether one existed.
    /// Note: `map` is taken by value, but the write goes through the `items`
    /// slice pointer, so the caller's map is affected.
    pub fn remove(map: InstMap, key: Zir.Inst.Index) bool {
        if (!map.contains(key)) return false;
        map.items[@intFromEnum(key) - @intFromEnum(map.start)] = .none;
        return true;
    }

    /// Whether `key` currently has a mapping. The index computation is not
    /// range-checked here; `key` must lie within the mapped range.
    pub fn contains(map: InstMap, key: Zir.Inst.Index) bool {
        return map.items[@intFromEnum(key) - @intFromEnum(map.start)] != .none;
    }

    /// Grows (and possibly shifts down) the mapped range so that every index
    /// in `insts` is addressable. Afterwards `putAssumeCapacity*` may be
    /// called for any of those instructions.
    pub fn ensureSpaceForInstructions(
        map: *InstMap,
        allocator: mem.Allocator,
        insts: []const Zir.Inst.Index,
    ) !void {
        const start, const end = mem.minMax(u32, @ptrCast(insts));
        const map_start = @intFromEnum(map.start);
        // Fast path: the requested range is already covered.
        if (map_start <= start and end < map.items.len + map_start)
            return;

        const old_start = if (map.items.len == 0) start else map_start;
        var better_capacity = map.items.len;
        var better_start = old_start;
        // Grow capacity ~1.5x per iteration, extending half of each increment
        // downwards (saturating at 0), until [start, end] fits in the window.
        while (true) {
            const extra_capacity = better_capacity / 2 + 16;
            better_capacity += extra_capacity;
            better_start -|= @intCast(extra_capacity / 2);
            if (better_start <= start and end < better_capacity + better_start)
                break;
        }

        // Reallocate, copying the old window into place and initializing the
        // freshly exposed slots to the `.none` sentinel.
        const start_diff = old_start - better_start;
        const new_items = try allocator.alloc(Air.Inst.Ref, better_capacity);
        @memset(new_items[0..start_diff], .none);
        @memcpy(new_items[start_diff..][0..map.items.len], map.items);
        @memset(new_items[start_diff + map.items.len ..], .none);

        allocator.free(map.items);
        map.items = new_items;
        map.start = @enumFromInt(better_start);
    }
};
|
|
|
|
/// This is the context needed to semantically analyze ZIR instructions and
|
|
/// produce AIR instructions.
|
|
/// This is a temporary structure stored on the stack; references to it are valid only
|
|
/// during semantic analysis of the block.
|
|
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// The namespace to use for lookups from this source block
    /// When analyzing fields, this is different from src_decl.src_namespace.
    namespace: InternPool.NamespaceIndex,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    /// When doing a generic function instantiation, this array collects a type
    /// for each *runtime-known* parameter. This array corresponds to the instance
    /// function type, while `Sema.comptime_args` corresponds to the generic owner
    /// function type.
    /// This memory is allocated by a parent `Sema` in the temporary arena, and is
    /// used to add a `func_instance` into the `InternPool`.
    params: std.MultiArrayList(Param) = .{},

    wip_capture_scope: CaptureScope.Index,

    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0 then one of these is guaranteed to be non null.
    runtime_cond: ?Module.SrcLoc = null,
    runtime_loop: ?Module.SrcLoc = null,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: InternPool.DeclIndex,
    /// Non zero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: Value.RuntimeIndex = .zero,
    inline_block: Zir.Inst.OptionalIndex = .none,

    comptime_reason: ?*const ComptimeReason = null,
    // TODO is_comptime and comptime_reason should probably be merged together.
    is_comptime: bool,
    is_typeof: bool = false,

    /// Keep track of the active error return trace index around blocks so that we can correctly
    /// pop the error trace upon block exit.
    error_return_trace_index: Air.Inst.Ref = .none,

    /// When null, safety is determined by the build mode; changed by `@setRuntimeSafety`.
    want_safety: ?bool = null,

    /// What mode to generate float operations in, set by @setFloatMode
    float_mode: std.builtin.FloatMode = .Strict,

    /// When analyzing inside a `@cImport`, collects the C source to compile.
    c_import_buf: ?*std.ArrayList(u8) = null,

    /// If not `null`, this boolean is set when a `dbg_var_ptr` or `dbg_var_val`
    /// instruction is emitted. It signals that the innermost lexically
    /// enclosing `block`/`block_inline` should be translated into a real AIR
    /// `block` in order for codegen to match lexical scoping for debug vars.
    need_debug_scope: ?*bool = null,
|
|
|
|
    /// Records why a block is being evaluated at comptime, so compile errors
    /// can explain it via `explain`.
    const ComptimeReason = union(enum) {
        /// The expression is inside a `@cImport`.
        c_import: struct {
            block: *Block,
            src: LazySrcLoc,
        },
        /// The call is comptime because the callee's return type is comptime-only.
        comptime_ret_ty: struct {
            block: *Block,
            func: Air.Inst.Ref,
            func_src: LazySrcLoc,
            return_ty: Type,
        },

        /// Appends explanatory notes to `msg`; no-op when `msg` is null.
        fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
            const parent = msg orelse return;
            const mod = sema.mod;
            const prefix = "expression is evaluated at comptime because ";
            switch (cr) {
                .c_import => |ci| {
                    try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{});
                },
                .comptime_ret_ty => |rt| {
                    // Prefer pointing at the callee's return type expression;
                    // fall back to the callsite when the function declaration
                    // cannot be resolved.
                    const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: {
                        var src_loc = fn_decl.srcLoc(mod);
                        src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
                        break :blk src_loc;
                    } else blk: {
                        const src_decl = mod.declPtr(rt.block.src_decl);
                        break :blk src_decl.toSrcLoc(rt.func_src, mod);
                    };
                    if (rt.return_ty.isGenericPoison()) {
                        return mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
                    }
                    try mod.errNoteNonLazy(
                        src_loc,
                        parent,
                        prefix ++ "the function returns a comptime-only type '{}'",
                        .{rt.return_ty.fmt(mod)},
                    );
                    try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty);
                },
            }
        }
    };
|
|
|
|
    /// One parameter collected for a function type under construction.
    const Param = struct {
        /// `none` means `anytype`.
        ty: InternPool.Index,
        is_comptime: bool,
        name: Zir.NullTerminatedString,
    };

    /// This `Block` maps a block ZIR instruction to the corresponding
    /// AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// This `Block` indicates that an inline function call is happening
    /// and return instructions should be analyzed as a break instruction
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        call_block: *Block,
        call_src: LazySrcLoc,
        has_comptime_args: bool,
        func: InternPool.Index,
        comptime_result: Air.Inst.Ref,
        merges: Merges,
    };

    /// Collects the results of all `break`s targeting a block, so the block's
    /// result can be computed via peer type resolution.
    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Separate array list from break_inst_list so that it can be passed directly
        /// to resolvePeerTypes.
        results: std.ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
        /// Same indexes, capacity, length as `results`.
        br_list: std.ArrayListUnmanaged(Air.Inst.Index),
        /// Keeps the source location of the rhs operand of the break instruction,
        /// to enable more precise compile errors.
        /// Same indexes, capacity, length as `results`.
        src_locs: std.ArrayListUnmanaged(?LazySrcLoc),

        /// Frees the three parallel lists.
        pub fn deinit(merges: *@This(), allocator: mem.Allocator) void {
            merges.results.deinit(allocator);
            merges.br_list.deinit(allocator);
            merges.src_locs.deinit(allocator);
        }
    };
|
|
|
|
    /// For debugging purposes.
    pub fn dump(block: *Block, mod: Module) void {
        Zir.dumpBlock(mod, block);
    }

    /// Creates a child block inheriting the parent's analysis context
    /// (comptime-ness, safety, float mode, runtime conditions, ...) but
    /// starting with an empty instruction list and no label.
    pub fn makeSubBlock(parent: *Block) Block {
        return .{
            .parent = parent,
            .sema = parent.sema,
            .src_decl = parent.src_decl,
            .namespace = parent.namespace,
            .instructions = .{},
            .wip_capture_scope = parent.wip_capture_scope,
            .label = null,
            .inlining = parent.inlining,
            .is_comptime = parent.is_comptime,
            .comptime_reason = parent.comptime_reason,
            .is_typeof = parent.is_typeof,
            .runtime_cond = parent.runtime_cond,
            .runtime_loop = parent.runtime_loop,
            .runtime_index = parent.runtime_index,
            .want_safety = parent.want_safety,
            .float_mode = parent.float_mode,
            .c_import_buf = parent.c_import_buf,
            .error_return_trace_index = parent.error_return_trace_index,
            .need_debug_scope = parent.need_debug_scope,
        };
    }
|
|
|
|
pub fn wantSafety(block: *const Block) bool {
|
|
return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
|
|
.Debug => true,
|
|
.ReleaseSafe => true,
|
|
.ReleaseFast => false,
|
|
.ReleaseSmall => false,
|
|
};
|
|
}
|
|
|
|
pub fn getFileScope(block: *Block, mod: *Module) *Module.File {
|
|
return mod.namespacePtr(block.namespace).file_scope;
|
|
}
|
|
|
|
    /// Appends an instruction whose data is a bare `Type`.
    fn addTy(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty = ty },
        });
    }

    /// Appends an instruction with `ty_op` data: an interned result type plus
    /// a single operand.
    fn addTyOp(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(ty.toIntern()),
                .operand = operand,
            } },
        });
    }
|
|
|
|
fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
|
|
return block.addInst(.{
|
|
.tag = .bitcast,
|
|
.data = .{ .ty_op = .{
|
|
.ty = Air.internedToRef(ty.toIntern()),
|
|
.operand = operand,
|
|
} },
|
|
});
|
|
}
|
|
|
|
    /// Appends an instruction that carries no data.
    fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .no_op = {} },
        });
    }

    /// Appends an instruction with a single operand and no type payload.
    fn addUnOp(
        block: *Block,
        tag: Air.Inst.Tag,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .un_op = operand },
        });
    }

    /// Appends a `br` instruction breaking to `target_block` with `operand`
    /// as the block result.
    fn addBr(
        block: *Block,
        target_block: Air.Inst.Index,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = target_block,
                .operand = operand,
            } },
        });
    }

    /// Appends an instruction with two operands.
    fn addBinOp(
        block: *Block,
        tag: Air.Inst.Tag,
        lhs: Air.Inst.Ref,
        rhs: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .bin_op = .{
                .lhs = lhs,
                .rhs = rhs,
            } },
        });
    }
|
|
|
|
fn addStructFieldPtr(
|
|
block: *Block,
|
|
struct_ptr: Air.Inst.Ref,
|
|
field_index: u32,
|
|
ptr_field_ty: Type,
|
|
) !Air.Inst.Ref {
|
|
const ty = Air.internedToRef(ptr_field_ty.toIntern());
|
|
const tag: Air.Inst.Tag = switch (field_index) {
|
|
0 => .struct_field_ptr_index_0,
|
|
1 => .struct_field_ptr_index_1,
|
|
2 => .struct_field_ptr_index_2,
|
|
3 => .struct_field_ptr_index_3,
|
|
else => {
|
|
return block.addInst(.{
|
|
.tag = .struct_field_ptr,
|
|
.data = .{ .ty_pl = .{
|
|
.ty = ty,
|
|
.payload = try block.sema.addExtra(Air.StructField{
|
|
.struct_operand = struct_ptr,
|
|
.field_index = field_index,
|
|
}),
|
|
} },
|
|
});
|
|
},
|
|
};
|
|
return block.addInst(.{
|
|
.tag = tag,
|
|
.data = .{ .ty_op = .{
|
|
.ty = ty,
|
|
.operand = struct_ptr,
|
|
} },
|
|
});
|
|
}
|
|
|
|
    /// Appends a `struct_field_val` instruction extracting field `field_index`
    /// from `struct_val`; `field_ty` is the result type.
    fn addStructFieldVal(
        block: *Block,
        struct_val: Air.Inst.Ref,
        field_index: u32,
        field_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .struct_field_val,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(field_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.StructField{
                    .struct_operand = struct_val,
                    .field_index = field_index,
                }),
            } },
        });
    }

    /// Appends a `slice_elem_ptr` instruction; `elem_ptr_ty` is the resulting
    /// element pointer type.
    fn addSliceElemPtr(
        block: *Block,
        slice: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .slice_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(elem_ptr_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = slice,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    /// Convenience wrapper around `addPtrElemPtrTypeRef` taking a `Type`
    /// instead of an already-interned ref.
    fn addPtrElemPtr(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        const ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
        return block.addPtrElemPtrTypeRef(array_ptr, elem_index, ty_ref);
    }

    /// Appends a `ptr_elem_ptr` instruction; `elem_ptr_ty` is the resulting
    /// element pointer type as an AIR ref.
    fn addPtrElemPtrTypeRef(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Air.Inst.Ref,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .ptr_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = elem_ptr_ty,
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = array_ptr,
                    .rhs = elem_index,
                }),
            } },
        });
    }
|
|
|
|
    /// Appends an element-wise vector comparison. The result type is a vector
    /// of `bool` with the same length as `lhs`. Uses the `_optimized` variant
    /// of the instruction when the block's float mode is `.Optimized`.
    fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
        const sema = block.sema;
        const mod = sema.mod;
        return block.addInst(.{
            .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef((try mod.vectorType(.{
                    .len = sema.typeOf(lhs).vectorLen(mod),
                    .child = .bool_type,
                })).toIntern()),
                .payload = try sema.addExtra(Air.VectorCmp{
                    .lhs = lhs,
                    .rhs = rhs,
                    .op = Air.VectorCmp.encodeOp(cmp_op),
                }),
            } },
        });
    }

    /// Appends an `aggregate_init` instruction whose payload is `elements`,
    /// appended to `air_extra`.
    fn addAggregateInit(
        block: *Block,
        aggregate_ty: Type,
        elements: []const Air.Inst.Ref,
    ) !Air.Inst.Ref {
        const sema = block.sema;
        const ty_ref = Air.internedToRef(aggregate_ty.toIntern());
        try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len);
        // Capture the payload offset before appending so the instruction
        // points at the start of the element list.
        const extra_index: u32 = @intCast(sema.air_extra.items.len);
        sema.appendRefsAssumeCapacity(elements);

        return block.addInst(.{
            .tag = .aggregate_init,
            .data = .{ .ty_pl = .{
                .ty = ty_ref,
                .payload = extra_index,
            } },
        });
    }
|
|
|
|
fn addUnionInit(
|
|
block: *Block,
|
|
union_ty: Type,
|
|
field_index: u32,
|
|
init: Air.Inst.Ref,
|
|
) !Air.Inst.Ref {
|
|
return block.addInst(.{
|
|
.tag = .union_init,
|
|
.data = .{ .ty_pl = .{
|
|
.ty = Air.internedToRef(union_ty.toIntern()),
|
|
.payload = try block.sema.addExtra(Air.UnionInit{
|
|
.field_index = field_index,
|
|
.init = init,
|
|
}),
|
|
} },
|
|
});
|
|
}
|
|
|
|
    /// Appends `inst` to the global AIR list and to this block, returning it
    /// as a `Ref`.
    pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return (try block.addInstAsIndex(inst)).toRef();
    }

    /// Same as `addInst` but returns the raw index of the new instruction.
    pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        // Reserve capacity in both lists before appending so that an OOM
        // cannot leave the two data structures out of sync.
        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        try block.instructions.ensureUnusedCapacity(gpa, 1);

        const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        sema.air_instructions.appendAssumeCapacity(inst);
        block.instructions.appendAssumeCapacity(result_index);
        return result_index;
    }

    /// Insert an instruction into the block at `index`. Moves all following
    /// instructions forward in the block to make room. Operation is O(N).
    pub fn insertInst(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return (try block.insertInstAsIndex(index, inst)).toRef();
    }

    /// Same as `insertInst` but returns the raw index of the new instruction.
    pub fn insertInstAsIndex(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);

        const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        sema.air_instructions.appendAssumeCapacity(inst);

        // `index` is used as a position within `block.instructions` here,
        // not as a global AIR index.
        try block.instructions.insert(gpa, @intFromEnum(index), result_index);
        return result_index;
    }
|
|
|
|
fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void {
|
|
if (safety_check and block.wantSafety()) {
|
|
try block.sema.safetyPanic(block, src, .unreach);
|
|
} else {
|
|
_ = try block.addNoOp(.unreach);
|
|
}
|
|
}
|
|
|
|
    /// Returns the `Package.Module` that owns the file containing this block's
    /// namespace.
    pub fn ownerModule(block: Block) *Package.Module {
        const zcu = block.sema.mod;
        return zcu.namespacePtr(block.namespace).file_scope.mod;
    }

    /// Begins construction of an anonymous Decl; complete it with
    /// `WipAnonDecl.finish`.
    pub fn startAnonDecl(block: *Block) !WipAnonDecl {
        return WipAnonDecl{
            .block = block,
            .finished = false,
        };
    }
|
|
|
|
    /// An anonymous Decl under construction; see `startAnonDecl`.
    pub const WipAnonDecl = struct {
        block: *Block,
        /// Set once `finish` succeeds.
        finished: bool,

        pub fn deinit(wad: *WipAnonDecl) void {
            wad.* = undefined;
        }

        /// `alignment` value of 0 means to use ABI alignment.
        /// Creates, finalizes, and returns the anonymous Decl holding `val`
        /// of type `ty`.
        pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: Alignment) !InternPool.DeclIndex {
            const sema = wad.block.sema;
            // Do this ahead of time because `createAnonymousDecl` depends on calling
            // `type.hasRuntimeBits()`.
            _ = try sema.typeHasRuntimeBits(ty);
            const new_decl_index = try sema.mod.createAnonymousDecl(wad.block, .{
                .ty = ty,
                .val = val,
            });
            const new_decl = sema.mod.declPtr(new_decl_index);
            new_decl.alignment = alignment;
            // If finalization below fails, undo the decl creation.
            errdefer sema.mod.abortAnonDecl(new_decl_index);
            wad.finished = true;
            try sema.mod.finalizeAnonDecl(new_decl_index);
            return new_decl_index;
        }
    };
|
|
};
|
|
|
|
/// A block paired with its label; heap-allocated so that `post_hoc_blocks`
/// can own pointers to it (freed via `destroy`).
const LabeledBlock = struct {
    block: Block,
    label: Block.Label,

    /// Frees the block's instruction list, the label's merge lists, and the
    /// `LabeledBlock` allocation itself.
    fn destroy(lb: *LabeledBlock, gpa: Allocator) void {
        lb.block.instructions.deinit(gpa);
        lb.label.merges.deinit(gpa);
        gpa.destroy(lb);
    }
};

/// The value stored in the inferred allocation. This will go into
/// peer type resolution. This is stored in a separate list so that
/// the items are contiguous in memory and thus can be passed to
/// `Module.resolvePeerTypes`.
const InferredAlloc = struct {
    /// The placeholder `store` instructions used before the result pointer type
    /// is known. These should be rewritten to perform any required coercions
    /// when the type is resolved.
    /// Allocated from `sema.arena`.
    prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
};

/// Explanation attached to "value must be comptime-known" errors.
const NeededComptimeReason = struct {
    needed_comptime_reason: []const u8,
    block_comptime_reason: ?*const Block.ComptimeReason = null,
};
|
|
|
|
/// Releases all gpa-backed state owned by this `Sema`. (Arena-backed state
/// is freed when the arena itself is cleared.)
pub fn deinit(sema: *Sema) void {
    const gpa = sema.gpa;
    sema.air_instructions.deinit(gpa);
    sema.air_extra.deinit(gpa);
    sema.inst_map.deinit(gpa);
    sema.decl_val_table.deinit(gpa);
    sema.types_to_resolve.deinit(gpa);
    // Post-hoc blocks are individually heap-allocated; destroy each before
    // dropping the map itself.
    var post_hoc_it = sema.post_hoc_blocks.valueIterator();
    while (post_hoc_it.next()) |labeled_block_ptr| {
        labeled_block_ptr.*.destroy(gpa);
    }
    sema.post_hoc_blocks.deinit(gpa);
    sema.unresolved_inferred_allocs.deinit(gpa);
    sema.base_allocs.deinit(gpa);
    sema.maybe_comptime_allocs.deinit(gpa);
    sema.* = undefined;
}
|
|
|
|
/// Returns only the result from the body that is specified.
/// Only appropriate to call when it is determined at comptime that this body
/// has no peers.
fn resolveBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
    /// This is the instruction that a break instruction within `body` can
    /// use to return from the body.
    body_inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const maybe_break = try sema.analyzeBodyBreak(block, body);
    // No break data means the body ended in a noreturn instruction.
    const break_data = maybe_break orelse return .unreachable_value;
    // For comptime control flow, `analyzeBody` may report a break targeting an
    // *outer* block. In that case, use Zig's error mechanism to propagate
    // control flow up the stack until the matching block is found.
    if (block.is_comptime) {
        if (break_data.block_inst != body_inst) {
            sema.comptime_break_inst = break_data.inst;
            return error.ComptimeBreak;
        }
    }
    return try sema.resolveInst(break_data.operand);
}
|
|
|
|
/// Analyzes `body`, converting any comptime break that escapes it into a
/// runtime break (via `addRuntimeBreak`) in `block`. All other errors
/// propagate to the caller.
fn analyzeBodyRuntimeBreak(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) !void {
    _ = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => {
            // The offending break instruction was stashed by `analyzeBodyInner`.
            const break_inst = sema.comptime_break_inst;
            const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
            const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
            try sema.addRuntimeBreak(block, .{
                .block_inst = extra.block_inst,
                .operand = break_data.operand,
                .inst = break_inst,
            });
        },
        else => |e| return e,
    };
}
|
|
|
|
/// Analyzes `body`, asserting that no comptime break escapes it. Use
/// `analyzeBodyBreak` or `analyzeBodyRuntimeBreak` when comptime control
/// flow may leave the body.
pub fn analyzeBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) !void {
    _ = sema.analyzeBodyInner(block, body) catch |err| {
        if (err == error.ComptimeBreak) unreachable; // unexpected comptime control flow
        return err;
    };
}
|
|
|
|
/// Describes the `break` that terminated a body analyzed by `analyzeBodyBreak`.
const BreakData = struct {
    /// The ZIR block instruction that the break targets.
    block_inst: Zir.Inst.Index,
    /// The break operand, i.e. the result value of the target block.
    operand: Zir.Inst.Ref,
    /// The break instruction itself.
    inst: Zir.Inst.Index,
};
|
|
|
|
/// Analyzes `body` and returns information about the break that terminated it,
/// or null if the body ended in a noreturn instruction (no break to report).
pub fn analyzeBodyBreak(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) CompileError!?BreakData {
    const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        // A comptime break propagated up to this body; the break instruction
        // was stashed in `sema.comptime_break_inst` by `analyzeBodyInner`.
        error.ComptimeBreak => sema.comptime_break_inst,
        else => |e| return e,
    };
    // This noreturn check must come before `break_inst` is read: when the
    // body ended via `always_noreturn`, `break_inst` is `undefined` and we
    // return null here without touching it.
    if (block.instructions.items.len != 0 and
        sema.isNoReturn(block.instructions.items[block.instructions.items.len - 1].toRef()))
        return null;
    // Decode the break instruction's operand and target block.
    const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
    const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
    return BreakData{
        .block_inst = extra.block_inst,
        .operand = break_data.operand,
        .inst = break_inst,
    };
}
|
|
|
|
/// ZIR instructions which are always `noreturn` return this. This matches the
/// return type of `analyzeBodyInner` so that we can tail call them.
/// Only appropriate to return when the instruction is known to be NoReturn
/// solely based on the ZIR tag.
/// The index payload is `undefined` and must never be read; callers detect
/// this case by checking whether the block's last instruction is noreturn
/// (see `analyzeBodyBreak`) before reading the index.
const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined);
|
|
|
|
/// This function is the main loop of `Sema` and it can be used in two different ways:
|
|
/// * The traditional way where there are N breaks out of the block and peer type
|
|
/// resolution is done on the break operands. In this case, the `Zir.Inst.Index`
|
|
/// part of the return value will be `undefined`, and callsites should ignore it,
|
|
/// finding the block result value via the block scope.
|
|
/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline`
|
|
/// instruction. In this case, the `Zir.Inst.Index` part of the return value will be
|
|
/// the break instruction. This communicates both which block the break applies to, as
|
|
/// well as the operand. No block scope needs to be created for this strategy.
|
|
fn analyzeBodyInner(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
body: []const Zir.Inst.Index,
|
|
) CompileError!Zir.Inst.Index {
|
|
// No tracy calls here, to avoid interfering with the tail call mechanism.
|
|
|
|
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);
|
|
|
|
// Most of the time, we don't need to construct a new capture scope for a
|
|
// block. However, successive iterations of comptime loops can capture
|
|
// different values for the same Zir.Inst.Index, so in those cases, we will
|
|
// have to create nested capture scopes; see the `.repeat` case below.
|
|
const parent_capture_scope = block.wip_capture_scope;
|
|
|
|
const mod = sema.mod;
|
|
const map = &sema.inst_map;
|
|
const tags = sema.code.instructions.items(.tag);
|
|
const datas = sema.code.instructions.items(.data);
|
|
|
|
var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
|
|
crash_info.push();
|
|
defer crash_info.pop();
|
|
|
|
// We use a while (true) loop here to avoid a redundant way of breaking out of
|
|
// the loop. The only way to break out of the loop is with a `noreturn`
|
|
// instruction.
|
|
var i: u32 = 0;
|
|
const result = while (true) {
|
|
crash_info.setBodyIndex(i);
|
|
const inst = body[i];
|
|
std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{
|
|
mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst,
|
|
});
|
|
const air_inst: Air.Inst.Ref = switch (tags[@intFromEnum(inst)]) {
|
|
// zig fmt: off
|
|
.alloc => try sema.zirAlloc(block, inst),
|
|
.alloc_inferred => try sema.zirAllocInferred(block, true),
|
|
.alloc_inferred_mut => try sema.zirAllocInferred(block, false),
|
|
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(true),
|
|
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(false),
|
|
.alloc_mut => try sema.zirAllocMut(block, inst),
|
|
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
|
|
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
|
|
.anyframe_type => try sema.zirAnyframeType(block, inst),
|
|
.array_cat => try sema.zirArrayCat(block, inst),
|
|
.array_mul => try sema.zirArrayMul(block, inst),
|
|
.array_type => try sema.zirArrayType(block, inst),
|
|
.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
|
|
.vector_type => try sema.zirVectorType(block, inst),
|
|
.as_node => try sema.zirAsNode(block, inst),
|
|
.as_shift_operand => try sema.zirAsShiftOperand(block, inst),
|
|
.bit_and => try sema.zirBitwise(block, inst, .bit_and),
|
|
.bit_not => try sema.zirBitNot(block, inst),
|
|
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
|
|
.bitcast => try sema.zirBitcast(block, inst),
|
|
.suspend_block => try sema.zirSuspendBlock(block, inst),
|
|
.bool_not => try sema.zirBoolNot(block, inst),
|
|
.bool_br_and => try sema.zirBoolBr(block, inst, false),
|
|
.bool_br_or => try sema.zirBoolBr(block, inst, true),
|
|
.c_import => try sema.zirCImport(block, inst),
|
|
.call => try sema.zirCall(block, inst, .direct),
|
|
.field_call => try sema.zirCall(block, inst, .field),
|
|
.closure_get => try sema.zirClosureGet(block, inst),
|
|
.cmp_lt => try sema.zirCmp(block, inst, .lt),
|
|
.cmp_lte => try sema.zirCmp(block, inst, .lte),
|
|
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
|
|
.cmp_gte => try sema.zirCmp(block, inst, .gte),
|
|
.cmp_gt => try sema.zirCmp(block, inst, .gt),
|
|
.cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .Optimized)),
|
|
.decl_ref => try sema.zirDeclRef(block, inst),
|
|
.decl_val => try sema.zirDeclVal(block, inst),
|
|
.load => try sema.zirLoad(block, inst),
|
|
.elem_ptr => try sema.zirElemPtr(block, inst),
|
|
.elem_ptr_node => try sema.zirElemPtrNode(block, inst),
|
|
.elem_val => try sema.zirElemVal(block, inst),
|
|
.elem_val_node => try sema.zirElemValNode(block, inst),
|
|
.elem_val_imm => try sema.zirElemValImm(block, inst),
|
|
.elem_type => try sema.zirElemType(block, inst),
|
|
.indexable_ptr_elem_type => try sema.zirIndexablePtrElemType(block, inst),
|
|
.vector_elem_type => try sema.zirVectorElemType(block, inst),
|
|
.enum_literal => try sema.zirEnumLiteral(block, inst),
|
|
.int_from_enum => try sema.zirIntFromEnum(block, inst),
|
|
.enum_from_int => try sema.zirEnumFromInt(block, inst),
|
|
.err_union_code => try sema.zirErrUnionCode(block, inst),
|
|
.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
|
|
.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst),
|
|
.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst),
|
|
.error_union_type => try sema.zirErrorUnionType(block, inst),
|
|
.error_value => try sema.zirErrorValue(block, inst),
|
|
.field_ptr => try sema.zirFieldPtr(block, inst),
|
|
.field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
|
|
.field_val => try sema.zirFieldVal(block, inst),
|
|
.field_val_named => try sema.zirFieldValNamed(block, inst),
|
|
.func => try sema.zirFunc(block, inst, false),
|
|
.func_inferred => try sema.zirFunc(block, inst, true),
|
|
.func_fancy => try sema.zirFuncFancy(block, inst),
|
|
.import => try sema.zirImport(block, inst),
|
|
.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
|
|
.int => try sema.zirInt(block, inst),
|
|
.int_big => try sema.zirIntBig(block, inst),
|
|
.float => try sema.zirFloat(block, inst),
|
|
.float128 => try sema.zirFloat128(block, inst),
|
|
.int_type => try sema.zirIntType(inst),
|
|
.is_non_err => try sema.zirIsNonErr(block, inst),
|
|
.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst),
|
|
.ret_is_non_err => try sema.zirRetIsNonErr(block, inst),
|
|
.is_non_null => try sema.zirIsNonNull(block, inst),
|
|
.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst),
|
|
.merge_error_sets => try sema.zirMergeErrorSets(block, inst),
|
|
.negate => try sema.zirNegate(block, inst),
|
|
.negate_wrap => try sema.zirNegateWrap(block, inst),
|
|
.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true),
|
|
.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true),
|
|
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
|
|
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
|
|
.optional_type => try sema.zirOptionalType(block, inst),
|
|
.ptr_type => try sema.zirPtrType(block, inst),
|
|
.ref => try sema.zirRef(block, inst),
|
|
.ret_err_value_code => try sema.zirRetErrValueCode(inst),
|
|
.shr => try sema.zirShr(block, inst, .shr),
|
|
.shr_exact => try sema.zirShr(block, inst, .shr_exact),
|
|
.slice_end => try sema.zirSliceEnd(block, inst),
|
|
.slice_sentinel => try sema.zirSliceSentinel(block, inst),
|
|
.slice_start => try sema.zirSliceStart(block, inst),
|
|
.slice_length => try sema.zirSliceLength(block, inst),
|
|
.str => try sema.zirStr(inst),
|
|
.switch_block => try sema.zirSwitchBlock(block, inst, false),
|
|
.switch_block_ref => try sema.zirSwitchBlock(block, inst, true),
|
|
.switch_block_err_union => try sema.zirSwitchBlockErrUnion(block, inst),
|
|
.type_info => try sema.zirTypeInfo(block, inst),
|
|
.size_of => try sema.zirSizeOf(block, inst),
|
|
.bit_size_of => try sema.zirBitSizeOf(block, inst),
|
|
.typeof => try sema.zirTypeof(block, inst),
|
|
.typeof_builtin => try sema.zirTypeofBuiltin(block, inst),
|
|
.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
|
|
.xor => try sema.zirBitwise(block, inst, .xor),
|
|
.struct_init_empty => try sema.zirStructInitEmpty(block, inst),
|
|
.struct_init_empty_result => try sema.zirStructInitEmptyResult(block, inst, false),
|
|
.struct_init_empty_ref_result => try sema.zirStructInitEmptyResult(block, inst, true),
|
|
.struct_init_anon => try sema.zirStructInitAnon(block, inst),
|
|
.struct_init => try sema.zirStructInit(block, inst, false),
|
|
.struct_init_ref => try sema.zirStructInit(block, inst, true),
|
|
.struct_init_field_type => try sema.zirStructInitFieldType(block, inst),
|
|
.struct_init_field_ptr => try sema.zirStructInitFieldPtr(block, inst),
|
|
.array_init_anon => try sema.zirArrayInitAnon(block, inst),
|
|
.array_init => try sema.zirArrayInit(block, inst, false),
|
|
.array_init_ref => try sema.zirArrayInit(block, inst, true),
|
|
.array_init_elem_type => try sema.zirArrayInitElemType(block, inst),
|
|
.array_init_elem_ptr => try sema.zirArrayInitElemPtr(block, inst),
|
|
.union_init => try sema.zirUnionInit(block, inst),
|
|
.field_type_ref => try sema.zirFieldTypeRef(block, inst),
|
|
.int_from_ptr => try sema.zirIntFromPtr(block, inst),
|
|
.align_of => try sema.zirAlignOf(block, inst),
|
|
.int_from_bool => try sema.zirIntFromBool(block, inst),
|
|
.embed_file => try sema.zirEmbedFile(block, inst),
|
|
.error_name => try sema.zirErrorName(block, inst),
|
|
.tag_name => try sema.zirTagName(block, inst),
|
|
.type_name => try sema.zirTypeName(block, inst),
|
|
.frame_type => try sema.zirFrameType(block, inst),
|
|
.frame_size => try sema.zirFrameSize(block, inst),
|
|
.int_from_float => try sema.zirIntFromFloat(block, inst),
|
|
.float_from_int => try sema.zirFloatFromInt(block, inst),
|
|
.ptr_from_int => try sema.zirPtrFromInt(block, inst),
|
|
.float_cast => try sema.zirFloatCast(block, inst),
|
|
.int_cast => try sema.zirIntCast(block, inst),
|
|
.ptr_cast => try sema.zirPtrCast(block, inst),
|
|
.truncate => try sema.zirTruncate(block, inst),
|
|
.has_decl => try sema.zirHasDecl(block, inst),
|
|
.has_field => try sema.zirHasField(block, inst),
|
|
.byte_swap => try sema.zirByteSwap(block, inst),
|
|
.bit_reverse => try sema.zirBitReverse(block, inst),
|
|
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
|
|
.offset_of => try sema.zirOffsetOf(block, inst),
|
|
.splat => try sema.zirSplat(block, inst),
|
|
.reduce => try sema.zirReduce(block, inst),
|
|
.shuffle => try sema.zirShuffle(block, inst),
|
|
.atomic_load => try sema.zirAtomicLoad(block, inst),
|
|
.atomic_rmw => try sema.zirAtomicRmw(block, inst),
|
|
.mul_add => try sema.zirMulAdd(block, inst),
|
|
.builtin_call => try sema.zirBuiltinCall(block, inst),
|
|
.field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
|
|
.@"resume" => try sema.zirResume(block, inst),
|
|
.@"await" => try sema.zirAwait(block, inst),
|
|
.for_len => try sema.zirForLen(block, inst),
|
|
.validate_array_init_ref_ty => try sema.zirValidateArrayInitRefTy(block, inst),
|
|
.opt_eu_base_ptr_init => try sema.zirOptEuBasePtrInit(block, inst),
|
|
.coerce_ptr_elem_ty => try sema.zirCoercePtrElemTy(block, inst),
|
|
|
|
.clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
|
|
.ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
|
|
.pop_count => try sema.zirBitCount(block, inst, .popcount, Value.popCount),
|
|
.abs => try sema.zirAbs(block, inst),
|
|
|
|
.sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
|
|
.sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
|
|
.cos => try sema.zirUnaryMath(block, inst, .cos, Value.cos),
|
|
.tan => try sema.zirUnaryMath(block, inst, .tan, Value.tan),
|
|
.exp => try sema.zirUnaryMath(block, inst, .exp, Value.exp),
|
|
.exp2 => try sema.zirUnaryMath(block, inst, .exp2, Value.exp2),
|
|
.log => try sema.zirUnaryMath(block, inst, .log, Value.log),
|
|
.log2 => try sema.zirUnaryMath(block, inst, .log2, Value.log2),
|
|
.log10 => try sema.zirUnaryMath(block, inst, .log10, Value.log10),
|
|
.floor => try sema.zirUnaryMath(block, inst, .floor, Value.floor),
|
|
.ceil => try sema.zirUnaryMath(block, inst, .ceil, Value.ceil),
|
|
.round => try sema.zirUnaryMath(block, inst, .round, Value.round),
|
|
.trunc => try sema.zirUnaryMath(block, inst, .trunc_float, Value.trunc),
|
|
|
|
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
|
|
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
|
|
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
|
|
|
|
.add => try sema.zirArithmetic(block, inst, .add, true),
|
|
.addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
|
|
.add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
|
|
.add_unsafe => try sema.zirArithmetic(block, inst, .add_unsafe, false),
|
|
.mul => try sema.zirArithmetic(block, inst, .mul, true),
|
|
.mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
|
|
.mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
|
|
.sub => try sema.zirArithmetic(block, inst, .sub, true),
|
|
.subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
|
|
.sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),
|
|
|
|
.div => try sema.zirDiv(block, inst),
|
|
.div_exact => try sema.zirDivExact(block, inst),
|
|
.div_floor => try sema.zirDivFloor(block, inst),
|
|
.div_trunc => try sema.zirDivTrunc(block, inst),
|
|
|
|
.mod_rem => try sema.zirModRem(block, inst),
|
|
.mod => try sema.zirMod(block, inst),
|
|
.rem => try sema.zirRem(block, inst),
|
|
|
|
.max => try sema.zirMinMax(block, inst, .max),
|
|
.min => try sema.zirMinMax(block, inst, .min),
|
|
|
|
.shl => try sema.zirShl(block, inst, .shl),
|
|
.shl_exact => try sema.zirShl(block, inst, .shl_exact),
|
|
.shl_sat => try sema.zirShl(block, inst, .shl_sat),
|
|
|
|
.ret_ptr => try sema.zirRetPtr(block),
|
|
.ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),
|
|
|
|
// Instructions that we know to *always* be noreturn based solely on their tag.
|
|
// These functions match the return type of analyzeBody so that we can
|
|
// tail call them here.
|
|
.compile_error => break sema.zirCompileError(block, inst),
|
|
.ret_implicit => break sema.zirRetImplicit(block, inst),
|
|
.ret_node => break sema.zirRetNode(block, inst),
|
|
.ret_load => break sema.zirRetLoad(block, inst),
|
|
.ret_err_value => break sema.zirRetErrValue(block, inst),
|
|
.@"unreachable" => break sema.zirUnreachable(block, inst),
|
|
.panic => break sema.zirPanic(block, inst),
|
|
.trap => break sema.zirTrap(block, inst),
|
|
// zig fmt: on
|
|
|
|
// This instruction never exists in an analyzed body. It exists only in the declaration
|
|
// list for a container type.
|
|
.declaration => unreachable,
|
|
|
|
.extended => ext: {
|
|
const extended = datas[@intFromEnum(inst)].extended;
|
|
break :ext switch (extended.opcode) {
|
|
// zig fmt: off
|
|
.variable => try sema.zirVarExtended( block, extended),
|
|
.struct_decl => try sema.zirStructDecl( block, extended, inst),
|
|
.enum_decl => try sema.zirEnumDecl( block, extended, inst),
|
|
.union_decl => try sema.zirUnionDecl( block, extended, inst),
|
|
.opaque_decl => try sema.zirOpaqueDecl( block, extended, inst),
|
|
.this => try sema.zirThis( block, extended),
|
|
.ret_addr => try sema.zirRetAddr( block, extended),
|
|
.builtin_src => try sema.zirBuiltinSrc( block, extended),
|
|
.error_return_trace => try sema.zirErrorReturnTrace( block),
|
|
.frame => try sema.zirFrame( block, extended),
|
|
.frame_address => try sema.zirFrameAddress( block, extended),
|
|
.alloc => try sema.zirAllocExtended( block, extended),
|
|
.builtin_extern => try sema.zirBuiltinExtern( block, extended),
|
|
.@"asm" => try sema.zirAsm( block, extended, false),
|
|
.asm_expr => try sema.zirAsm( block, extended, true),
|
|
.typeof_peer => try sema.zirTypeofPeer( block, extended),
|
|
.compile_log => try sema.zirCompileLog( extended),
|
|
.min_multi => try sema.zirMinMaxMulti( block, extended, .min),
|
|
.max_multi => try sema.zirMinMaxMulti( block, extended, .max),
|
|
.add_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
|
|
.sub_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
|
|
.mul_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
|
|
.shl_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
|
|
.c_undef => try sema.zirCUndef( block, extended),
|
|
.c_include => try sema.zirCInclude( block, extended),
|
|
.c_define => try sema.zirCDefine( block, extended),
|
|
.wasm_memory_size => try sema.zirWasmMemorySize( block, extended),
|
|
.wasm_memory_grow => try sema.zirWasmMemoryGrow( block, extended),
|
|
.prefetch => try sema.zirPrefetch( block, extended),
|
|
.error_cast => try sema.zirErrorCast( block, extended),
|
|
.await_nosuspend => try sema.zirAwaitNosuspend( block, extended),
|
|
.select => try sema.zirSelect( block, extended),
|
|
.int_from_error => try sema.zirIntFromError( block, extended),
|
|
.error_from_int => try sema.zirErrorFromInt( block, extended),
|
|
.reify => try sema.zirReify( block, extended, inst),
|
|
.builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended),
|
|
.cmpxchg => try sema.zirCmpxchg( block, extended),
|
|
.c_va_arg => try sema.zirCVaArg( block, extended),
|
|
.c_va_copy => try sema.zirCVaCopy( block, extended),
|
|
.c_va_end => try sema.zirCVaEnd( block, extended),
|
|
.c_va_start => try sema.zirCVaStart( block, extended),
|
|
.ptr_cast_full => try sema.zirPtrCastFull( block, extended),
|
|
.ptr_cast_no_dest => try sema.zirPtrCastNoDest( block, extended),
|
|
.work_item_id => try sema.zirWorkItem( block, extended, extended.opcode),
|
|
.work_group_size => try sema.zirWorkItem( block, extended, extended.opcode),
|
|
.work_group_id => try sema.zirWorkItem( block, extended, extended.opcode),
|
|
.in_comptime => try sema.zirInComptime( block),
|
|
// zig fmt: on
|
|
|
|
.fence => {
|
|
try sema.zirFence(block, extended);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.set_float_mode => {
|
|
try sema.zirSetFloatMode(block, extended);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.set_align_stack => {
|
|
try sema.zirSetAlignStack(block, extended);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.set_cold => {
|
|
try sema.zirSetCold(block, extended);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.breakpoint => {
|
|
if (!block.is_comptime) {
|
|
_ = try block.addNoOp(.breakpoint);
|
|
}
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.restore_err_ret_index => {
|
|
try sema.zirRestoreErrRetIndex(block, extended);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.value_placeholder => unreachable, // never appears in a body
|
|
};
|
|
},
|
|
|
|
// Instructions that we know can *never* be noreturn based solely on
|
|
// their tag. We avoid needlessly checking if they are noreturn and
|
|
// continue the loop.
|
|
// We also know that they cannot be referenced later, so we avoid
|
|
// putting them into the map.
|
|
.dbg_stmt => {
|
|
try sema.zirDbgStmt(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.dbg_var_ptr => {
|
|
try sema.zirDbgVar(block, inst, .dbg_var_ptr);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.dbg_var_val => {
|
|
try sema.zirDbgVar(block, inst, .dbg_var_val);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.ensure_err_union_payload_void => {
|
|
try sema.zirEnsureErrUnionPayloadVoid(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.ensure_result_non_error => {
|
|
try sema.zirEnsureResultNonError(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.ensure_result_used => {
|
|
try sema.zirEnsureResultUsed(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.set_eval_branch_quota => {
|
|
try sema.zirSetEvalBranchQuota(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.atomic_store => {
|
|
try sema.zirAtomicStore(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.store_node => {
|
|
try sema.zirStoreNode(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.store_to_inferred_ptr => {
|
|
try sema.zirStoreToInferredPtr(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.resolve_inferred_alloc => {
|
|
try sema.zirResolveInferredAlloc(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_struct_init_ty => {
|
|
try sema.zirValidateStructInitTy(block, inst, false);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_struct_init_result_ty => {
|
|
try sema.zirValidateStructInitTy(block, inst, true);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_array_init_ty => {
|
|
try sema.zirValidateArrayInitTy(block, inst, false);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_array_init_result_ty => {
|
|
try sema.zirValidateArrayInitTy(block, inst, true);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_ptr_struct_init => {
|
|
try sema.zirValidatePtrStructInit(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_ptr_array_init => {
|
|
try sema.zirValidatePtrArrayInit(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_deref => {
|
|
try sema.zirValidateDeref(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_destructure => {
|
|
try sema.zirValidateDestructure(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.validate_ref_ty => {
|
|
try sema.zirValidateRefTy(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.@"export" => {
|
|
try sema.zirExport(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.export_value => {
|
|
try sema.zirExportValue(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.set_runtime_safety => {
|
|
try sema.zirSetRuntimeSafety(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.param => {
|
|
try sema.zirParam(block, inst, false);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.param_comptime => {
|
|
try sema.zirParam(block, inst, true);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.param_anytype => {
|
|
try sema.zirParamAnytype(block, inst, false);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.param_anytype_comptime => {
|
|
try sema.zirParamAnytype(block, inst, true);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.closure_capture => {
|
|
try sema.zirClosureCapture(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.memcpy => {
|
|
try sema.zirMemcpy(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.memset => {
|
|
try sema.zirMemset(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.check_comptime_control_flow => {
|
|
if (!block.is_comptime) {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
|
|
const src = inst_data.src();
|
|
const inline_block = inst_data.operand.toIndex().?;
|
|
|
|
var check_block = block;
|
|
const target_runtime_index = while (true) {
|
|
if (check_block.inline_block == inline_block.toOptional()) {
|
|
break check_block.runtime_index;
|
|
}
|
|
check_block = check_block.parent.?;
|
|
};
|
|
|
|
if (@intFromEnum(target_runtime_index) < @intFromEnum(block.runtime_index)) {
|
|
const runtime_src = block.runtime_cond orelse block.runtime_loop.?;
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "comptime control flow inside runtime block", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
|
|
try mod.errNoteNonLazy(runtime_src, msg, "runtime control flow here", .{});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
}
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.save_err_ret_index => {
|
|
try sema.zirSaveErrRetIndex(block, inst);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.restore_err_ret_index_unconditional => {
|
|
const un_node = datas[@intFromEnum(inst)].un_node;
|
|
try sema.restoreErrRetIndex(block, un_node.src(), un_node.operand, .none);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
.restore_err_ret_index_fn_entry => {
|
|
const un_node = datas[@intFromEnum(inst)].un_node;
|
|
try sema.restoreErrRetIndex(block, un_node.src(), .none, un_node.operand);
|
|
i += 1;
|
|
continue;
|
|
},
|
|
|
|
// Special case instructions to handle comptime control flow.
|
|
.@"break" => {
|
|
if (block.is_comptime) {
|
|
break inst; // same as break_inline
|
|
} else {
|
|
break sema.zirBreak(block, inst);
|
|
}
|
|
},
|
|
.break_inline => {
|
|
if (block.is_comptime) {
|
|
break inst;
|
|
} else {
|
|
sema.comptime_break_inst = inst;
|
|
return error.ComptimeBreak;
|
|
}
|
|
},
|
|
.repeat => {
|
|
if (block.is_comptime) {
|
|
// Send comptime control flow back to the beginning of this block.
|
|
const src = LazySrcLoc.nodeOffset(datas[@intFromEnum(inst)].node);
|
|
try sema.emitBackwardBranch(block, src);
|
|
|
|
// We need to construct new capture scopes for the next loop iteration so it
|
|
// can capture values without clobbering the earlier iteration's captures.
|
|
block.wip_capture_scope = try mod.createCaptureScope(parent_capture_scope);
|
|
|
|
i = 0;
|
|
continue;
|
|
} else {
|
|
break always_noreturn;
|
|
}
|
|
},
|
|
.repeat_inline => {
|
|
// Send comptime control flow back to the beginning of this block.
|
|
const src = LazySrcLoc.nodeOffset(datas[@intFromEnum(inst)].node);
|
|
try sema.emitBackwardBranch(block, src);
|
|
|
|
// We need to construct new capture scopes for the next loop iteration so it
|
|
// can capture values without clobbering the earlier iteration's captures.
|
|
block.wip_capture_scope = try mod.createCaptureScope(parent_capture_scope);
|
|
|
|
i = 0;
|
|
continue;
|
|
},
|
|
.loop => blk: {
|
|
if (!block.is_comptime) break :blk try sema.zirLoop(block, inst);
|
|
// Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
|
|
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
|
|
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
|
|
|
|
// Create a temporary child block so that this loop is properly
|
|
// labeled for any .restore_err_ret_index instructions
|
|
var child_block = block.makeSubBlock();
|
|
|
|
var label: Block.Label = .{
|
|
.zir_block = inst,
|
|
.merges = undefined,
|
|
};
|
|
child_block.label = &label;
|
|
|
|
// Write these instructions directly into the parent block
|
|
child_block.instructions = block.instructions;
|
|
defer block.instructions = child_block.instructions;
|
|
|
|
const break_data = (try sema.analyzeBodyBreak(&child_block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.block, .block_comptime => blk: {
|
|
if (!block.is_comptime) {
|
|
break :blk try sema.zirBlock(block, inst, tags[@intFromEnum(inst)] == .block_comptime);
|
|
}
|
|
// Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
|
|
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
|
|
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
|
|
|
|
// Create a temporary child block so that this block is properly
|
|
// labeled for any .restore_err_ret_index instructions
|
|
var child_block = block.makeSubBlock();
|
|
|
|
var label: Block.Label = .{
|
|
.zir_block = inst,
|
|
.merges = undefined,
|
|
};
|
|
child_block.label = &label;
|
|
|
|
// Write these instructions directly into the parent block
|
|
child_block.instructions = block.instructions;
|
|
defer block.instructions = child_block.instructions;
|
|
|
|
const break_data = (try sema.analyzeBodyBreak(&child_block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.block_inline => blk: {
|
|
// Directly analyze the block body without introducing a new block.
|
|
// However, in the case of a corresponding break_inline which reaches
|
|
// through a runtime conditional branch, we must retroactively emit
|
|
// a block, so we remember the block index here just in case.
|
|
const block_index = block.instructions.items.len;
|
|
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
|
|
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
|
|
const gpa = sema.gpa;
|
|
|
|
const opt_break_data, const need_debug_scope = b: {
|
|
// Create a temporary child block so that this inline block is properly
|
|
// labeled for any .restore_err_ret_index instructions
|
|
var child_block = block.makeSubBlock();
|
|
var need_debug_scope = false;
|
|
child_block.need_debug_scope = &need_debug_scope;
|
|
|
|
// If this block contains a function prototype, we need to reset the
|
|
// current list of parameters and restore it later.
|
|
// Note: this probably needs to be resolved in a more general manner.
|
|
const tag_index = @intFromEnum(inline_body[inline_body.len - 1]);
|
|
child_block.inline_block = (if (tags[tag_index] == .repeat_inline)
|
|
inline_body[0]
|
|
else
|
|
inst).toOptional();
|
|
|
|
var label: Block.Label = .{
|
|
.zir_block = inst,
|
|
.merges = undefined,
|
|
};
|
|
child_block.label = &label;
|
|
|
|
// Write these instructions directly into the parent block
|
|
child_block.instructions = block.instructions;
|
|
defer block.instructions = child_block.instructions;
|
|
|
|
const result = try sema.analyzeBodyBreak(&child_block, inline_body);
|
|
if (need_debug_scope) {
|
|
_ = try sema.ensurePostHoc(block, inst);
|
|
}
|
|
break :b .{ result, need_debug_scope };
|
|
};
|
|
|
|
// A runtime conditional branch that needs a post-hoc block to be
|
|
// emitted communicates this by mapping the block index into the inst map.
|
|
if (map.get(inst)) |new_block_ref| ph: {
|
|
// Comptime control flow populates the map, so we don't actually know
|
|
// if this is a post-hoc runtime block until we check the
|
|
// post_hoc_block map.
|
|
const new_block_inst = new_block_ref.toIndex() orelse break :ph;
|
|
const labeled_block = sema.post_hoc_blocks.get(new_block_inst) orelse
|
|
break :ph;
|
|
|
|
// In this case we need to move all the instructions starting at
|
|
// block_index from the current block into this new one.
|
|
|
|
if (opt_break_data) |break_data| {
|
|
// This is a comptime break which we now change to a runtime break
|
|
// since it crosses a runtime branch.
|
|
// It may pass through our currently being analyzed block_inline or it
|
|
// may point directly to it. In the latter case, this modifies the
|
|
// block that we looked up in the post_hoc_blocks map above.
|
|
try sema.addRuntimeBreak(block, break_data);
|
|
}
|
|
|
|
try labeled_block.block.instructions.appendSlice(gpa, block.instructions.items[block_index..]);
|
|
block.instructions.items.len = block_index;
|
|
|
|
const block_result = try sema.analyzeBlockBody(block, inst_data.src(), &labeled_block.block, &labeled_block.label.merges, need_debug_scope);
|
|
{
|
|
// Destroy the ad-hoc block entry so that it does not interfere with
|
|
// the next iteration of comptime control flow, if any.
|
|
labeled_block.destroy(gpa);
|
|
assert(sema.post_hoc_blocks.remove(new_block_inst));
|
|
}
|
|
|
|
break :blk block_result;
|
|
}
|
|
|
|
const break_data = opt_break_data orelse break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.condbr => blk: {
|
|
if (!block.is_comptime) break sema.zirCondbr(block, inst);
|
|
// Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
|
|
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
|
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
|
|
const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len);
|
|
const else_body = sema.code.bodySlice(
|
|
extra.end + then_body.len,
|
|
extra.data.else_body_len,
|
|
);
|
|
const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, .{
|
|
.needed_comptime_reason = "condition in comptime branch must be comptime-known",
|
|
.block_comptime_reason = block.comptime_reason,
|
|
});
|
|
const inline_body = if (cond.val.toBool()) then_body else else_body;
|
|
|
|
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.condbr_inline => blk: {
|
|
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
|
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
|
|
const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len);
|
|
const else_body = sema.code.bodySlice(
|
|
extra.end + then_body.len,
|
|
extra.data.else_body_len,
|
|
);
|
|
const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, .{
|
|
.needed_comptime_reason = "condition in comptime branch must be comptime-known",
|
|
.block_comptime_reason = block.comptime_reason,
|
|
});
|
|
const inline_body = if (cond.val.toBool()) then_body else else_body;
|
|
|
|
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
|
|
const old_runtime_index = block.runtime_index;
|
|
defer block.runtime_index = old_runtime_index;
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.@"try" => blk: {
|
|
if (!block.is_comptime) break :blk try sema.zirTry(block, inst);
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
|
|
const src = inst_data.src();
|
|
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
|
|
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
|
|
const err_union = try sema.resolveInst(extra.data.operand);
|
|
const err_union_ty = sema.typeOf(err_union);
|
|
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
|
|
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
|
|
err_union_ty.fmt(mod),
|
|
});
|
|
}
|
|
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
|
|
assert(is_non_err != .none);
|
|
const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, .{
|
|
.needed_comptime_reason = "try operand inside comptime block must be comptime-known",
|
|
.block_comptime_reason = block.comptime_reason,
|
|
});
|
|
if (is_non_err_val.toBool()) {
|
|
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
|
|
}
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.try_ptr => blk: {
|
|
if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst);
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
|
|
const src = inst_data.src();
|
|
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
|
|
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
|
|
const operand = try sema.resolveInst(extra.data.operand);
|
|
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
|
|
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
|
|
assert(is_non_err != .none);
|
|
const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, .{
|
|
.needed_comptime_reason = "try operand inside comptime block must be comptime-known",
|
|
.block_comptime_reason = block.comptime_reason,
|
|
});
|
|
if (is_non_err_val.toBool()) {
|
|
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
|
|
}
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.@"defer" => blk: {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"defer";
|
|
const defer_body = sema.code.bodySlice(inst_data.index, inst_data.len);
|
|
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
|
|
error.ComptimeBreak => sema.comptime_break_inst,
|
|
else => |e| return e,
|
|
};
|
|
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
|
|
break :blk .void_value;
|
|
},
|
|
.defer_err_code => blk: {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].defer_err_code;
|
|
const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data;
|
|
const defer_body = sema.code.bodySlice(extra.index, extra.len);
|
|
const err_code = try sema.resolveInst(inst_data.err_code);
|
|
map.putAssumeCapacity(extra.remapped_err_code, err_code);
|
|
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
|
|
error.ComptimeBreak => sema.comptime_break_inst,
|
|
else => |e| return e,
|
|
};
|
|
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
|
|
break :blk .void_value;
|
|
},
|
|
};
|
|
if (sema.isNoReturn(air_inst)) {
|
|
// We're going to assume that the body itself is noreturn, so let's ensure that now
|
|
assert(block.instructions.items.len > 0);
|
|
assert(sema.isNoReturn(block.instructions.items[block.instructions.items.len - 1].toRef()));
|
|
break always_noreturn;
|
|
}
|
|
map.putAssumeCapacity(inst, air_inst);
|
|
i += 1;
|
|
};
|
|
|
|
// We may have overwritten the capture scope due to a `repeat` instruction where
|
|
// the body had a capture; restore it now.
|
|
block.wip_capture_scope = parent_capture_scope;
|
|
|
|
return result;
|
|
}
|
|
|
|
/// Like `resolveInst`, except a `.none` ZIR ref maps to a `.none` AIR ref
/// instead of being illegal.
pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    return if (zir_ref == .none) .none else sema.resolveInst(zir_ref);
}
|
|
|
|
/// Maps a ZIR instruction ref to its AIR counterpart.
/// A ref that is an instruction index must already be present in `inst_map`;
/// a mapped `.generic_poison` value propagates `error.GenericPoison`.
/// Refs below the instruction range are pre-interned constants, which ZIR and
/// AIR deliberately encode identically.
pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    assert(zir_ref != .none);
    const index = zir_ref.toIndex() orelse {
        // Constant refs: same numeric encoding on both sides.
        return @enumFromInt(@intFromEnum(zir_ref));
    };
    const mapped = sema.inst_map.get(index).?;
    return if (mapped == .generic_poison) error.GenericPoison else mapped;
}
|
|
|
|
/// Resolves `zir_ref`, coerces it to `bool`, and returns its comptime-known
/// value. Emits a compile error (built from `reason`) if the value is not
/// comptime-known or is undefined.
fn resolveConstBool(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) !bool {
    const uncoerced = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, Type.bool, uncoerced, src);
    const bool_val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
    return bool_val.toBool();
}
|
|
|
|
/// Resolves `zir_ref` as a comptime-known `[]const u8` and returns the bytes
/// allocated into `sema.arena`. Emits a compile error (built from `reason`)
/// if the value is not comptime-known or is undefined.
pub fn resolveConstString(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) ![]u8 {
    const uncoerced = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, Type.slice_const_u8, uncoerced, src);
    const str_val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
    return str_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, sema.mod);
}
|
|
|
|
/// Resolves `zir_ref` as a comptime-known `[]const u8` and interns the bytes,
/// returning the interned string. Emits a compile error (built from `reason`)
/// if the value is not comptime-known or is undefined.
pub fn resolveConstStringIntern(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) !InternPool.NullTerminatedString {
    const uncoerced = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, Type.slice_const_u8, uncoerced, src);
    const str_val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
    return str_val.toIpString(Type.slice_const_u8, sema.mod);
}
|
|
|
|
/// Resolves `zir_ref` as a comptime-known `type` value.
/// A generic-poison type propagates `error.GenericPoison` to the caller.
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
    const air_ref = try sema.resolveInst(zir_ref);
    assert(air_ref != .var_args_param_type);
    const resolved = try sema.analyzeAsType(block, src, air_ref);
    return if (resolved.isGenericPoison()) error.GenericPoison else resolved;
}
|
|
|
|
/// Resolves the destination (result) type of a cast builtin, optionally
/// stripping one level of error union and/or optional wrapper according to
/// `strat`. `builtin_name` is used only for error reporting.
/// When the result type is generic poison, emits a compile error explaining
/// why the result type is unknown rather than propagating `error.GenericPoison`.
fn resolveDestType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    strat: enum { remove_eu_opt, remove_eu, remove_opt },
    builtin_name: []const u8,
) !Type {
    const mod = sema.mod;
    // Decode the strategy into two independent "strip this wrapper" flags.
    const remove_eu = switch (strat) {
        .remove_eu_opt, .remove_eu => true,
        .remove_opt => false,
    };
    const remove_opt = switch (strat) {
        .remove_eu_opt, .remove_opt => true,
        .remove_eu => false,
    };

    const raw_ty = sema.resolveType(block, src, zir_ref) catch |err| switch (err) {
        error.GenericPoison => {
            // Cast builtins use their result type as the destination type, but
            // it could be an anytype argument, which we can't catch in AstGen.
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "{s} must have a known result type", .{builtin_name});
                errdefer msg.destroy(sema.gpa);
                // Attach a note explaining where the unknown result type came
                // from, when `genericPoisonReason` can determine it.
                switch (sema.genericPoisonReason(zir_ref)) {
                    .anytype_param => |call_src| try sema.errNote(block, call_src, msg, "result type is unknown due to anytype parameter", .{}),
                    .anyopaque_ptr => |ptr_src| try sema.errNote(block, ptr_src, msg, "result type is unknown due to opaque pointer type", .{}),
                    .unknown => {},
                }
                try sema.errNote(block, src, msg, "use @as to provide explicit result type", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        else => |e| return e,
    };

    // Strip `E!T` (and, if also requested, `E!?T`) down to `T`.
    if (remove_eu and raw_ty.zigTypeTag(mod) == .ErrorUnion) {
        const eu_child = raw_ty.errorUnionPayload(mod);
        if (remove_opt and eu_child.zigTypeTag(mod) == .Optional) {
            return eu_child.childType(mod);
        }
        return eu_child;
    }
    // Strip `?T` down to `T`.
    if (remove_opt and raw_ty.zigTypeTag(mod) == .Optional) {
        return raw_ty.childType(mod);
    }
    return raw_ty;
}
|
|
|
|
/// The determined origin of a generic poison type; see `genericPoisonReason`.
const GenericPoisonReason = union(enum) {
    /// The poison stems from an `anytype` function parameter; payload is the
    /// source location of the call.
    anytype_param: LazySrcLoc,
    /// The poison stems from an `anyopaque` pointer whose element type is
    /// unknown; payload is the source location of the pointer operand.
    anyopaque_ptr: LazySrcLoc,
    /// The origin could not be determined.
    unknown,
};
|
|
|
|
/// Backtracks through ZIR instructions to determine the reason a generic poison
/// type was created. Used for error reporting.
fn genericPoisonReason(sema: *Sema, ref: Zir.Inst.Ref) GenericPoisonReason {
    var cur = ref;
    // Walk backwards through the chain of result-type-deriving instructions
    // until we reach either a known poison source or something we cannot
    // explain. Each arm below re-points `cur` at the instruction's own
    // result-type operand.
    while (true) {
        const inst = cur.toIndex() orelse return .unknown;
        switch (sema.code.instructions.items(.tag)[@intFromEnum(inst)]) {
            .validate_array_init_ref_ty => {
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data;
                cur = extra.ptr_ty;
            },
            .array_init_elem_type => {
                const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
                cur = bin.lhs;
            },
            .indexable_ptr_elem_type, .vector_elem_type => {
                const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                cur = un_node.operand;
            },
            .struct_init_field_type => {
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.FieldType, pl_node.payload_index).data;
                cur = extra.container_type;
            },
            .elem_type => {
                // There are two cases here: the pointer type may already have been
                // generic poison, or it may have been an anyopaque pointer.
                const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                const operand_ref = sema.resolveInst(un_node.operand) catch |err| switch (err) {
                    error.GenericPoison => unreachable, // this is a type, not a value
                };
                const operand_val = operand_ref.toInterned() orelse return .unknown;
                if (operand_val == .generic_poison_type) {
                    // The pointer was generic poison - keep looking.
                    cur = un_node.operand;
                } else {
                    // This must be an anyopaque pointer!
                    return .{ .anyopaque_ptr = un_node.src() };
                }
            },
            .call, .field_call => {
                // A function call can never return generic poison, so we must be
                // evaluating an `anytype` function parameter.
                // TODO: better source location - function decl rather than call
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                return .{ .anytype_param = pl_node.src() };
            },
            else => return .unknown,
        }
    }
}
|
|
|
|
/// Coerces `air_inst` to `type` and returns its comptime-known `Type`.
/// Emits a compile error if the value is not comptime-known or is undefined.
fn analyzeAsType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_inst: Air.Inst.Ref,
) !Type {
    const coerced = try sema.coerce(block, Type.type, air_inst, src);
    const ty_val = try sema.resolveConstDefinedValue(block, src, coerced, .{
        .needed_comptime_reason = "types must be comptime-known",
    });
    return ty_val.toType();
}
|
|
|
|
/// Inserts, at `last_arg_index` within `block`, the AIR instructions that set
/// up the error return trace:
///
///     var addrs: [32]usize = undefined;
///     var st: StackTrace = undefined;
///     st.instruction_addresses = &addrs;
///     st.index = 0;
///     @errorReturnTrace() = &st;
///
/// No-op when error tracing is disabled for this compilation.
/// The instructions are built in a scratch sub-block and then spliced into
/// `block` so they land at `last_arg_index` rather than at the end.
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
    const mod = sema.mod;
    const comp = mod.comp;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    if (!comp.config.any_error_tracing) return;

    // Error return traces are a runtime construct only.
    assert(!block.is_comptime);
    var err_trace_block = block.makeSubBlock();
    defer err_trace_block.instructions.deinit(gpa);

    const src: LazySrcLoc = .unneeded;

    // var addrs: [err_return_trace_addr_count]usize = undefined;
    const err_return_trace_addr_count = 32;
    const addr_arr_ty = try mod.arrayType(.{
        .len = err_return_trace_addr_count,
        .child = .usize_type,
    });
    const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));

    // var st: StackTrace = undefined;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));

    // st.instruction_addresses = &addrs;
    const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses");
    const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store);

    // st.index = 0;
    const index_field_name = try ip.getOrPutString(gpa, "index");
    const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);

    // @errorReturnTrace() = &st;
    _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);

    try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items);
}
|
|
|
|
/// Return the Value corresponding to a given AIR ref, or `null` if it refers to a runtime value.
/// InternPool key `variable` is considered a runtime value.
/// Generic poison causes `error.GenericPoison` to be returned.
fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveValueAllowVariables(inst)) orelse return null;
    if (val.isGenericPoison()) return error.GenericPoison;
    // A `variable` is runtime-known despite being interned.
    return if (sema.mod.intern_pool.isVariable(val.toIntern())) null else val;
}
|
|
|
|
/// Like `resolveValue`, but emits an error if the value is not comptime-known.
fn resolveConstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!Value {
    if (try sema.resolveValue(inst)) |val| return val;
    return sema.failWithNeededComptime(block, src, reason);
}
|
|
|
|
/// Like `resolveValue`, but emits an error if the value is comptime-known to be undefined.
fn resolveDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!?Value {
    const opt_val = try sema.resolveValue(air_ref);
    const val = opt_val orelse return null;
    if (val.isUndef(sema.mod)) return sema.failWithUseOfUndef(block, src);
    return val;
}
|
|
|
|
/// Like `resolveValue`, but emits an error if the value is not comptime-known or is undefined.
fn resolveConstDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!Value {
    const val = try sema.resolveConstValue(block, src, air_ref, reason);
    if (!val.isUndef(sema.mod)) return val;
    return sema.failWithUseOfUndef(block, src);
}
|
|
|
|
/// Like `resolveValue`, but recursively resolves lazy values before returning.
fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveValue(inst)) orelse return null;
    return try sema.resolveLazyValue(val);
}
|
|
|
|
/// Like `resolveValue`, but any pointer value which does not correspond
/// to a comptime-known integer (e.g. a decl pointer) returns `null`.
/// Lazy values are recursively resolved.
fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveValue(inst)) orelse return null;
    if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
        // Pointers into declarations or comptime fields have no stable
        // integer address at comptime.
        .decl, .anon_decl, .mut_decl, .comptime_field => return null,
        .int => {},
        // These tags never appear as a *backing* address tag.
        .eu_payload, .opt_payload, .elem, .field => unreachable,
    };
    return try sema.resolveLazyValue(val);
}
|
|
|
|
/// Returns all InternPool keys representing values, including `variable`, `undef`, and `generic_poison`.
fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    assert(inst != .none);
    // First section of indexes correspond to a set number of constant values.
    if (@intFromEnum(inst) < InternPool.static_len) {
        return Value.fromInterned(@as(InternPool.Index, @enumFromInt(@intFromEnum(inst))));
    }

    const air_tags = sema.air_instructions.items(.tag);
    // If the type has only one possible value, we can answer without looking
    // at the instruction — except that a `variable` must still be reported
    // as itself rather than as the one possible value.
    if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
        if (inst.toInterned()) |ip_index| {
            const val = Value.fromInterned(ip_index);
            if (val.getVariable(sema.mod) != null) return val;
        }
        return opv;
    }
    // Non-interned refs are runtime AIR instructions and carry no value,
    // aside from inferred allocs which must not reach this path.
    const ip_index = inst.toInterned() orelse {
        switch (air_tags[@intFromEnum(inst.toIndex().?)]) {
            .inferred_alloc => unreachable,
            .inferred_alloc_comptime => unreachable,
            else => return null,
        }
    };
    const val = Value.fromInterned(ip_index);
    // Thread-local addresses are only known at runtime.
    if (val.isPtrToThreadLocal(sema.mod)) return null;
    return val;
}
|
|
|
|
/// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
/// a function that does not.
pub fn resolveInstConst(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!TypedValue {
    const air_ref = try sema.resolveInst(zir_ref);
    return .{
        .ty = sema.typeOf(air_ref),
        .val = try sema.resolveConstDefinedValue(block, src, air_ref, reason),
    };
}
|
|
|
|
/// Value Tag may be `undef` or `variable`.
/// See `resolveInstConst` for an alternative.
pub fn resolveInstValueAllowVariables(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!TypedValue {
    const air_ref = try sema.resolveInst(zir_ref);
    const opt_val = try sema.resolveValueAllowVariables(air_ref);
    const val = opt_val orelse return sema.failWithNeededComptime(block, src, reason);
    if (val.isGenericPoison()) return error.GenericPoison;
    return .{
        .ty = sema.typeOf(air_ref),
        .val = val,
    };
}
|
|
|
|
/// Emits the standard "unable to resolve comptime value" error, with a note
/// carrying `reason` and, when present, an explanation of why the enclosing
/// block is comptime.
fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: NeededComptimeReason) CompileError {
    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "unable to resolve comptime value", .{});
        // Scoped to this block: once ownership passes to
        // `failWithOwnedErrorMsg` below, we must not destroy it.
        errdefer err_msg.destroy(sema.gpa);
        try sema.errNote(block, src, err_msg, "{s}", .{reason.needed_comptime_reason});

        if (reason.block_comptime_reason) |why| {
            try why.explain(sema, err_msg);
        }
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Emits the standard "use of undefined value" compile error at `src`.
fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{});
}
|
|
|
|
/// Emits the standard "division by zero" compile error at `src`.
fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "division by zero here causes undefined behavior", .{});
}
|
|
|
|
/// Emits the error for using `%` on operands where the sign of the result
/// would be ambiguous, directing the user to `@rem` or `@mod`.
fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
    const mod = sema.mod;
    return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{
        lhs_ty.fmt(mod), rhs_ty.fmt(mod),
    });
}
|
|
|
|
/// Emits the "expected optional type" error, naming the type actually found.
fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError {
    return sema.fail(block, src, "expected optional type, found '{}'", .{optional_ty.fmt(sema.mod)});
}
|
|
|
|
/// Reports that `ty` cannot be initialized with array initialization syntax.
/// For slice types, adds a note suggesting `[_]T` inferred-length syntax.
fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    const mod = sema.mod;
    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{
            ty.fmt(mod),
        });
        errdefer err_msg.destroy(sema.gpa);
        if (ty.isSlice(mod)) {
            try sema.errNote(block, src, err_msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
        }
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Reports that `ty` cannot be initialized with struct initialization syntax.
fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    const mod = sema.mod;
    return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{
        ty.fmt(mod),
    });
}
|
|
|
|
/// Reports a coercion failure between two error set types (the source set
/// contains a code missing from the destination set).
fn failWithErrorSetCodeMissing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_err_set_ty: Type,
    src_err_set_ty: Type,
) CompileError {
    const mod = sema.mod;
    return sema.fail(block, src, "expected type '{}', found type '{}'", .{
        dest_err_set_ty.fmt(mod), src_err_set_ty.fmt(mod),
    });
}
|
|
|
|
/// Reports comptime integer overflow. For vector types, `vector_index` names
/// the offending element in an attached note.
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
    const mod = sema.mod;
    if (int_ty.zigTypeTag(mod) != .Vector) {
        return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
            int_ty.fmt(mod), val.fmtValue(int_ty, mod),
        });
    }
    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
            int_ty.fmt(mod), val.fmtValue(int_ty, mod),
        });
        errdefer err_msg.destroy(sema.gpa);
        try sema.errNote(block, src, err_msg, "when computing vector element at index '{d}'", .{vector_index});
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Reports a store to a comptime field whose value does not match the field's
/// declared default. For struct types, attaches a note pointing at where the
/// default value is declared; other container kinds get the bare error.
fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
    const mod = sema.mod;
    const msg = msg: {
        const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
        errdefer msg.destroy(sema.gpa);

        // Only struct types can carry the "default value set here" note.
        const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg;
        const default_value_src = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
            .index = field_index,
            .range = .value,
        });
        try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Emits the error for use of async, which the self-hosted compiler does not
/// implement yet.
fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    // No notes are attached, so the message can be handed straight to
    // `failWithOwnedErrorMsg` after creation.
    const msg = try sema.errMsg(block, src, "async has not been implemented in the self-hosted compiler yet", .{});
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Reports a field access on a type that does not support it. When the object
/// is an optional or error union whose payload *would* support accessing
/// `field_name`, adds a note suggesting the appropriate unwrapping syntax.
fn failWithInvalidFieldAccess(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ty: Type,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    // Field access looks through single pointers, so diagnose the pointee.
    const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;

    if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
        const child_ty = inner_ty.optionalChild(mod);
        // Only suggest unwrapping when it would actually help.
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "consider using '.?', 'orelse', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
        const child_ty = inner_ty.errorUnionPayload(mod);
        // Only suggest unwrapping when it would actually help.
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    // Generic fallback for all other unsupported types.
    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
}
|
|
|
|
/// Returns whether `field_name` is a valid field access on `ty`:
/// `len` on arrays (and pointers to arrays), `ptr`/`len` on slices, and any
/// name on types, structs, and unions.
fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool {
    const ip = &mod.intern_pool;
    return switch (ty.zigTypeTag(mod)) {
        .Type, .Struct, .Union => true,
        .Array => ip.stringEqlSlice(field_name, "len"),
        .Pointer => blk: {
            const ptr_info = ty.ptrInfo(mod);
            if (ptr_info.flags.size == .Slice) {
                break :blk ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len");
            }
            if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
                break :blk ip.stringEqlSlice(field_name, "len");
            }
            break :blk false;
        },
        else => false,
    };
}
|
|
|
|
/// Reports an error value `name` that escaped comptime evaluation, attaching
/// one note per entry of the accumulated comptime error return trace.
fn failWithComptimeErrorRetTrace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
        errdefer err_msg.destroy(sema.gpa);

        for (sema.comptime_err_ret_trace.items) |src_loc| {
            try mod.errNoteNonLazy(src_loc, err_msg, "error returned here", .{});
        }
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const mod = sema.mod;
    const resolved_src = mod.declPtr(block.src_decl).toSrcLoc(src, mod);
    return mod.errNoteNonLazy(resolved_src, parent, format, args);
}
|
|
|
|
/// Attaches a note to `parent` pointing at the source location of field
/// `field_index` inside `container_ty`'s declaration. Re-parses the AST to
/// find the field; falls back to the container's own source location if the
/// AST cannot be loaded or the container declaration cannot be resolved.
fn addFieldErrNote(
    sema: *Sema,
    container_ty: Type,
    field_index: usize,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    @setCold(true);
    const mod = sema.mod;
    const decl_index = container_ty.getOwnerDecl(mod);
    const decl = mod.declPtr(decl_index);

    const field_src = blk: {
        const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| {
            // Best effort: still emit the note, just without a precise
            // per-field source location.
            log.err("unable to load AST to report compile error: {s}", .{@errorName(err)});
            break :blk decl.srcLoc(mod);
        };

        const container_node = decl.relativeToNodeIndex(0);
        const node_tags = tree.nodes.items(.tag);
        var buf: [2]std.zig.Ast.Node.Index = undefined;
        const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod);

        // Count only field members; decls interleaved between fields do not
        // advance the field index.
        var it_index: usize = 0;
        for (container_decl.ast.members) |member_node| {
            switch (node_tags[member_node]) {
                .container_field_init,
                .container_field_align,
                .container_field,
                => {
                    if (it_index == field_index) {
                        break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod);
                    }
                    it_index += 1;
                },
                else => continue,
            }
        }
        // Caller must pass a `field_index` that is in bounds for the container.
        unreachable;
    };
    try mod.errNoteNonLazy(field_src, parent, format, args);
}
|
|
|
|
/// Allocates a new `ErrorMsg`, resolving `src` relative to `block`'s `Decl`.
/// Returns `error.NeededSourceLocation` when `src` is `.unneeded`, signaling
/// the caller to retry with a real source location. Caller owns the result.
fn errMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) error{ NeededSourceLocation, OutOfMemory }!*Module.ErrorMsg {
    const mod = sema.mod;
    if (src == .unneeded) return error.NeededSourceLocation;
    const src_decl = mod.declPtr(block.src_decl);
    return Module.ErrorMsg.create(sema.gpa, src_decl.toSrcLoc(src, mod), format, args);
}
|
|
|
|
/// Creates a compile error at `src` and fails the current analysis.
/// For every `Type.Formatter` in `args`, a "declared here" note for the
/// formatted type is attached automatically.
pub fn fail(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) CompileError {
    const err_msg = try sema.errMsg(block, src, format, args);
    // `args` is an anonymous tuple, so this loop unrolls at comptime and the
    // `@TypeOf` check compiles away for non-Formatter arguments.
    inline for (args) |arg| {
        if (@TypeOf(arg) == Type.Formatter) {
            try addDeclaredHereNote(sema, err_msg, arg.data.ty);
        }
    }
    return sema.failWithOwnedErrorMsg(block, err_msg);
}
|
|
|
|
/// Takes ownership of `err_msg`, registers it as the compile error for the
/// current owner `Decl` (or frees it if that `Decl` already has one), marks
/// the owner function/decl as `sema_failure`, and returns `error.AnalysisFail`.
/// When `block` is non-null, "called from here" notes are added for each level
/// of inline/comptime call inlining and a reference trace is attached.
fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
    @setCold(true);
    const gpa = sema.gpa;
    const mod = sema.mod;

    ref: {
        // Until ownership is transferred below, a failure here must free the message.
        errdefer err_msg.destroy(gpa);

        // Compiler debugging aid (-Ddebug-extensions + --debug-compile-errors):
        // print the error and crash immediately instead of collecting it.
        if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
            var wip_errors: std.zig.ErrorBundle.Wip = undefined;
            wip_errors.init(gpa) catch unreachable;
            Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable;
            std.debug.print("compile error during Sema:\n", .{});
            var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
            error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
            crash_report.compilerPanic("unexpected compile error occurred", null, null);
        }

        // Reserve capacity now so the infallible bookkeeping after this
        // labeled block cannot fail with the message half-registered.
        try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
        try mod.failed_files.ensureUnusedCapacity(gpa, 1);

        if (block) |start_block| {
            // Add a "called from here" note for each inline call frame,
            // walking outward from the failure site.
            var block_it = start_block;
            while (block_it.inlining) |inlining| {
                try sema.errNote(
                    inlining.call_block,
                    inlining.call_src,
                    err_msg,
                    "called from here",
                    .{},
                );
                block_it = inlining.call_block;
            }

            const max_references = refs: {
                if (mod.comp.reference_trace) |num| break :refs num;
                // Do not add multiple traces without explicit request.
                // (Breaks out of the enclosing `ref:` block, skipping the trace.)
                if (mod.failed_decls.count() > 0) break :ref;
                break :refs default_reference_trace_len;
            };

            var referenced_by = if (sema.owner_func_index != .none)
                mod.funcOwnerDeclIndex(sema.owner_func_index)
            else
                sema.owner_decl_index;
            var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
            defer reference_stack.deinit();

            // Avoid infinite loops.
            var seen = std.AutoHashMap(InternPool.DeclIndex, void).init(gpa);
            defer seen.deinit();

            // Walk the referencer chain towards the root, recording at most
            // `max_references` entries (but counting all of them via `seen`).
            while (mod.reference_table.get(referenced_by)) |ref| {
                const gop = try seen.getOrPut(ref.referencer);
                if (gop.found_existing) break;
                if (reference_stack.items.len < max_references) {
                    const decl = mod.declPtr(ref.referencer);
                    try reference_stack.append(.{
                        .decl = decl.name,
                        .src_loc = decl.toSrcLoc(ref.src, mod),
                    });
                }
                referenced_by = ref.referencer;
            }
            err_msg.reference_trace = try reference_stack.toOwnedSlice();
            // Saturating subtraction: how many trace entries were elided.
            err_msg.hidden_references = @intCast(seen.count() -| max_references);
        }
    }
    const ip = &mod.intern_pool;
    if (sema.owner_func_index != .none) {
        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
    } else {
        sema.owner_decl.analysis = .sema_failure;
    }
    if (sema.func_index != .none) {
        ip.funcAnalysis(sema.func_index).state = .sema_failure;
    }
    const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
    if (gop.found_existing) {
        // If there are multiple errors for the same Decl, prefer the first one added.
        sema.err = null;
        err_msg.destroy(gpa);
    } else {
        sema.err = err_msg;
        gop.value_ptr.* = err_msg;
    }
    return error.AnalysisFail;
}
|
|
|
|
/// Given an ErrorMsg, modify its message and source location to the given values, turning the
/// original message into a note. Notes on the original message are preserved as further notes.
/// Reference trace is preserved.
fn reparentOwnedErrorMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    msg: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    const mod = sema.mod;
    const src_decl = mod.declPtr(block.src_decl);
    const resolved_src = src_decl.toSrcLoc(src, mod);
    const msg_str = try std.fmt.allocPrint(mod.gpa, format, args);

    // Grow the notes array by one and shift the existing notes right so the
    // old top-level message can become note 0. `copyBackwards` is required
    // because source and destination overlap (dest is ahead of src).
    const orig_notes = msg.notes.len;
    msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
    std.mem.copyBackwards(Module.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
    msg.notes[0] = .{
        .src_loc = msg.src_loc,
        .msg = msg.msg,
    };

    // Install the new top-level message; the old one lives on as note 0.
    msg.src_loc = resolved_src;
    msg.msg = msg_str;
}
|
|
|
|
// Alignment values coerce to u29 (the maximum representable alignment).
const align_ty = Type.u29;

/// Resolves `air_ref` as a comptime-known alignment value.
/// Fails if the value is not comptime-known, not a power of two, or zero.
fn analyzeAsAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) !Alignment {
    const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{
        .needed_comptime_reason = "alignment must be comptime-known",
    });
    return sema.validateAlign(block, src, alignment_big);
}
|
|
|
|
/// Validates `alignment` as a nonzero power of two and converts it to an
/// `Alignment`. Emits a compile error for zero or non-power-of-two values.
fn validateAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u64,
) !Alignment {
    const checked = try validateAlignAllowZero(sema, block, src, alignment);
    // `.none` means the input was zero, which is not a valid alignment here.
    if (checked != .none) return checked;
    return sema.fail(block, src, "alignment must be >= 1", .{});
}
|
|
|
|
/// Like `validateAlign`, but maps an alignment of zero to `.none` instead of
/// emitting a compile error. Non-power-of-two values still fail.
fn validateAlignAllowZero(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u64,
) !Alignment {
    // Zero is accepted here as the "no alignment" marker.
    if (alignment == 0) return .none;
    if (std.math.isPowerOfTwo(alignment))
        return Alignment.fromNonzeroByteUnits(alignment);
    return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
        alignment,
    });
}
|
|
|
|
/// Resolves the ZIR ref `zir_ref` and interprets it as a comptime-known
/// alignment value. See `analyzeAsAlign` for the validation rules.
pub fn resolveAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) !Alignment {
    return sema.analyzeAsAlign(block, src, try sema.resolveInst(zir_ref));
}
|
|
|
|
/// Resolves the ZIR ref `zir_ref`, coerces it to `dest_ty`, and returns it as
/// a comptime-known unsigned integer. `reason` explains to the user why the
/// value must be comptime-known if resolution fails.
fn resolveInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    dest_ty: Type,
    reason: NeededComptimeReason,
) !u64 {
    return sema.analyzeAsInt(block, src, try sema.resolveInst(zir_ref), dest_ty, reason);
}
|
|
|
|
/// Coerces `air_ref` to `dest_ty` and resolves it to a comptime-known,
/// defined value, returned as an unsigned integer.
fn analyzeAsInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    dest_ty: Type,
    reason: NeededComptimeReason,
) !u64 {
    const mod = sema.mod;
    const coerced_ref = try sema.coerce(block, dest_ty, air_ref, src);
    const int_val = try sema.resolveConstDefinedValue(block, src, coerced_ref, reason);
    // The value is known to be a defined integer of `dest_ty` at this point,
    // so the optional result is non-null.
    const as_unsigned = try int_val.getUnsignedIntAdvanced(mod, sema);
    return as_unsigned.?;
}
|
|
|
|
/// Creates an InternPool struct type for the `struct_decl` ZIR instruction
/// tracked by `tracked_inst`, owned by `decl` and `namespace`. Scans the
/// container's declarations into `namespace` as a side effect. Field types
/// and initializers are not resolved here.
pub fn getStructType(
    sema: *Sema,
    decl: InternPool.DeclIndex,
    namespace: InternPool.NamespaceIndex,
    tracked_inst: InternPool.TrackedInst.Index,
) !InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const zir_index = tracked_inst.resolve(ip);
    const extended = sema.code.instructions.items(.data)[@intFromEnum(zir_index)].extended;
    assert(extended.opcode == .struct_decl);
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);

    // Walk the variable-length trailing data of the struct_decl instruction.
    // The order of these optional fields must match AstGen's encoding.
    var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;
    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    if (small.has_backing_int) {
        // A backing integer is encoded either as an inline ref (body_len == 0)
        // or as a body of instructions; skip over whichever form is present.
        const backing_int_body_len = sema.code.extra[extra_index];
        extra_index += 1; // backing_int_body_len
        if (backing_int_body_len == 0) {
            extra_index += 1; // backing_int_ref
        } else {
            extra_index += backing_int_body_len; // backing_int_body_inst
        }
    }

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try mod.scanNamespace(namespace, decls, mod.declPtr(decl));
    extra_index += decls_len;

    const ty = try ip.getStructType(gpa, .{
        .decl = decl,
        .namespace = namespace.toOptional(),
        .zir_index = tracked_inst.toOptional(),
        .layout = small.layout,
        .known_non_opv = small.known_non_opv,
        .is_tuple = small.is_tuple,
        .fields_len = fields_len,
        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
        .any_default_inits = small.any_default_inits,
        .any_comptime_fields = small.any_comptime_fields,
        .inits_resolved = false,
        .any_aligned_fields = small.any_aligned_fields,
    });

    return ty;
}
|
|
|
|
/// Analyzes a `struct_decl` extended ZIR instruction: creates the anonymous
/// `Decl`, its `Namespace`, and the InternPool struct type, wiring the three
/// together, and returns a reference to the resulting type value.
fn zirStructDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
    const src = sema.code.extraData(Zir.Inst.StructDecl, extended.operand).data.src();

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the struct type gains an
    // InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "struct", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    // Incremental compilation: record that this decl depends on the source
    // hash of the struct_decl instruction.
    if (sema.mod.comp.debug_incremental) {
        try ip.addDependency(
            sema.gpa,
            InternPool.Depender.wrap(.{ .decl = new_decl_index }),
            .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) },
        );
    }

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .decl_index = new_decl_index,
        .file_scope = block.getFileScope(mod),
    });
    errdefer mod.destroyNamespace(new_namespace_index);

    const struct_ty = ty: {
        const tracked_inst = try ip.trackZir(mod.gpa, block.getFileScope(mod), inst);
        const ty = try sema.getStructType(new_decl_index, new_namespace_index, tracked_inst);
        // If this struct is one of the compiler-known builtin types, resolve
        // the reserved InternPool slot to it and use that index instead.
        if (sema.builtin_type_target_index != .none) {
            ip.resolveBuiltinType(sema.builtin_type_target_index, ty);
            break :ty sema.builtin_type_target_index;
        }
        break :ty ty;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer ip.remove(struct_ty);

    // Replace the placeholder type/value now that the type exists.
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(struct_ty);

    // Deliberately no `try`: finalizeAnonDecl must run even if analyzeDeclVal
    // failed; the error union is returned to the caller below.
    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}
|
|
|
|
/// Allocates a new anonymous `Decl` holding `typed_value` and names it
/// according to `name_strategy`:
/// * `.anon` — "<parent>__<anon_prefix>_<decl index>".
/// * `.parent` — same name as the enclosing `Decl`.
/// * `.func` — "<function>(<comptime arg values>)" for generic instantiations;
///   falls back to `.anon` when any argument is not comptime-known.
/// * `.dbg_var` — "<parent>.<variable name>", using the local variable the
///   value is assigned to; falls back to `.anon` when none is found.
/// `inst` is only consulted for `.dbg_var`, to scan forward for the
/// corresponding `dbg_var_*` instruction.
fn createAnonymousDeclTypeNamed(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    typed_value: TypedValue,
    name_strategy: Zir.Inst.NameStrategy,
    anon_prefix: []const u8,
    inst: ?Zir.Inst.Index,
) !InternPool.DeclIndex {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;
    const namespace = block.namespace;
    const src_scope = block.wip_capture_scope;
    const src_decl = mod.declPtr(block.src_decl);
    const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
    const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
    errdefer mod.destroyDecl(new_decl_index);

    switch (name_strategy) {
        .anon => {
            // It would be neat to have "struct:line:column" but this name has
            // to survive incremental updates, where it may have been shifted down
            // or up to a different line, but unchanged, and thus not unnecessarily
            // semantically analyzed.
            // This name is also used as the key in the parent namespace so it cannot be
            // renamed.

            // NOTE(review): `catch unreachable` swallows a possible OOM here —
            // matches existing upstream behavior; confirm before changing.
            const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{
                src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index),
            }) catch unreachable;
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .parent => {
            const name = mod.declPtr(block.src_decl).name;
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .func => {
            const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip));
            const zir_tags = sema.code.instructions.items(.tag);

            var buf = std.ArrayList(u8).init(gpa);
            defer buf.deinit();

            const writer = buf.writer();
            try writer.print("{}(", .{mod.declPtr(block.src_decl).name.fmt(&mod.intern_pool)});

            // Render each comptime-known parameter value into the name.
            var arg_i: usize = 0;
            for (fn_info.param_body) |zir_inst| switch (zir_tags[@intFromEnum(zir_inst)]) {
                .param, .param_comptime, .param_anytype, .param_anytype_comptime => {
                    const arg = sema.inst_map.get(zir_inst).?;
                    // If this is being called in a generic function then analyzeCall will
                    // have already resolved the args and this will work.
                    // If not then this is a struct type being returned from a non-generic
                    // function and the name doesn't matter since it will later
                    // result in a compile error.
                    const arg_val = sema.resolveConstValue(block, .unneeded, arg, undefined) catch
                        return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);

                    if (arg_i != 0) try writer.writeByte(',');
                    try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)});

                    arg_i += 1;
                    continue;
                },
                else => continue,
            };

            try writer.writeByte(')');
            const name = try mod.intern_pool.getOrPutString(gpa, buf.items);
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .dbg_var => {
            // Scan forward from `inst` for the dbg_var instruction whose
            // operand is this value; its string is the variable name.
            const ref = inst.?.toRef();
            const zir_tags = sema.code.instructions.items(.tag);
            const zir_data = sema.code.instructions.items(.data);
            for (@intFromEnum(inst.?)..zir_tags.len) |i| switch (zir_tags[i]) {
                .dbg_var_ptr, .dbg_var_val => {
                    if (zir_data[i].str_op.operand != ref) continue;

                    const name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}.{s}", .{
                        src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code),
                    });

                    try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
                    return new_decl_index;
                },
                else => {},
            };
            // No matching debug variable: fall back to an anonymous name.
            return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
        },
    }
}
|
|
|
|
/// Analyzes an `enum_decl` extended ZIR instruction: creates the anonymous
/// `Decl`, its `Namespace`, and an incomplete InternPool enum type, then
/// resolves the integer tag type and populates all field names and tag values
/// (explicit, auto-incremented, or index-based), diagnosing duplicate and
/// out-of-range tag values. Returns a reference to the resulting type value.
fn zirEnumDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
    var extra_index: usize = extra.end;

    const src = extra.data.src();
    const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };

    // Walk the variable-length trailing data; the order of these optional
    // fields must match AstGen's encoding.
    const tag_type_ref = if (small.has_tag_type) blk: {
        const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk tag_type_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the enum type gains an
    // InternPool index.

    // `done` guards the errdefers: once the decl is finalized, cleanup must
    // no longer destroy it.
    var done = false;
    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "enum", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer if (!done) mod.abortAnonDecl(new_decl_index);

    // Incremental compilation: record the dependency on this instruction's
    // source hash.
    if (sema.mod.comp.debug_incremental) {
        try mod.intern_pool.addDependency(
            sema.gpa,
            InternPool.Depender.wrap(.{ .decl = new_decl_index }),
            .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
        );
    }

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .decl_index = new_decl_index,
        .file_scope = block.getFileScope(mod),
    });
    errdefer if (!done) mod.destroyNamespace(new_namespace_index);

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try mod.scanNamespace(new_namespace_index, decls, new_decl);
    extra_index += decls_len;

    const body = sema.code.bodySlice(extra_index, body_len);
    extra_index += body.len;

    // One bit per field, packed into u32 "bit bags": whether the field has an
    // explicit tag value.
    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra_index;
    extra_index += bit_bags_count;

    // True if any field anywhere has an explicit tag value.
    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;

    const incomplete_enum = incomplete_enum: {
        var incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
            .decl = new_decl_index,
            .namespace = new_namespace_index.toOptional(),
            .fields_len = fields_len,
            .has_values = any_values,
            .tag_mode = if (small.nonexhaustive)
                .nonexhaustive
            else if (tag_type_ref == .none)
                .auto
            else
                .explicit,
            .zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
        });
        // Redirect to the reserved builtin-type slot if this is one of the
        // compiler-known builtin types.
        if (sema.builtin_type_target_index != .none) {
            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, incomplete_enum.index);
            incomplete_enum.index = sema.builtin_type_target_index;
        }
        break :incomplete_enum incomplete_enum;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);

    // Replace the placeholder type/value now that the (incomplete) type exists.
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(incomplete_enum.index);

    const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    done = true;

    const int_tag_ty = ty: {
        // We create a block for the field type instructions because they
        // may need to reference Decls from inside the enum namespace.
        // Within the field type, default value, and alignment expressions, the "owner decl"
        // should be the enum itself.

        const prev_owner_decl = sema.owner_decl;
        const prev_owner_decl_index = sema.owner_decl_index;
        sema.owner_decl = new_decl;
        sema.owner_decl_index = new_decl_index;
        defer {
            sema.owner_decl = prev_owner_decl;
            sema.owner_decl_index = prev_owner_decl_index;
        }

        const prev_owner_func_index = sema.owner_func_index;
        sema.owner_func_index = .none;
        defer sema.owner_func_index = prev_owner_func_index;

        const prev_func_index = sema.func_index;
        sema.func_index = .none;
        defer sema.func_index = prev_func_index;

        var enum_block: Block = .{
            .parent = null,
            .sema = sema,
            .src_decl = new_decl_index,
            .namespace = new_namespace_index,
            .wip_capture_scope = try mod.createCaptureScope(new_decl.src_scope),
            .instructions = .{},
            .inlining = null,
            .is_comptime = true,
        };
        defer enum_block.instructions.deinit(sema.gpa);

        if (body.len != 0) {
            try sema.analyzeBody(&enum_block, body);
        }

        if (tag_type_ref != .none) {
            // Explicit tag type: must be an integer (or comptime_int) type.
            const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
            if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
                return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
            }
            incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern());
            break :ty ty;
        } else if (fields_len == 0) {
            break :ty try mod.intType(.unsigned, 0);
        } else {
            // Auto tag type: smallest unsigned integer that can index all fields.
            const bits = std.math.log2_int_ceil(usize, fields_len);
            break :ty try mod.intType(.unsigned, bits);
        }
    };

    // A non-exhaustive enum must leave at least one tag value unused.
    if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
            return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
        }
    }

    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    var last_tag_val: ?Value = null;
    while (field_i < fields_len) : (field_i += 1) {
        // Refill the 32-bit "has explicit tag value" bag every 32 fields.
        if (field_i % 32 == 0) {
            cur_bit_bag = sema.code.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;

        const field_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
        const field_name_zir = sema.code.nullTerminatedString(field_name_index);
        extra_index += 2; // field name, doc comment

        const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
        // Duplicate field names are rejected by AstGen, so this cannot collide.
        assert(incomplete_enum.addFieldName(&mod.intern_pool, field_name) == null);

        const tag_overflow = if (has_tag_value) overflow: {
            // Explicit tag value: resolve it, check it fits the tag type, and
            // check for duplicates.
            const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
            extra_index += 1;
            const tag_inst = try sema.resolveInst(tag_val_ref);
            last_tag_val = sema.resolveConstDefinedValue(block, .unneeded, tag_inst, undefined) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    // Retry with a real source location to produce the error.
                    const value_src = mod.fieldSrcLoc(new_decl_index, .{
                        .index = field_i,
                        .range = .value,
                    }).lazy;
                    _ = try sema.resolveConstDefinedValue(block, value_src, tag_inst, .{
                        .needed_comptime_reason = "enum tag value must be comptime-known",
                    });
                    unreachable;
                },
                else => |e| return e,
            };
            if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
            last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
            if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
                const value_src = mod.fieldSrcLoc(new_decl_index, .{
                    .index = field_i,
                    .range = .value,
                }).lazy;
                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            }
            break :overflow false;
        } else if (any_values) overflow: {
            // Mixed explicit/implicit values: this field's value is the
            // previous value plus one (or zero for the first field).
            var overflow: ?usize = null;
            last_tag_val = if (last_tag_val) |val|
                try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
            else
                try mod.intValue(int_tag_ty, 0);
            if (overflow != null) break :overflow true;
            if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
                const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            }
            break :overflow false;
        } else overflow: {
            // No field has an explicit value: tag values are simply the field
            // indices, and no duplicate check is necessary.
            last_tag_val = try mod.intValue(Type.comptime_int, field_i);
            if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
            last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
            break :overflow false;
        };

        if (tag_overflow) {
            const value_src = mod.fieldSrcLoc(new_decl_index, .{
                .index = field_i,
                .range = if (has_tag_value) .value else .name,
            }).lazy;
            const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
                last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod),
            });
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    }
    return decl_val;
}
|
|
|
|
/// Analyzes a `union_decl` extended ZIR instruction: creates the anonymous
/// `Decl`, its `Namespace`, and the InternPool union type (fields unresolved),
/// and returns a reference to the resulting type value.
fn zirUnionDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
    var extra_index: usize = extra.end;

    const src = extra.data.src();

    // Tag type and body are resolved later during field resolution; only skip
    // over their slots in the trailing data here.
    extra_index += @intFromBool(small.has_tag_type);
    extra_index += @intFromBool(small.has_body_len);
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the union type gains an
    // InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "union", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    // Incremental compilation: record the dependency on this instruction's
    // source hash.
    if (sema.mod.comp.debug_incremental) {
        try mod.intern_pool.addDependency(
            sema.gpa,
            InternPool.Depender.wrap(.{ .decl = new_decl_index }),
            .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
        );
    }

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .decl_index = new_decl_index,
        .file_scope = block.getFileScope(mod),
    });
    errdefer mod.destroyNamespace(new_namespace_index);

    const union_ty = ty: {
        const ty = try mod.intern_pool.getUnionType(gpa, .{
            .flags = .{
                .layout = small.layout,
                .status = .none,
                // Explicit or auto enum tags make a tagged union; otherwise an
                // auto-layout union gets a safety tag when safety is enabled.
                .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
                    .tagged
                else if (small.layout != .Auto)
                    .none
                else switch (block.wantSafety()) {
                    true => .safety,
                    false => .none,
                },
                .any_aligned_fields = small.any_aligned_fields,
                .requires_comptime = .unknown,
                .assumed_runtime_bits = false,
                .assumed_pointer_aligned = false,
                .alignment = .none,
            },
            .decl = new_decl_index,
            .namespace = new_namespace_index,
            .zir_index = (try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst)).toOptional(),
            .fields_len = fields_len,
            // Field types and the tag type are filled in when the union is resolved.
            .enum_tag_ty = .none,
            .field_types = &.{},
            .field_aligns = &.{},
        });
        // Redirect to the reserved builtin-type slot if this is one of the
        // compiler-known builtin types.
        if (sema.builtin_type_target_index != .none) {
            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
            break :ty sema.builtin_type_target_index;
        }
        break :ty ty;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer mod.intern_pool.remove(union_ty);

    // Replace the placeholder type/value now that the type exists.
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(union_ty);

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try mod.scanNamespace(new_namespace_index, decls, new_decl);

    // Deliberately no `try`: finalizeAnonDecl must run even if analyzeDeclVal
    // failed; the error union is returned to the caller below.
    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}
|
|
|
|
/// Analyzes an `opaque_decl` extended ZIR instruction: creates the anonymous
/// `Decl`, its `Namespace`, and the InternPool opaque type, and returns a
/// reference to the resulting type value.
fn zirOpaqueDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
    var extra_index: usize = extra.end;

    const src = extra.data.src();

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used in two places before being set after the opaque
    // type gains an InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "opaque", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    // Incremental compilation: record the dependency on this instruction's
    // source hash.
    if (sema.mod.comp.debug_incremental) {
        try mod.intern_pool.addDependency(
            sema.gpa,
            InternPool.Depender.wrap(.{ .decl = new_decl_index }),
            .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
        );
    }

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .decl_index = new_decl_index,
        .file_scope = block.getFileScope(mod),
    });
    errdefer mod.destroyNamespace(new_namespace_index);

    const opaque_ty = try mod.intern(.{ .opaque_type = .{
        .decl = new_decl_index,
        .namespace = new_namespace_index,
        .zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
    } });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer mod.intern_pool.remove(opaque_ty);

    // Replace the placeholder type/value now that the type exists.
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(opaque_ty);

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try mod.scanNamespace(new_namespace_index, decls, new_decl);

    // Deliberately no `try`: finalizeAnonDecl must run even if analyzeDeclVal
    // failed; the error union is returned to the caller below.
    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}
|
|
|
|
/// Analyzes an `error{...}` declaration: interns each error name, builds the
/// error set type, and wraps it in an anonymous decl so it can be referenced
/// as a value.
fn zirErrorSetDecl(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);

    var name_map: InferredErrorSet.NameMap = .{};
    try name_map.ensureUnusedCapacity(sema.arena, extra.data.fields_len);

    // Each field occupies two extra words: the name, then a doc string which
    // Sema does not need here.
    var field_i: u32 = 0;
    while (field_i < extra.data.fields_len) : (field_i += 1) {
        const extra_index = extra.end + @as(usize, field_i) * 2;
        const str_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
        const name = sema.code.nullTerminatedString(str_index);
        const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
        // Register the name in the global error table.
        _ = try mod.getErrorValue(name_ip);
        const gop = name_map.getOrPutAssumeCapacity(name_ip);
        assert(!gop.found_existing); // verified in AstGen
    }

    const error_set_ty = try mod.errorSetFromUnsortedNames(name_map.keys());

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.type,
        .val = error_set_ty.toValue(),
    }, name_strategy, "error", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}
|
|
|
|
/// Analyzes the `ret_ptr` ZIR instruction: produces a pointer through which
/// the function's return value may be stored.
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const ret_ty = sema.fn_ret_ty;

    // Comptime contexts and comptime-only result types use a comptime alloc.
    if (block.is_comptime or try sema.typeRequiresComptime(ret_ty)) {
        try sema.resolveTypeFields(ret_ty);
        return sema.analyzeComptimeAlloc(block, ret_ty, .none);
    }

    const target = sema.mod.getTarget();
    const ptr_type = try sema.ptrType(.{
        .child = ret_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });

    if (block.inlining == null) {
        return block.addTy(.ret_ptr, ptr_type);
    }

    // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
    // TODO when functions gain result location support, the inlining struct in
    // Block should contain the return pointer, and we would pass that through here.
    try sema.queueFullTypeResolution(ret_ty);
    return block.addTy(.alloc, ptr_type);
}
|
|
|
|
/// Analyzes the `ref` ZIR instruction: takes a reference to the operand.
fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
    const src = un_tok.src();
    const operand = try sema.resolveInst(un_tok.operand);
    return sema.analyzeRef(block, src, operand);
}
|
|
|
|
/// Analyzes `ensure_result_used`: delegates to `ensureResultUsed` to emit a
/// compile error when a meaningful value is silently discarded.
fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const operand = try sema.resolveInst(un_node.operand);
    return sema.ensureResultUsed(block, sema.typeOf(operand), src);
}
|
|
|
|
/// Emits a compile error unless `ty` is a type whose values may be silently
/// discarded (void/noreturn). Error types get a more specific message that
/// suggests `try`/`catch`/`if`.
fn ensureResultUsed(
    sema: *Sema,
    block: *Block,
    ty: Type,
    src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        // These carry no meaningful value; discarding them is fine.
        .Void, .NoReturn => return,
        .ErrorSet, .ErrorUnion => {
            // The errdefer is scoped to the labeled block: once the message is
            // fully built, failWithOwnedErrorMsg takes ownership of it.
            const err_msg = msg: {
                const err_msg = try sema.errMsg(block, src, "error is ignored", .{});
                errdefer err_msg.destroy(sema.gpa);
                try sema.errNote(block, src, err_msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg err_msg;
            };
            return sema.failWithOwnedErrorMsg(block, err_msg);
        },
        else => {
            const err_msg = msg: {
                const err_msg = try sema.errMsg(block, src, "value of type '{}' ignored", .{ty.fmt(sema.mod)});
                errdefer err_msg.destroy(sema.gpa);
                try sema.errNote(block, src, err_msg, "all non-void values must be used", .{});
                try sema.errNote(block, src, err_msg, "this error can be suppressed by assigning the value to '_'", .{});
                break :msg err_msg;
            };
            return sema.failWithOwnedErrorMsg(block, err_msg);
        },
    }
}
|
|
|
|
/// Analyzes `ensure_result_non_error`: emits a compile error when an error
/// (error set or error union) is discarded, e.g. via `_ = expr`.
fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(un_node.operand);
    const src = un_node.src();
    switch (sema.typeOf(operand).zigTypeTag(mod)) {
        .ErrorSet, .ErrorUnion => {
            // Ownership of the message passes to failWithOwnedErrorMsg once
            // the labeled block completes.
            const err_msg = msg: {
                const err_msg = try sema.errMsg(block, src, "error is discarded", .{});
                errdefer err_msg.destroy(sema.gpa);
                try sema.errNote(block, src, err_msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg err_msg;
            };
            return sema.failWithOwnedErrorMsg(block, err_msg);
        },
        else => return,
    }
}
|
|
|
|
/// Analyzes `ensure_err_union_payload_void`: when an error-union payload is
/// implicitly discarded, emits a compile error unless the payload type is
/// void or noreturn.
fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const operand = try sema.resolveInst(un_node.operand);
    const operand_ty = sema.typeOf(operand);

    // The operand may be the error union itself or a pointer to one.
    const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
        operand_ty.childType(mod)
    else
        operand_ty;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;

    const payload_tag = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod);
    if (payload_tag == .Void or payload_tag == .NoReturn) return;

    const err_msg = msg: {
        const err_msg = try sema.errMsg(block, src, "error union payload is ignored", .{});
        errdefer err_msg.destroy(sema.gpa);
        try sema.errNote(block, src, err_msg, "payload value can be explicitly ignored with '|_|'", .{});
        break :msg err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, err_msg);
}
|
|
|
|
/// Analyzes `indexable_ptr_len`: resolves the `len` of an indexable operand.
fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const object = try sema.resolveInst(un_node.operand);
    return indexablePtrLen(sema, block, un_node.src(), object);
}
|
|
|
|
/// Returns the `len` field of `object`, which must be indexable (possibly
/// through a single-item pointer to the indexable aggregate).
fn indexablePtrLen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const object_ty = sema.typeOf(object);
    // A single-item pointer is looked through to the aggregate it addresses.
    const indexable_ty = if (object_ty.isSinglePointer(mod))
        object_ty.childType(mod)
    else
        object_ty;
    try checkIndexable(sema, block, src, indexable_ty);
    const len_name = try mod.intern_pool.getOrPutString(sema.gpa, "len");
    return sema.fieldVal(block, src, object, len_name, src);
}
|
|
|
|
/// Like `indexablePtrLen`, but yields `.none` for many-item pointers, which
/// have no knowable length.
fn indexablePtrLenOrNone(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    try checkMemOperand(sema, block, src, operand_ty);
    if (operand_ty.ptrSize(mod) == .Many) return .none;
    const len_name = try mod.intern_pool.getOrPutString(sema.gpa, "len");
    return sema.fieldVal(block, src, operand, len_name, src);
}
|
|
|
|
/// Analyzes the extended `alloc` ZIR instruction, which carries optional
/// explicit type and alignment operands plus const/comptime flags packed
/// into `small`.
fn zirAllocExtended(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
    const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
    const small: Zir.Inst.AllocExtended.Small = @bitCast(extended.small);

    var extra_index: usize = extra.end;

    // Optional trailing operand: explicit element type.
    const var_ty: Type = if (small.has_type) blk: {
        const type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk try sema.resolveType(block, ty_src, type_ref);
    } else undefined;

    // Optional trailing operand: explicit alignment.
    const alignment = if (small.has_align) blk: {
        const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const alignment = try sema.resolveAlign(block, align_src, align_ref);
        break :blk alignment;
    } else .none;

    if (block.is_comptime or small.is_comptime) {
        if (small.has_type) {
            return sema.analyzeComptimeAlloc(block, var_ty, alignment);
        } else {
            // No explicit type: emit a placeholder instruction whose element
            // type is inferred later from the stores performed through it.
            try sema.air_instructions.append(gpa, .{
                .tag = .inferred_alloc_comptime,
                .data = .{ .inferred_alloc_comptime = .{
                    .decl_index = undefined,
                    .alignment = alignment,
                    .is_const = small.is_const,
                } },
            });
            return @as(Air.Inst.Index, @enumFromInt(sema.air_instructions.len - 1)).toRef();
        }
    }

    if (small.has_type) {
        if (!small.is_const) {
            // `var` declarations require a type valid for runtime mutation.
            try sema.validateVarType(block, ty_src, var_ty, false);
        }
        const target = sema.mod.getTarget();
        try sema.resolveTypeLayout(var_ty);
        const ptr_type = try sema.ptrType(.{
            .child = var_ty.toIntern(),
            .flags = .{
                .alignment = alignment,
                .address_space = target_util.defaultAddressSpace(target, .local),
            },
        });
        const ptr = try block.addTy(.alloc, ptr_type);
        if (small.is_const) {
            // Track const allocs so they can later be promoted to
            // comptime-known values if all stores are comptime-known.
            const ptr_inst = ptr.toIndex().?;
            try sema.maybe_comptime_allocs.put(gpa, ptr_inst, .{ .runtime_index = block.runtime_index });
            try sema.base_allocs.put(gpa, ptr_inst, ptr_inst);
        }
        return ptr;
    }

    // Runtime alloc without an explicit type: the element type is resolved
    // later (see `zirResolveInferredAlloc`), so record it as unresolved.
    const result_index = try block.addInstAsIndex(.{
        .tag = .inferred_alloc,
        .data = .{ .inferred_alloc = .{
            .alignment = alignment,
            .is_const = small.is_const,
        } },
    });
    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{});
    if (small.is_const) {
        try sema.maybe_comptime_allocs.put(gpa, result_index, .{ .runtime_index = block.runtime_index });
        try sema.base_allocs.put(gpa, result_index, result_index);
    }
    return result_index.toRef();
}
|
|
|
|
/// Analyzes a comptime alloc ZIR instruction with an explicit element type
/// and no explicit alignment.
fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = un_node.src_node };
    const elem_ty = try sema.resolveType(block, ty_src, un_node.operand);
    return sema.analyzeComptimeAlloc(block, elem_ty, .none);
}
|
|
|
|
/// Analyzes `make_ptr_const`: converts the mutable pointer of a `const`
/// variable's allocation into a const pointer, promoting the allocation to a
/// comptime-known anonymous decl when all stores were comptime-known.
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const alloc = try sema.resolveInst(inst_data.operand);
    const alloc_ty = sema.typeOf(alloc);
    const ptr_info = alloc_ty.ptrInfo(mod);
    const elem_ty = Type.fromInterned(ptr_info.child);

    // If every store to the alloc was comptime-known, replace the runtime
    // alloc with a pointer to an anonymous decl holding the resolved value.
    if (try sema.resolveComptimeKnownAllocValue(block, alloc, null)) |val| {
        const new_mut_ptr = Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = alloc_ty.toIntern(),
            .addr = .{ .anon_decl = .{
                .val = val,
                .orig_ty = alloc_ty.toIntern(),
            } },
        } })));
        return sema.makePtrConst(block, new_mut_ptr);
    }

    // If this is already a comptime-known allocation, we don't want to emit an error - the stores
    // were already performed at comptime! Just make the pointer constant as normal.
    implicit_ct: {
        const ptr_val = try sema.resolveValue(alloc) orelse break :implicit_ct;
        if (!ptr_val.isComptimeMutablePtr(mod)) {
            // It could still be a constant pointer to a decl.
            // A pointer to a `var` does not qualify, in either branch below.
            switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
                .anon_decl => |anon_decl| {
                    if (mod.intern_pool.isVariable(anon_decl.val))
                        break :implicit_ct;
                },
                else => {
                    const decl_index = ptr_val.pointerDecl(mod) orelse break :implicit_ct;
                    const decl_val = mod.declPtr(decl_index).val.toIntern();
                    if (mod.intern_pool.isVariable(decl_val)) break :implicit_ct;
                },
            }
        }
        return sema.makePtrConst(block, alloc);
    }

    if (try sema.typeRequiresComptime(elem_ty)) {
        // The value was initialized through RLS, so we didn't detect the runtime condition earlier.
        // TODO: source location of runtime control flow
        const init_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
        return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(mod)});
    }

    // This is a runtime value.
    return sema.makePtrConst(block, alloc);
}
|
|
|
|
/// If `alloc` is an inferred allocation, `resolved_inferred_ty` is taken to be its resolved
/// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`.
/// Returns the comptime-known value of the allocation, or `null` if the
/// allocation is not comptime-known (no entry in `maybe_comptime_allocs`).
fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index {
    const mod = sema.mod;

    const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
    const ptr_info = alloc_ty.ptrInfo(mod);
    const elem_ty = Type.fromInterned(ptr_info.child);

    const alloc_inst = alloc.toIndex() orelse return null;
    const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null;
    const stores = comptime_info.value.stores.items;

    // Since the entry existed in `maybe_comptime_allocs`, the allocation is comptime-known.
    // We will resolve and return its value.

    // We expect to have emitted at least one store, unless the elem type is OPV.
    if (stores.len == 0) {
        const val = (try sema.typeHasOnePossibleValue(elem_ty)).?.toIntern();
        return sema.finishResolveComptimeKnownAllocValue(val, alloc_inst, comptime_info.value);
    }

    // In general, we want to create a comptime alloc of the correct type and
    // apply the stores to that alloc in order. However, before going to all
    // that effort, let's optimize for the common case of a single store.

    simple: {
        if (stores.len != 1) break :simple;
        const store_inst = stores[0];
        const store_data = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
        // The single store must target the alloc itself (not a derived ptr).
        if (store_data.lhs != alloc) break :simple;

        const val = store_data.rhs.toInterned().?;
        assert(mod.intern_pool.typeOf(val) == elem_ty.toIntern());
        return sema.finishResolveComptimeKnownAllocValue(val, alloc_inst, comptime_info.value);
    }

    // The simple strategy failed: we must create a mutable comptime alloc and
    // perform all of the runtime store operations at comptime.

    var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl
    defer anon_decl.deinit();
    const decl_index = try anon_decl.finish(elem_ty, try mod.undefValue(elem_ty), ptr_info.flags.alignment);

    const decl_ptr = try mod.intern(.{ .ptr = .{
        .ty = alloc_ty.toIntern(),
        .addr = .{ .mut_decl = .{
            .decl = decl_index,
            .runtime_index = block.runtime_index,
        } },
    } });

    // Maps from pointers into the runtime allocs, to comptime-mutable pointers into the mut decl.
    var ptr_mapping = std.AutoHashMap(Air.Inst.Index, InternPool.Index).init(sema.arena);
    try ptr_mapping.ensureTotalCapacity(@intCast(stores.len));
    ptr_mapping.putAssumeCapacity(alloc_inst, decl_ptr);

    // Worklist of AIR pointers still needing a comptime counterpart. Seeded
    // with the destination of every store; parents are added as discovered.
    var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len);
    for (stores) |store_inst| {
        const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
        to_map.appendAssumeCapacity(bin_op.lhs.toIndex().?);
    }

    const tmp_air = sema.getTmpAir();

    while (to_map.popOrNull()) |air_ptr| {
        if (ptr_mapping.contains(air_ptr)) continue;
        // Describes how a derived pointer relates to its parent pointer.
        const PointerMethod = union(enum) {
            same_addr,
            opt_payload,
            eu_payload,
            field: u32,
            elem: u64,
        };
        const inst_tag = tmp_air.instructions.items(.tag)[@intFromEnum(air_ptr)];
        // Decode the AIR pointer-derivation instruction into its parent
        // pointer and the derivation method.
        const air_parent_ptr: Air.Inst.Ref, const method: PointerMethod = switch (inst_tag) {
            .struct_field_ptr => blk: {
                const data = tmp_air.extraData(
                    Air.StructField,
                    tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_pl.payload,
                ).data;
                break :blk .{
                    data.struct_operand,
                    .{ .field = data.field_index },
                };
            },
            .struct_field_ptr_index_0,
            .struct_field_ptr_index_1,
            .struct_field_ptr_index_2,
            .struct_field_ptr_index_3,
            => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .{ .field = switch (inst_tag) {
                    .struct_field_ptr_index_0 => 0,
                    .struct_field_ptr_index_1 => 1,
                    .struct_field_ptr_index_2 => 2,
                    .struct_field_ptr_index_3 => 3,
                    else => unreachable,
                } },
            },
            .ptr_slice_ptr_ptr => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .{ .field = Value.slice_ptr_index },
            },
            .ptr_slice_len_ptr => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .{ .field = Value.slice_len_index },
            },
            .ptr_elem_ptr => blk: {
                const data = tmp_air.extraData(
                    Air.Bin,
                    tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_pl.payload,
                ).data;
                const idx_val = (try sema.resolveValue(data.rhs)).?;
                break :blk .{
                    data.lhs,
                    .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) },
                };
            },
            .bitcast => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .same_addr,
            },
            .optional_payload_ptr_set => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .opt_payload,
            },
            .errunion_payload_ptr_set => .{
                tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand,
                .eu_payload,
            },
            else => unreachable,
        };

        const decl_parent_ptr = ptr_mapping.get(air_parent_ptr.toIndex().?) orelse {
            // Resolve the parent pointer first.
            // Note that we add in what seems like the wrong order, because we're popping from the end of this array.
            try to_map.appendSlice(&.{ air_ptr, air_parent_ptr.toIndex().? });
            continue;
        };
        const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &mod.intern_pool).toIntern();
        // Build the comptime counterpart of the derived pointer.
        const new_ptr = switch (method) {
            .same_addr => try mod.intern_pool.getCoerced(sema.gpa, decl_parent_ptr, new_ptr_ty),
            .opt_payload => try mod.intern(.{ .ptr = .{
                .ty = new_ptr_ty,
                .addr = .{ .opt_payload = decl_parent_ptr },
            } }),
            .eu_payload => try mod.intern(.{ .ptr = .{
                .ty = new_ptr_ty,
                .addr = .{ .eu_payload = decl_parent_ptr },
            } }),
            .field => |field_idx| try mod.intern(.{ .ptr = .{
                .ty = new_ptr_ty,
                .addr = .{ .field = .{
                    .base = decl_parent_ptr,
                    .index = field_idx,
                } },
            } }),
            .elem => |elem_idx| (try Value.fromInterned(decl_parent_ptr).elemPtr(Type.fromInterned(new_ptr_ty), @intCast(elem_idx), mod)).toIntern(),
        };
        try ptr_mapping.put(air_ptr, new_ptr);
    }

    // We have a correlation between AIR pointers and decl pointers. Perform all stores at comptime.

    for (stores) |store_inst| {
        switch (sema.air_instructions.items(.tag)[@intFromEnum(store_inst)]) {
            .set_union_tag => {
                // If this tag has an OPV payload, there won't be a corresponding
                // store instruction, so we must set the union payload now.
                const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
                const air_ptr_inst = bin_op.lhs.toIndex().?;
                const tag_val = (try sema.resolveValue(bin_op.rhs)).?;
                const union_ty = sema.typeOf(bin_op.lhs).childType(mod);
                const payload_ty = union_ty.unionFieldType(tag_val, mod).?;
                if (try sema.typeHasOnePossibleValue(payload_ty)) |payload_val| {
                    const new_ptr = ptr_mapping.get(air_ptr_inst).?;
                    const store_val = try mod.unionValue(union_ty, tag_val, payload_val);
                    try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, union_ty);
                }
            },
            .store, .store_safe => {
                const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
                const air_ptr_inst = bin_op.lhs.toIndex().?;
                const store_val = (try sema.resolveValue(bin_op.rhs)).?;
                const new_ptr = ptr_mapping.get(air_ptr_inst).?;
                try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(mod.intern_pool.typeOf(store_val.toIntern())));
            },
            else => unreachable,
        }
    }

    // The value is finalized - load it!
    const val = (try sema.pointerDeref(block, .unneeded, Value.fromInterned(decl_ptr), alloc_ty)).?.toIntern();
    return sema.finishResolveComptimeKnownAllocValue(val, alloc_inst, comptime_info.value);
}
|
|
|
|
/// Given the resolved comptime-known value, rewrites the dead AIR to not
/// create a runtime stack allocation.
/// Same return type as `resolveComptimeKnownAllocValue` so we can tail call.
fn finishResolveComptimeKnownAllocValue(sema: *Sema, result_val: InternPool.Index, alloc_inst: Air.Inst.Index, comptime_info: MaybeComptimeAlloc) CompileError!?InternPool.Index {
    // The alloc and every store/pointer derived from it are now dead, so we
    // overwrite each of them with a harmless placeholder that Liveness will
    // elide. Instructions deriving field pointers etc. of the alloc become
    // invalid, but since all stores through them are removed, Liveness
    // eliminates them before they reach codegen. The specifics of the
    // placeholder don't matter; it only needs to be elidable.
    const elided: Air.Inst = .{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = .u8_type, .operand = .zero_u8 } } };

    sema.air_instructions.set(@intFromEnum(alloc_inst), elided);
    for (comptime_info.stores.items) |dead_inst| {
        sema.air_instructions.set(@intFromEnum(dead_inst), elided);
    }
    for (comptime_info.non_elideable_pointers.items) |dead_inst| {
        sema.air_instructions.set(@intFromEnum(dead_inst), elided);
    }

    return result_val;
}
|
|
|
|
/// Returns `ptr_ty` with its `is_const` flag set; every other pointer
/// attribute is preserved.
fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type {
    var info = ptr_ty.ptrInfo(sema.mod);
    info.flags.is_const = true;
    return sema.ptrType(info);
}
|
|
|
|
/// Converts `alloc` to have a const pointer type. A comptime-known pointer is
/// coerced in place; a runtime pointer gets a bitcast instruction.
fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
    const const_ptr_ty = try sema.makePtrTyConst(sema.typeOf(alloc));

    // Detect if a comptime value simply needs to have its type changed.
    if (try sema.resolveValue(alloc)) |val| {
        const coerced = try sema.mod.getCoerced(val, const_ptr_ty);
        return Air.internedToRef(coerced.toIntern());
    }

    return block.addBitCast(const_ptr_ty, alloc);
}
|
|
|
|
/// Emits an `inferred_alloc_comptime` instruction: a comptime allocation
/// whose element type is determined later from the stores through it.
fn zirAllocInferredComptime(
    sema: *Sema,
    is_const: bool,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;

    // The index the appended instruction will occupy.
    const new_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .inferred_alloc_comptime,
        .data = .{ .inferred_alloc_comptime = .{
            .decl_index = undefined,
            .alignment = .none,
            .is_const = is_const,
        } },
    });
    return new_index.toRef();
}
|
|
|
|
/// Analyzes the `alloc` ZIR instruction: a stack allocation with an explicit
/// element type. The alloc is tracked so it may later be promoted to a
/// comptime-known constant if every store to it is comptime-known.
fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = un_node.src_node };
    const elem_ty = try sema.resolveType(block, ty_src, un_node.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, elem_ty, .none);
    }

    const target = sema.mod.getTarget();
    const ptr_type = try sema.ptrType(.{
        .child = elem_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    try sema.queueFullTypeResolution(elem_ty);

    const ptr = try block.addTy(.alloc, ptr_type);
    const ptr_inst = ptr.toIndex().?;
    // Record the alloc for possible comptime promotion later.
    try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index });
    try sema.base_allocs.put(sema.gpa, ptr_inst, ptr_inst);
    return ptr;
}
|
|
|
|
/// Analyzes `alloc_mut`: a stack allocation for a mutable variable with an
/// explicit element type. The type must be valid for a runtime `var`, and no
/// comptime-promotion tracking is performed.
fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = un_node.src_node };
    const elem_ty = try sema.resolveType(block, ty_src, un_node.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, elem_ty, .none);
    }

    try sema.validateVarType(block, ty_src, elem_ty, false);
    const target = sema.mod.getTarget();
    const ptr_type = try sema.ptrType(.{
        .child = elem_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    try sema.queueFullTypeResolution(elem_ty);
    return block.addTy(.alloc, ptr_type);
}
|
|
|
|
/// Analyzes the inferred-alloc ZIR instructions: allocations with no explicit
/// element type, resolved later (see `zirResolveInferredAlloc`) from the
/// stores performed through the pointer.
///
/// Fix: uses the local `gpa` alias consistently for the final `base_allocs`
/// insertion (previously `sema.gpa`, inconsistent with the rest of the block).
fn zirAllocInferred(
    sema: *Sema,
    block: *Block,
    is_const: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;

    if (block.is_comptime) {
        try sema.air_instructions.append(gpa, .{
            .tag = .inferred_alloc_comptime,
            .data = .{ .inferred_alloc_comptime = .{
                .decl_index = undefined,
                .alignment = .none,
                .is_const = is_const,
            } },
        });
        return @as(Air.Inst.Index, @enumFromInt(sema.air_instructions.len - 1)).toRef();
    }

    const result_index = try block.addInstAsIndex(.{
        .tag = .inferred_alloc,
        .data = .{ .inferred_alloc = .{
            .alignment = .none,
            .is_const = is_const,
        } },
    });
    // Track the alloc as unresolved (element type pending) and as a candidate
    // for comptime promotion.
    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{});
    try sema.maybe_comptime_allocs.put(gpa, result_index, .{ .runtime_index = block.runtime_index });
    try sema.base_allocs.put(gpa, result_index, result_index);
    return result_index.toRef();
}
|
|
|
|
/// Analyzes `resolve_inferred_alloc`: now that all stores through an inferred
/// allocation have been seen, determines its element type and rewrites the
/// placeholder AIR instruction accordingly.
fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const ptr = try sema.resolveInst(inst_data.operand);
    const ptr_inst = ptr.toIndex().?;
    const target = mod.getTarget();

    switch (sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)]) {
        .inferred_alloc_comptime => {
            // The comptime case: the decl was created when the placeholder was
            // emitted; its type is already final.
            const iac = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc_comptime;
            const decl_index = iac.decl_index;

            const decl = mod.declPtr(decl_index);
            if (iac.is_const) _ = try decl.internValue(mod);
            const final_elem_ty = decl.ty;
            const final_ptr_ty = try sema.ptrType(.{
                .child = final_elem_ty.toIntern(),
                .flags = .{
                    .is_const = false,
                    .alignment = iac.alignment,
                    .address_space = target_util.defaultAddressSpace(target, .local),
                },
            });

            if (std.debug.runtime_safety) {
                // The inferred_alloc_comptime should never be referenced again
                sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = undefined, .data = undefined });
            }

            try sema.maybeQueueFuncBodyAnalysis(decl_index);

            const interned = try mod.intern(.{ .ptr = .{
                .ty = final_ptr_ty.toIntern(),
                .addr = if (!iac.is_const) .{ .mut_decl = .{
                    .decl = decl_index,
                    .runtime_index = block.runtime_index,
                } } else .{ .decl = decl_index },
            } });

            // Remap the ZIR operand to the resolved pointer value
            sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(interned));
        },
        .inferred_alloc => {
            const ia1 = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc;
            const ia2 = sema.unresolved_inferred_allocs.fetchSwapRemove(ptr_inst).?.value;
            // The element type is the peer resolution of the RHS of every
            // placeholder store recorded against this alloc.
            const peer_vals = try sema.arena.alloc(Air.Inst.Ref, ia2.prongs.items.len);
            for (peer_vals, ia2.prongs.items) |*peer_val, store_inst| {
                assert(sema.air_instructions.items(.tag)[@intFromEnum(store_inst)] == .store);
                const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
                peer_val.* = bin_op.rhs;
            }
            const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none);

            const final_ptr_ty = try sema.ptrType(.{
                .child = final_elem_ty.toIntern(),
                .flags = .{
                    .alignment = ia1.alignment,
                    .address_space = target_util.defaultAddressSpace(target, .local),
                },
            });

            if (!ia1.is_const) {
                try sema.validateVarType(block, ty_src, final_elem_ty, false);
            } else if (try sema.resolveComptimeKnownAllocValue(block, ptr, final_ptr_ty)) |val| {
                // The const alloc was fully comptime-known: replace it with a
                // pointer to an anonymous decl holding the resolved value.
                const const_ptr_ty = (try sema.makePtrTyConst(final_ptr_ty)).toIntern();
                const new_const_ptr = try mod.intern(.{ .ptr = .{
                    .ty = const_ptr_ty,
                    .addr = .{ .anon_decl = .{
                        .val = val,
                        .orig_ty = const_ptr_ty,
                    } },
                } });

                // Remap the ZIR operand to the resolved pointer value
                sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(new_const_ptr));

                // Unless the block is comptime, `alloc_inferred` always produces
                // a runtime constant. The final inferred type needs to be
                // fully resolved so it can be lowered in codegen.
                try sema.resolveTypeFully(final_elem_ty);

                return;
            }

            if (try sema.typeRequiresComptime(final_elem_ty)) {
                // The alloc wasn't comptime-known per the above logic, so the
                // type cannot be comptime-only.
                // TODO: source location of runtime control flow
                return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)});
            }

            try sema.queueFullTypeResolution(final_elem_ty);

            // Change it to a normal alloc.
            sema.air_instructions.set(@intFromEnum(ptr_inst), .{
                .tag = .alloc,
                .data = .{ .ty = final_ptr_ty },
            });

            // Now we need to go back over all the store instructions, and do the logic as if
            // the new result ptr type was available.

            for (ia2.prongs.items) |placeholder_inst| {
                var replacement_block = block.makeSubBlock();
                defer replacement_block.instructions.deinit(gpa);

                assert(sema.air_instructions.items(.tag)[@intFromEnum(placeholder_inst)] == .store);
                const bin_op = sema.air_instructions.items(.data)[@intFromEnum(placeholder_inst)].bin_op;
                try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .store);

                // If only one instruction is produced then we can replace the store
                // placeholder instruction with this instruction; no need for an entire block.
                if (replacement_block.instructions.items.len == 1) {
                    const only_inst = replacement_block.instructions.items[0];
                    sema.air_instructions.set(@intFromEnum(placeholder_inst), sema.air_instructions.get(@intFromEnum(only_inst)));
                    continue;
                }

                // Here we replace the placeholder store instruction with a block
                // that does the actual store logic.
                _ = try replacement_block.addBr(placeholder_inst, .void_value);
                try sema.air_extra.ensureUnusedCapacity(
                    gpa,
                    @typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len,
                );
                sema.air_instructions.set(@intFromEnum(placeholder_inst), .{
                    .tag = .block,
                    .data = .{ .ty_pl = .{
                        .ty = .void_type,
                        .payload = sema.addExtraAssumeCapacity(Air.Block{
                            .body_len = @intCast(replacement_block.instructions.items.len),
                        }),
                    } },
                });
                sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items));
            }
        },
        else => unreachable,
    }
}
|
|
|
|
/// Analyzes the `for_len` ZIR instruction: computes the shared length of all
/// operands of a `for` loop. Each operand is either an indexable (length taken
/// from its `len` field) or an integer range bound (used directly), or `.none`
/// for an unbounded counter. All comptime-known lengths must agree; lengths
/// only known at runtime get safety checks against the chosen length.
/// Returns the loop length as a `usize`-typed AIR ref.
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.operands_len);
    const src = inst_data.src();

    // The chosen loop length; `len_val` is set when it is comptime-known, and
    // `len_idx` records which input it came from (used for error notes and to
    // skip that input's own runtime safety check).
    var len: Air.Inst.Ref = .none;
    var len_val: ?Value = null;
    var len_idx: u32 = undefined;
    var any_runtime = false;

    // Per-input length refs for inputs whose length is only runtime-known;
    // `.none` entries need no runtime check.
    const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
    defer gpa.free(runtime_arg_lens);

    // First pass to look for comptime values.
    for (args, 0..) |zir_arg, i_usize| {
        const i: u32 = @intCast(i_usize);
        runtime_arg_lens[i] = .none;
        // A `.none` input is an unbounded counter; it imposes no length.
        if (zir_arg == .none) continue;
        const object = try sema.resolveInst(zir_arg);
        const object_ty = sema.typeOf(object);
        // Each arg could be an indexable, or a range, in which case the length
        // is passed directly as an integer.
        const is_int = switch (object_ty.zigTypeTag(mod)) {
            .Int, .ComptimeInt => true,
            else => false,
        };
        const arg_src: LazySrcLoc = .{ .for_input = .{
            .for_node_offset = inst_data.src_node,
            .input_index = i,
        } };
        const arg_len_uncoerced = if (is_int) object else l: {
            if (!object_ty.isIndexable(mod)) {
                // Instead of using checkIndexable we customize this error.
                const msg = msg: {
                    const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
                    errdefer msg.destroy(sema.gpa);
                    try sema.errNote(block, arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});

                    if (object_ty.zigTypeTag(mod) == .ErrorUnion) {
                        try sema.errNote(block, arg_src, msg, "consider using 'try', 'catch', or 'if'", .{});
                    }

                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            }
            // Indexables without a length do not bound the loop.
            if (!object_ty.indexableHasLen(mod)) continue;

            break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src);
        };
        const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
        if (len == .none) {
            len = arg_len;
            len_idx = i;
        }
        if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
            if (len_val) |v| {
                // Two comptime-known lengths must be exactly equal.
                if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "non-matching for loop lengths", .{});
                        errdefer msg.destroy(gpa);
                        const a_src: LazySrcLoc = .{ .for_input = .{
                            .for_node_offset = inst_data.src_node,
                            .input_index = len_idx,
                        } };
                        try sema.errNote(block, a_src, msg, "length {} here", .{
                            v.fmtValue(Type.usize, sema.mod),
                        });
                        try sema.errNote(block, arg_src, msg, "length {} here", .{
                            arg_val.fmtValue(Type.usize, sema.mod),
                        });
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            } else {
                // A comptime-known length replaces any previously chosen
                // runtime-known one.
                len = arg_len;
                len_val = arg_val;
                len_idx = i;
            }
            continue;
        }
        runtime_arg_lens[i] = arg_len;
        any_runtime = true;
    }

    // No input provided any upper bound: compile error, with a note per
    // non-integer input explaining why it did not bound the loop.
    if (len == .none) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
            errdefer msg.destroy(gpa);
            for (args, 0..) |zir_arg, i_usize| {
                const i: u32 = @intCast(i_usize);
                if (zir_arg == .none) continue;
                const object = try sema.resolveInst(zir_arg);
                const object_ty = sema.typeOf(object);
                // Each arg could be an indexable, or a range, in which case the length
                // is passed directly as an integer.
                switch (object_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => continue,
                    else => {},
                }
                const arg_src: LazySrcLoc = .{ .for_input = .{
                    .for_node_offset = inst_data.src_node,
                    .input_index = i,
                } };
                try sema.errNote(block, arg_src, msg, "type '{}' has no upper bound", .{
                    object_ty.fmt(sema.mod),
                });
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Now for the runtime checks.
    if (any_runtime and block.wantSafety()) {
        for (runtime_arg_lens, 0..) |arg_len, i| {
            if (arg_len == .none) continue;
            if (i == len_idx) continue;
            const ok = try block.addBinOp(.cmp_eq, len, arg_len);
            try sema.addSafetyCheck(block, src, ok, .for_len_mismatch);
        }
    }

    return len;
}
|
|
|
|
/// Walks through every optional (`?T`) and error-union (`E!T`) layer behind
/// the pointee of `ptr`, initializing each payload pointer along the way, and
/// returns a pointer to the innermost payload.
/// Given a `*E!?T`, returns a (valid) `*T`.
/// May invalidate already-stored payload data.
fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    var cur_ptr = ptr;
    unwrap: while (true) {
        const pointee_tag = sema.typeOf(cur_ptr).childType(mod).zigTypeTag(mod);
        cur_ptr = switch (pointee_tag) {
            .ErrorUnion => try sema.analyzeErrUnionPayloadPtr(block, src, cur_ptr, false, true),
            .Optional => try sema.analyzeOptionalPayloadPtr(block, src, cur_ptr, false, true),
            else => break :unwrap,
        };
    }
    try sema.checkKnownAllocPtr(ptr, cur_ptr);
    return cur_ptr;
}
|
|
|
|
/// Thin ZIR wrapper: resolves the operand pointer and delegates to
/// `optEuBasePtrInit` to obtain the innermost payload pointer.
fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const base = try sema.resolveInst(inst_data.operand);
    return sema.optEuBasePtrInit(block, base, src);
}
|
|
|
|
/// Coerces the RHS operand toward the element type of the (possibly
/// optional/error-union wrapped) pointer type given by the LHS operand.
/// Returns the operand unchanged whenever no meaningful element-level coercion
/// is possible (generic poison type, `*[1]T` from `&t`, `anyopaque` pointee,
/// or a C pointer of unknown arity).
fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
    const uncoerced_val = try sema.resolveInst(extra.rhs);
    // Generic poison means the destination type is unknown; pass the value
    // through uncoerced.
    const maybe_wrapped_ptr_ty = sema.resolveType(block, .unneeded, extra.lhs) catch |err| switch (err) {
        error.GenericPoison => return uncoerced_val,
        else => |e| return e,
    };
    // Strip optional/error-union wrappers to reach the pointer type itself.
    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
    const elem_ty = ptr_ty.childType(mod);
    switch (ptr_ty.ptrSize(mod)) {
        .One => {
            const uncoerced_ty = sema.typeOf(uncoerced_val);
            if (elem_ty.zigTypeTag(mod) == .Array and elem_ty.childType(mod).toIntern() == uncoerced_ty.toIntern()) {
                // We're trying to initialize a *[1]T with a reference to a T - don't perform any coercion.
                return uncoerced_val;
            }
            // If the destination type is anyopaque, don't coerce - the pointer will coerce instead.
            if (elem_ty.toIntern() == .anyopaque_type) {
                return uncoerced_val;
            } else {
                return sema.coerce(block, elem_ty, uncoerced_val, src);
            }
        },
        .Slice, .Many => {
            // Our goal is to coerce `uncoerced_val` to an array of `elem_ty`.
            const val_ty = sema.typeOf(uncoerced_val);
            switch (val_ty.zigTypeTag(mod)) {
                .Array, .Vector => {},
                else => if (!val_ty.isTuple(mod)) {
                    return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(mod), val_ty.fmt(mod) });
                },
            }
            // Build the target array type: same length as the value, the
            // pointer's element type, and the pointer's sentinel (if any).
            const want_ty = try mod.arrayType(.{
                .len = val_ty.arrayLen(mod),
                .child = elem_ty.toIntern(),
                .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
            });
            return sema.coerce(block, want_ty, uncoerced_val, src);
        },
        .C => {
            // There's nothing meaningful to do here, because we don't know if this is meant to be a
            // single-pointer or a many-pointer.
            return uncoerced_val;
        },
    }
}
|
|
|
|
/// Validates that the result type applied to an address-of expression is (or
/// wraps) a pointer type. Generic poison anywhere along the way means there is
/// no concrete type to check, so the operation is treated as an untyped
/// address-of and accepted without complaint.
fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
    const src = inst_data.src();
    if (inst_data.operand == .var_args_param_type) return;
    const operand_inst = sema.resolveInst(inst_data.operand) catch |err| switch (err) {
        error.GenericPoison => return,
        else => |e| return e,
    };
    if (operand_inst == .var_args_param_type) return;
    const result_ty = sema.analyzeAsType(block, src, operand_inst) catch |err| switch (err) {
        error.GenericPoison => return,
        else => |e| return e,
    };
    if (result_ty.isGenericPoison()) return;
    // Peel optional/error-union wrappers; what remains must be a pointer,
    // since the address-of operator can only ever produce a pointer.
    if (result_ty.optEuBaseType(mod).zigTypeTag(mod) == .Pointer) return;
    const msg = msg: {
        const msg = try sema.errMsg(block, src, "expected type '{}', found pointer", .{result_ty.fmt(mod)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, src, msg, "address-of operator always returns a pointer", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Computes the result type for an array initializer whose result location is
/// a pointer. For a slice destination, synthesizes an array type of exactly
/// `elem_count` elements; for an `anyopaque` pointee, yields generic poison;
/// otherwise validates the element count against the pointee and returns it.
fn zirValidateArrayInitRefTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, inst_data.payload_index).data;
    const wrapped_ptr_ty = sema.resolveType(block, .unneeded, extra.ptr_ty) catch |err| switch (err) {
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    // Strip any optional/error-union wrappers around the pointer type.
    const ptr_ty = wrapped_ptr_ty.optEuBaseType(mod);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction

    if (ptr_ty.isSlice(mod)) {
        // Slice destination: make an array type of the exact initializer
        // length, carrying over the slice's sentinel if it has one.
        const synthesized_arr_ty = try mod.arrayType(.{
            .len = extra.elem_count,
            .child = ptr_ty.childType(mod).toIntern(),
            .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
        });
        return Air.internedToRef(synthesized_arr_ty.toIntern());
    }

    // Otherwise the result type is simply the pointer's child type.
    const ret_ty = ptr_ty.childType(mod);
    if (ret_ty.toIntern() == .anyopaque_type) {
        // The actual array type is unknown, which we represent with a generic poison.
        return .generic_poison_type;
    }

    const arr_ty = ret_ty.optEuBaseType(mod);
    try sema.validateArrayInitTy(block, src, src, extra.elem_count, arr_ty);
    return Air.internedToRef(ret_ty.toIntern());
}
|
|
|
|
/// Resolves the type operand of a `validate_array_init_ty` ZIR instruction and
/// checks it against the number of initializer elements. A generic-poison type
/// is accepted silently: the init becomes an anonymous array init.
fn zirValidateArrayInitTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_result_ty: bool,
) CompileError!void {
    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const ty_src: LazySrcLoc = if (is_result_ty) src else .{ .node_offset_init_ty = pl_node.src_node };
    const extra = sema.code.extraData(Zir.Inst.ArrayInit, pl_node.payload_index).data;
    const resolved_ty = sema.resolveType(block, ty_src, extra.ty) catch |err| switch (err) {
        // It's okay for the type to be unknown: this will result in an anonymous array init.
        error.GenericPoison => return,
        else => |e| return e,
    };
    // Result types may carry optional/error-union wrappers; peel them off.
    const arr_ty = if (is_result_ty) resolved_ty.optEuBaseType(mod) else resolved_ty;
    return sema.validateArrayInitTy(block, src, ty_src, extra.init_count, arr_ty);
}
|
|
|
|
/// Checks that `init_count` is a valid number of initializer elements for
/// `ty`. Arrays and vectors require an exact match; tuples allow at most the
/// field count. Any other type cannot be array-initialized at all.
fn validateArrayInitTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty_src: LazySrcLoc,
    init_count: u32,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Array => {
            const expected_len = ty.arrayLen(mod);
            if (init_count == expected_len) return;
            return sema.fail(block, src, "expected {d} array elements; found {d}", .{
                expected_len, init_count,
            });
        },
        .Vector => {
            const expected_len = ty.arrayLen(mod);
            if (init_count == expected_len) return;
            return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
                expected_len, init_count,
            });
        },
        .Struct => if (ty.isTuple(mod)) {
            try sema.resolveTypeFields(ty);
            const field_count = ty.arrayLen(mod);
            // Only too many initializers is an error here.
            if (init_count <= field_count) return;
            return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
                field_count, init_count,
            });
        },
        else => {},
    }
    return sema.failWithArrayInitNotSupported(block, ty_src, ty);
}
|
|
|
|
/// Verifies that the type named by a `validate_struct_init_ty` ZIR instruction
/// can be used with struct-initializer syntax, i.e. is a struct or a union.
/// A generic-poison type is accepted: the init becomes an anonymous struct init.
fn zirValidateStructInitTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_result_ty: bool,
) CompileError!void {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const resolved_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) {
        // It's okay for the type to be unknown: this will result in an anonymous struct init.
        error.GenericPoison => return,
        else => |e| return e,
    };
    // Result types may carry optional/error-union wrappers; peel them off.
    const agg_ty = if (is_result_ty) resolved_ty.optEuBaseType(mod) else resolved_ty;

    const supports_struct_init = switch (agg_ty.zigTypeTag(mod)) {
        .Struct, .Union => true,
        else => false,
    };
    if (!supports_struct_init) {
        return sema.failWithStructInitNotSupported(block, src, agg_ty);
    }
}
|
|
|
|
/// Dispatches a `validate_ptr_struct_init` ZIR instruction to struct or union
/// initialization validation, depending on the aggregate type found behind the
/// base pointer of the first field-pointer instruction in the body.
fn zirValidatePtrStructInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const init_src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const instrs = sema.code.bodySlice(extra.end, extra.data.body_len);
    // Recover the aggregate pointer from the first field initializer's base operand.
    const first_field_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node;
    const first_field_extra = sema.code.extraData(Zir.Inst.Field, first_field_data.payload_index).data;
    const object_ptr = try sema.resolveInst(first_field_extra.lhs);
    const agg_ty = sema.typeOf(object_ptr).childType(mod).optEuBaseType(mod);
    return switch (agg_ty.zigTypeTag(mod)) {
        .Struct => sema.validateStructInit(block, agg_ty, init_src, instrs),
        .Union => sema.validateUnionInit(block, agg_ty, init_src, instrs, object_ptr),
        else => unreachable,
    };
}
|
|
|
|
/// Validates a union initialization performed through field pointers.
/// Ensures exactly one field is being initialized; then determines whether the
/// initializer value is comptime-known. If so, the runtime field-pointer and
/// store instructions are removed from the block and replaced with a single
/// comptime store of the whole union value; otherwise a `set_union_tag`
/// instruction is emitted for the active field.
fn validateUnionInit(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    init_src: LazySrcLoc,
    instrs: []const Zir.Inst.Index,
    union_ptr: Air.Inst.Ref,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;

    // A union can only ever have one active field, so more than one field
    // initializer is always an error.
    if (instrs.len != 1) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                init_src,
                "cannot initialize multiple union fields at once; unions can only have one active field",
                .{},
            );
            errdefer msg.destroy(gpa);

            for (instrs[1..]) |inst| {
                const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                const inst_src: LazySrcLoc = .{ .node_offset_initializer = inst_data.src_node };
                try sema.errNote(block, inst_src, msg, "additional initializer here", .{});
            }
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, union_ptr)) != null)
    {
        // In this case, comptime machinery already did everything. No work to do here.
        return;
    }

    const field_ptr = instrs[0];
    const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node;
    const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
    const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
    const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start));
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);
    const field_ptr_ref = sema.inst_map.get(field_ptr).?;

    // Our task here is to determine if the union is comptime-known. In such case,
    // we erase the runtime AIR instructions for initializing the union, and replace
    // the mapping with the comptime value. Either way, we will need to populate the tag.

    // We expect to see something like this in the current block AIR:
    //   %a = alloc(*const U)
    //   %b = bitcast(*U, %a)
    //   %c = field_ptr(..., %b)
    //   %e = store(%c, %d)
    // If %d is a comptime operand, the union is comptime.
    // If the union is comptime, we want `first_block_index`
    // to point at %c so that the bitcast becomes the last instruction in the block.
    //
    // Store instruction may be missing; if field type has only one possible value, this case is handled below.
    //
    // In the case of a comptime-known pointer to a union, the
    // the field_ptr instruction is missing, so we have to pattern-match
    // based only on the store instructions.
    // `first_block_index` needs to point to the `field_ptr` if it exists;
    // the `store` otherwise.
    var first_block_index = block.instructions.items.len;
    var block_index = block.instructions.items.len - 1;
    var init_val: ?Value = null;
    // Scan backwards through the block looking for either the field_ptr itself
    // or a store whose destination is (possibly through a bitcast) the field_ptr.
    while (block_index > 0) : (block_index -= 1) {
        const store_inst = block.instructions.items[block_index];
        if (store_inst.toRef() == field_ptr_ref) {
            first_block_index = block_index;
            break;
        }
        switch (air_tags[@intFromEnum(store_inst)]) {
            .store, .store_safe => {},
            else => continue,
        }
        const bin_op = air_datas[@intFromEnum(store_inst)].bin_op;
        var ptr_ref = bin_op.lhs;
        // Look through a bitcast to find the underlying destination pointer.
        if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
            ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
        };
        if (ptr_ref != field_ptr_ref) continue;
        first_block_index = @min(if (field_ptr_ref.toIndex()) |field_ptr_inst|
            std.mem.lastIndexOfScalar(
                Air.Inst.Index,
                block.instructions.items[0..block_index],
                field_ptr_inst,
            ).?
        else
            block_index, first_block_index);
        init_val = try sema.resolveValue(bin_op.rhs);
        break;
    }

    const tag_ty = union_ty.unionTagTypeHypothetical(mod);
    const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
    const field_type = union_ty.unionFieldType(tag_val, mod).?;

    // A field whose type has only one possible value is comptime-known even
    // without any store instruction.
    if (try sema.typeHasOnePossibleValue(field_type)) |field_only_value| {
        init_val = field_only_value;
    }

    if (init_val) |val| {
        // Our task is to delete all the `field_ptr` and `store` instructions, and insert
        // instead a single `store` to the result ptr with a comptime union value.
        block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            switch (air_tags[@intFromEnum(cur_inst)]) {
                .struct_field_ptr,
                .struct_field_ptr_index_0,
                .struct_field_ptr_index_1,
                .struct_field_ptr_index_2,
                .struct_field_ptr_index_3,
                => if (cur_inst.toRef() == field_ptr_ref) continue,
                .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == field_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs;
                    if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
                    };
                    if (ptr_ref == field_ptr_ref) continue;
                },
                else => {},
            }
            // Keep this instruction: compact it toward the front of the block.
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        const union_val = try mod.intern(.{ .un = .{
            .ty = union_ty.toIntern(),
            .tag = tag_val.toIntern(),
            .val = val.toIntern(),
        } });
        const union_init = Air.internedToRef(union_val);
        try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
        return;
    } else if (try sema.typeRequiresComptime(union_ty)) {
        return sema.failWithNeededComptime(block, field_ptr_data.src(), .{
            .needed_comptime_reason = "initializer of comptime only union must be comptime-known",
        });
    }

    // Runtime union init: emit the tag store for the active field.
    const new_tag = Air.internedToRef(tag_val.toIntern());
    const set_tag_inst = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
    try sema.checkComptimeKnownStore(block, set_tag_inst);
}
|
|
|
|
/// Validates a struct initialization performed through field pointers.
/// Reports any missing (non-defaulted) fields, stores default values for
/// omitted fields, and — when every field value turns out comptime-known —
/// removes the runtime field-pointer/store instructions from the block and
/// replaces them with a single comptime store of the whole struct value.
fn validateStructInit(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    init_src: LazySrcLoc,
    instrs: []const Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // For each initializer instruction, the index of the field it initializes.
    const field_indices = try gpa.alloc(u32, instrs.len);
    defer gpa.free(field_indices);

    // Maps field index to field_ptr index of where it was already initialized.
    const found_fields = try gpa.alloc(Zir.Inst.OptionalIndex, struct_ty.structFieldCount(mod));
    defer gpa.free(found_fields);
    @memset(found_fields, .none);

    var struct_ptr_zir_ref: Zir.Inst.Ref = undefined;

    // Resolve each field initializer to its field index; also capture the base
    // struct pointer ZIR ref from the field_ptr instructions.
    for (instrs, field_indices) |field_ptr, *field_index| {
        const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node;
        const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
        const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
        struct_ptr_zir_ref = field_ptr_extra.lhs;
        const field_name = try ip.getOrPutString(
            gpa,
            sema.code.nullTerminatedString(field_ptr_extra.field_name_start),
        );
        field_index.* = if (struct_ty.isTuple(mod))
            try sema.tupleFieldIndex(block, struct_ty, field_name, field_src)
        else
            try sema.structFieldIndex(block, struct_ty, field_name, field_src);
        assert(found_fields[field_index.*] == .none);
        found_fields[field_index.*] = field_ptr.toOptional();
    }

    // Accumulates "missing field" diagnostics so all of them are reported at once.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref);
    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
    {
        try sema.resolveStructLayout(struct_ty);
        // In this case the only thing we need to do is evaluate the implicit
        // store instructions for default field values, and report any missing fields.
        // Avoid the cost of the extra machinery for detecting a comptime struct init value.
        for (found_fields, 0..) |field_ptr, i_usize| {
            const i: u32 = @intCast(i_usize);
            if (field_ptr != .none) continue;

            try sema.resolveStructFieldInits(struct_ty);
            const default_val = struct_ty.structFieldDefaultValue(i, mod);
            // `.unreachable_value` marks a field with no default: report it missing.
            if (default_val.toIntern() == .unreachable_value) {
                const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
                    const template = "missing tuple field with index {d}";
                    if (root_msg) |msg| {
                        try sema.errNote(block, init_src, msg, template, .{i});
                    } else {
                        root_msg = try sema.errMsg(block, init_src, template, .{i});
                    }
                    continue;
                };
                const template = "missing struct field: {}";
                const args = .{field_name.fmt(ip)};
                if (root_msg) |msg| {
                    try sema.errNote(block, init_src, msg, template, args);
                } else {
                    root_msg = try sema.errMsg(block, init_src, template, args);
                }
                continue;
            }

            const field_src = init_src; // TODO better source location
            const default_field_ptr = if (struct_ty.isTuple(mod))
                try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true)
            else
                try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true);
            const init = Air.internedToRef(default_val.toIntern());
            try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
        }

        if (root_msg) |msg| {
            if (mod.typeToStruct(struct_ty)) |struct_type| {
                const decl = mod.declPtr(struct_type.decl.unwrap().?);
                const fqn = try decl.fullyQualifiedName(mod);
                try mod.errNoteNonLazy(
                    decl.srcLoc(mod),
                    msg,
                    "struct '{}' declared here",
                    .{fqn.fmt(ip)},
                );
            }
            root_msg = null;
            return sema.failWithOwnedErrorMsg(block, msg);
        }

        return;
    }

    // Assume comptime-known until a runtime-only field value is discovered.
    var struct_is_comptime = true;
    var first_block_index = block.instructions.items.len;

    const require_comptime = try sema.typeRequiresComptime(struct_ty);
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);

    try sema.resolveStructFieldInits(struct_ty);

    // We collect the comptime field values in case the struct initialization
    // ends up being comptime-known.
    const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod));

    field: for (found_fields, 0..) |opt_field_ptr, i_usize| {
        const i: u32 = @intCast(i_usize);
        if (opt_field_ptr.unwrap()) |field_ptr| {
            // Determine whether the value stored to this pointer is comptime-known.
            const field_ty = struct_ty.structFieldType(i, mod);
            if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
                field_values[i] = opv.toIntern();
                continue;
            }

            const field_ptr_ref = sema.inst_map.get(field_ptr).?;

            //std.debug.print("validateStructInit (field_ptr_ref=%{d}):\n", .{field_ptr_ref});
            //for (block.instructions.items) |item| {
            //    std.debug.print("  %{d} = {s}\n", .{item, @tagName(air_tags[@intFromEnum(item)])});
            //}

            // We expect to see something like this in the current block AIR:
            //   %a = field_ptr(...)
            //   store(%a, %b)
            // With an optional bitcast between the store and the field_ptr.
            // If %b is a comptime operand, this field is comptime.
            //
            // However, in the case of a comptime-known pointer to a struct, the
            // the field_ptr instruction is missing, so we have to pattern-match
            // based only on the store instructions.
            // `first_block_index` needs to point to the `field_ptr` if it exists;
            // the `store` otherwise.

            // Possible performance enhancement: save the `block_index` between iterations
            // of the for loop.
            var block_index = block.instructions.items.len;
            // Scan backwards for the store to this field (or the field_ptr itself,
            // which would mean no store was emitted: runtime-only).
            while (block_index > 0) {
                block_index -= 1;
                const store_inst = block.instructions.items[block_index];
                if (store_inst.toRef() == field_ptr_ref) {
                    struct_is_comptime = false;
                    continue :field;
                }
                switch (air_tags[@intFromEnum(store_inst)]) {
                    .store, .store_safe => {},
                    else => continue,
                }
                const bin_op = air_datas[@intFromEnum(store_inst)].bin_op;
                var ptr_ref = bin_op.lhs;
                // Look through a bitcast to find the underlying destination pointer.
                if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                    ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
                };
                if (ptr_ref != field_ptr_ref) continue;
                first_block_index = @min(if (field_ptr_ref.toIndex()) |field_ptr_inst|
                    std.mem.lastIndexOfScalar(
                        Air.Inst.Index,
                        block.instructions.items[0..block_index],
                        field_ptr_inst,
                    ).?
                else
                    block_index, first_block_index);
                if (try sema.resolveValue(bin_op.rhs)) |val| {
                    field_values[i] = val.toIntern();
                } else if (require_comptime) {
                    const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node;
                    return sema.failWithNeededComptime(block, field_ptr_data.src(), .{
                        .needed_comptime_reason = "initializer of comptime only struct must be comptime-known",
                    });
                } else {
                    struct_is_comptime = false;
                }
                continue :field;
            }
            struct_is_comptime = false;
            continue :field;
        }

        // Field not explicitly initialized: fall back to its default value.
        const default_val = struct_ty.structFieldDefaultValue(i, mod);
        // `.unreachable_value` marks a field with no default: report it missing.
        if (default_val.toIntern() == .unreachable_value) {
            const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
                const template = "missing tuple field with index {d}";
                if (root_msg) |msg| {
                    try sema.errNote(block, init_src, msg, template, .{i});
                } else {
                    root_msg = try sema.errMsg(block, init_src, template, .{i});
                }
                continue;
            };
            const template = "missing struct field: {}";
            const args = .{field_name.fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, init_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, init_src, template, args);
            }
            continue;
        }
        field_values[i] = default_val.toIntern();
    }

    if (root_msg) |msg| {
        if (mod.typeToStruct(struct_ty)) |struct_type| {
            const decl = mod.declPtr(struct_type.decl.unwrap().?);
            const fqn = try decl.fullyQualifiedName(mod);
            try mod.errNoteNonLazy(
                decl.srcLoc(mod),
                msg,
                "struct '{}' declared here",
                .{fqn.fmt(ip)},
            );
        }
        root_msg = null;
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    if (struct_is_comptime) {
        // Our task is to delete all the `field_ptr` and `store` instructions, and insert
        // instead a single `store` to the struct_ptr with a comptime struct value.
        var init_index: usize = 0;
        var field_ptr_ref = Air.Inst.Ref.none;
        var block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            // Advance to the next field initializer whose type has more than
            // one possible value (only those produced AIR instructions).
            while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) {
                const field_ty = struct_ty.structFieldType(field_indices[init_index], mod);
                if (try field_ty.onePossibleValue(mod)) |_| continue;
                field_ptr_ref = sema.inst_map.get(instrs[init_index]).?;
            }
            switch (air_tags[@intFromEnum(cur_inst)]) {
                .struct_field_ptr,
                .struct_field_ptr_index_0,
                .struct_field_ptr_index_1,
                .struct_field_ptr_index_2,
                .struct_field_ptr_index_3,
                => if (cur_inst.toRef() == field_ptr_ref) continue,
                .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == field_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs;
                    if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
                    };
                    if (ptr_ref == field_ptr_ref) {
                        field_ptr_ref = .none;
                        continue;
                    }
                },
                else => {},
            }
            // Keep this instruction: compact it toward the front of the block.
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        const struct_val = try mod.intern(.{ .aggregate = .{
            .ty = struct_ty.toIntern(),
            .storage = .{ .elems = field_values },
        } });
        const struct_init = Air.internedToRef(struct_val);
        try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
        return;
    }
    try sema.resolveStructLayout(struct_ty);

    // Our task is to insert `store` instructions for all the default field values.
    for (found_fields, 0..) |field_ptr, i| {
        if (field_ptr != .none) continue;

        const field_src = init_src; // TODO better source location
        const default_field_ptr = if (struct_ty.isTuple(mod))
            try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true)
        else
            try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true);
        try sema.checkKnownAllocPtr(struct_ptr, default_field_ptr);
        const init = Air.internedToRef(field_values[i]);
        try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
    }
}
|
|
|
|
/// Validates an in-place array/vector/tuple initialization that was lowered as a
/// series of `elem_ptr` + `store` ZIR instructions. Checks the element count,
/// fills in tuple defaults, and — when every stored element turns out to be
/// comptime-known — rewrites the whole init into a single comptime `store`.
fn zirValidatePtrArrayInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const init_src = validate_inst.src();
    const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
    // `instrs` are the ZIR `elem_ptr` instructions, one per initialized element.
    const instrs = sema.code.bodySlice(validate_extra.end, validate_extra.data.body_len);
    // Recover the array pointer from the first elem_ptr's payload.
    const first_elem_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node;
    const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data;
    const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr);
    const array_ty = sema.typeOf(array_ptr).childType(mod).optEuBaseType(mod);
    const array_len = array_ty.arrayLen(mod);

    // Collect the comptime element values in case the array literal ends up
    // being comptime-known.
    const element_vals = try sema.arena.alloc(
        InternPool.Index,
        try sema.usizeCast(block, init_src, array_len),
    );

    if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
        // A tuple (lowered as .Struct here) may omit trailing fields that have
        // default values; any omitted field without a default is an error.
        .Struct => {
            var root_msg: ?*Module.ErrorMsg = null;
            errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

            try sema.resolveStructFieldInits(array_ty);
            var i = instrs.len;
            while (i < array_len) : (i += 1) {
                const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
                // `.unreachable_value` marks "no default" — accumulate an error
                // note per missing field instead of failing on the first one.
                if (default_val == .unreachable_value) {
                    const template = "missing tuple field with index {d}";
                    if (root_msg) |msg| {
                        try sema.errNote(block, init_src, msg, template, .{i});
                    } else {
                        root_msg = try sema.errMsg(block, init_src, template, .{i});
                    }
                    continue;
                }

                element_vals[i] = default_val;
            }

            if (root_msg) |msg| {
                // Ownership of `msg` transfers to failWithOwnedErrorMsg; clear
                // root_msg so the errdefer above does not double-free it.
                root_msg = null;
                return sema.failWithOwnedErrorMsg(block, msg);
            }
        },
        .Array => {
            return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        .Vector => {
            return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        else => unreachable,
    };

    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, array_ptr)) != null)
    {
        // In this case the comptime machinery will have evaluated the store instructions
        // at comptime so we have almost nothing to do here. However, in case of a
        // sentinel-terminated array, the sentinel will not have been populated by
        // any ZIR instructions at comptime; we need to do that here.
        if (array_ty.sentinel(mod)) |sentinel_val| {
            const array_len_ref = try mod.intRef(Type.usize, array_len);
            const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
            const sentinel = Air.internedToRef(sentinel_val.toIntern());
            try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
        }
        return;
    }

    // If the array has one possible value, the value is always comptime-known.
    if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| {
        const array_init = Air.internedToRef(array_opv.toIntern());
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
        return;
    }

    var array_is_comptime = true;
    // Index of the first AIR instruction belonging to this init sequence;
    // everything from here on may be deleted if the init is comptime-known.
    var first_block_index = block.instructions.items.len;

    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);

    outer: for (instrs, 0..) |elem_ptr, i| {
        // Determine whether the value stored to this pointer is comptime-known.

        if (array_ty.isTuple(mod)) {
            if (array_ty.structFieldIsComptime(i, mod))
                try sema.resolveStructFieldInits(array_ty);
            // Comptime tuple fields have no runtime store; take the value directly.
            if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                element_vals[i] = opv.toIntern();
                continue;
            }
        }

        const elem_ptr_ref = sema.inst_map.get(elem_ptr).?;

        // We expect to see something like this in the current block AIR:
        // %a = elem_ptr(...)
        // store(%a, %b)
        // With an optional bitcast between the store and the elem_ptr.
        // If %b is a comptime operand, this element is comptime.
        //
        // However, in the case of a comptime-known pointer to an array, the
        // the elem_ptr instruction is missing, so we have to pattern-match
        // based only on the store instructions.
        // `first_block_index` needs to point to the `elem_ptr` if it exists;
        // the `store` otherwise.
        //
        // This is nearly identical to similar logic in `validateStructInit`.

        // Possible performance enhancement: save the `block_index` between iterations
        // of the for loop.
        var block_index = block.instructions.items.len;
        while (block_index > 0) {
            block_index -= 1;
            const store_inst = block.instructions.items[block_index];
            // Reached the elem_ptr itself without finding a store to it:
            // the element value is not visible here, so not comptime.
            if (store_inst.toRef() == elem_ptr_ref) {
                array_is_comptime = false;
                continue :outer;
            }
            switch (air_tags[@intFromEnum(store_inst)]) {
                .store, .store_safe => {},
                else => continue,
            }
            const bin_op = air_datas[@intFromEnum(store_inst)].bin_op;
            var ptr_ref = bin_op.lhs;
            // Look through a single bitcast between the store and the elem_ptr.
            if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
            };
            if (ptr_ref != elem_ptr_ref) continue;
            first_block_index = @min(if (elem_ptr_ref.toIndex()) |elem_ptr_inst|
                std.mem.lastIndexOfScalar(
                    Air.Inst.Index,
                    block.instructions.items[0..block_index],
                    elem_ptr_inst,
                ).?
            else
                block_index, first_block_index);
            if (try sema.resolveValue(bin_op.rhs)) |val| {
                element_vals[i] = val.toIntern();
            } else {
                array_is_comptime = false;
            }
            continue :outer;
        }
        // No matching store found in this block at all.
        array_is_comptime = false;
        continue :outer;
    }

    if (array_is_comptime) {
        if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
            switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
                .ptr => |ptr| switch (ptr.addr) {
                    .comptime_field => return, // This store was validated by the individual elem ptrs.
                    else => {},
                },
                else => {},
            }
        }

        // Our task is to delete all the `elem_ptr` and `store` instructions, and insert
        // instead a single `store` to the array_ptr with a comptime struct value.
        var elem_index: usize = 0;
        var elem_ptr_ref = Air.Inst.Ref.none;
        // Compact the block in place: instructions that belong to the init
        // sequence are skipped; everything else is shifted down.
        var block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            // Advance to the next element that actually has a runtime elem_ptr
            // (comptime tuple fields have none).
            while (elem_ptr_ref == .none and elem_index < instrs.len) : (elem_index += 1) {
                if (array_ty.isTuple(mod) and array_ty.structFieldIsComptime(elem_index, mod)) continue;
                elem_ptr_ref = sema.inst_map.get(instrs[elem_index]).?;
            }
            switch (air_tags[@intFromEnum(cur_inst)]) {
                .ptr_elem_ptr => if (cur_inst.toRef() == elem_ptr_ref) continue,
                .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == elem_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs;
                    if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
                    };
                    if (ptr_ref == elem_ptr_ref) {
                        // This store completes the current element; move on.
                        elem_ptr_ref = .none;
                        continue;
                    }
                },
                else => {},
            }
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        const array_val = try mod.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .elems = element_vals },
        } });
        const array_init = Air.internedToRef(array_val);
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
    }
}
|
|
|
|
/// Validates the operand of a `.*` dereference: it must be a single-item
/// (or C) pointer, must not be a comptime-known `undefined`, and a
/// runtime-known pointer may not point at a comptime-only type.
fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const ptr = try sema.resolveInst(un_node.operand);
    const ptr_ty = sema.typeOf(ptr);

    // Only single-item and C pointers may be dereferenced directly.
    switch (ptr_ty.zigTypeTag(mod)) {
        .Pointer => switch (ptr_ty.ptrSize(mod)) {
            .One, .C => {},
            .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{ptr_ty.fmt(mod)}),
            .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{ptr_ty.fmt(mod)}),
        },
        else => return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{ptr_ty.fmt(mod)}),
    }

    if ((try sema.typeHasOnePossibleValue(ptr_ty.childType(mod))) != null) {
        // No need to validate the actual pointer value, we don't need it!
        return;
    }

    const pointee_ty = ptr_ty.elemType2(mod);
    if (try sema.resolveValue(ptr)) |ptr_val| {
        if (ptr_val.isUndef(mod)) {
            return sema.fail(block, src, "cannot dereference undefined value", .{});
        }
    } else if (try sema.typeRequiresComptime(pointee_ty)) {
        // Runtime pointer to a comptime-only type: explain why the type is
        // comptime-only as part of the error.
        const msg = blk: {
            const err_msg = try sema.errMsg(
                block,
                src,
                "values of type '{}' must be comptime-known, but operand value is runtime-known",
                .{pointee_ty.fmt(mod)},
            );
            errdefer err_msg.destroy(sema.gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsComptime(err_msg, src_decl.toSrcLoc(src, mod), pointee_ty);
            break :blk err_msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
}
|
|
|
|
/// Validates a destructuring assignment: the operand must be an array,
/// vector, or tuple whose length equals the number of result locations.
fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ValidateDestructure, inst_data.payload_index).data;
    const src = inst_data.src();
    const destructure_src = LazySrcLoc.nodeOffset(extra.destructure_node);
    const operand = try sema.resolveInst(extra.operand);
    const operand_ty = sema.typeOf(operand);

    // Arrays, vectors, and tuples are the only destructurable types.
    const destructurable = switch (operand_ty.zigTypeTag(mod)) {
        .Array, .Vector => true,
        .Struct => operand_ty.isTuple(mod),
        else => false,
    };

    if (!destructurable) return sema.failWithOwnedErrorMsg(block, blk: {
        const msg = try sema.errMsg(block, src, "type '{}' cannot be destructured", .{operand_ty.fmt(mod)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, destructure_src, msg, "result destructured here", .{});
        break :blk msg;
    });

    if (operand_ty.arrayLen(mod) != extra.expect_len) return sema.failWithOwnedErrorMsg(block, blk: {
        const msg = try sema.errMsg(block, src, "expected {} elements for destructure, found {}", .{
            extra.expect_len,
            operand_ty.arrayLen(mod),
        });
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, destructure_src, msg, "result destructured here", .{});
        break :blk msg;
    });
}
|
|
|
|
/// Emits a "has no member named" error for `agg_ty`, with a friendlier
/// message when the aggregate is the root struct of a file.
fn failWithBadMemberAccess(
    sema: *Sema,
    block: *Block,
    agg_ty: Type,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const kw_name = switch (agg_ty.zigTypeTag(mod)) {
        .Union => "union",
        .Struct => "struct",
        .Opaque => "opaque",
        .Enum => "enum",
        else => unreachable,
    };
    // The root struct of a file gets a message that names the file.
    if (agg_ty.getOwnerDeclOrNull(mod)) |owner_decl| {
        if (mod.declIsRoot(owner_decl)) {
            return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{
                agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
            });
        }
    }

    return sema.fail(block, field_src, "{s} '{}' has no member named '{}'", .{
        kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
    });
}
|
|
|
|
/// Emits a "no field named X in struct Y" error, with a note pointing at the
/// struct's declaration.
fn failWithBadStructFieldAccess(
    sema: *Sema,
    block: *Block,
    struct_type: InternPool.Key.StructType,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const decl = mod.declPtr(struct_type.decl.unwrap().?);
    const fqn = try decl.fullyQualifiedName(mod);

    const msg = blk: {
        const err_msg = try sema.errMsg(
            block,
            field_src,
            "no field named '{}' in struct '{}'",
            .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
        );
        // Destroyed only if attaching the note fails; on success, ownership
        // passes to failWithOwnedErrorMsg.
        errdefer err_msg.destroy(gpa);
        try mod.errNoteNonLazy(decl.srcLoc(mod), err_msg, "struct declared here", .{});
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Emits a "no field named X in union Y" error, with a note pointing at the
/// union's declaration.
fn failWithBadUnionFieldAccess(
    sema: *Sema,
    block: *Block,
    union_obj: InternPool.UnionType,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const gpa = sema.gpa;

    const decl = mod.declPtr(union_obj.decl);
    const fqn = try decl.fullyQualifiedName(mod);

    const msg = blk: {
        const err_msg = try sema.errMsg(
            block,
            field_src,
            "no field named '{}' in union '{}'",
            .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
        );
        // Destroyed only if attaching the note fails; on success, ownership
        // passes to failWithOwnedErrorMsg.
        errdefer err_msg.destroy(gpa);
        try mod.errNoteNonLazy(decl.srcLoc(mod), err_msg, "union declared here", .{});
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Appends a "<kind> declared here" note to `parent`, pointing at the
/// declaration of `decl_ty`. Does nothing if no source location is known.
fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
    const mod = sema.mod;
    const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return;
    const what = switch (decl_ty.zigTypeTag(mod)) {
        .Struct => "struct",
        .Union => "union",
        .Enum => "enum",
        .ErrorSet => "error set",
        .Opaque => "opaque",
        else => unreachable,
    };
    try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{what});
}
|
|
|
|
/// Handles a store through a pointer produced by an inferred allocation
/// (`var x = ...` with inferred type), dispatching on whether the allocation
/// is comptime or runtime.
fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ptr = try sema.resolveInst(extra.lhs);
    const operand = try sema.resolveInst(extra.rhs);
    const ptr_inst = ptr.toIndex().?;
    const air_datas = sema.air_instructions.items(.data);
    const ptr_tag = sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)];

    switch (ptr_tag) {
        .inferred_alloc_comptime => {
            const iac = &air_datas[@intFromEnum(ptr_inst)].inferred_alloc_comptime;
            return sema.storeToInferredAllocComptime(block, src, operand, iac);
        },
        .inferred_alloc => {
            const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?;
            return sema.storeToInferredAlloc(block, ptr, operand, ia);
        },
        else => unreachable,
    }
}
|
|
|
|
/// Records a store into a runtime inferred allocation. The emitted `store` is
/// a placeholder rewritten once the allocation's type is resolved.
fn storeToInferredAlloc(
    sema: *Sema,
    block: *Block,
    ptr: Air.Inst.Ref,
    operand: Air.Inst.Ref,
    inferred_alloc: *InferredAlloc,
) CompileError!void {
    // Emit a placeholder store; it is replaced by the proper store sequence
    // once the stored type is known.
    const placeholder_store = try block.addBinOp(.store, ptr, operand);
    try sema.checkComptimeKnownStore(block, placeholder_store);
    // Track this store so its operand participates in peer type resolution
    // for the inferred allocation.
    try inferred_alloc.prongs.append(sema.arena, placeholder_store.toIndex().?);
}
|
|
|
|
/// Records a store into a comptime inferred allocation. The stored value must
/// be comptime-known; it becomes an anonymous Decl backing the allocation.
fn storeToInferredAllocComptime(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    iac: *Air.Inst.Data.InferredAllocComptime,
) CompileError!void {
    const operand_ty = sema.typeOf(operand);
    // There will be only one store_to_inferred_ptr because we are running at comptime.
    // The alloc will turn into a Decl.
    const operand_val = (try sema.resolveValue(operand)) orelse {
        return sema.failWithNeededComptime(block, src, .{
            .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known",
        });
    };

    var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl
    defer anon_decl.deinit();
    iac.decl_index = try anon_decl.finish(operand_ty, operand_val, iac.alignment);
    try sema.comptime_mutable_decls.append(iac.decl_index);
}
|
|
|
|
/// Implements `@setEvalBranchQuota`. The quota may only grow; a smaller
/// requested value is ignored.
fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const requested: u32 = @intCast(try sema.resolveInt(block, src, un_node.operand, Type.u32, .{
        .needed_comptime_reason = "eval branch quota must be comptime-known",
    }));
    if (requested > sema.branch_quota) sema.branch_quota = requested;
}
|
|
|
|
/// Analyzes a ZIR `store_node` instruction: a store through a pointer, with
/// special handling when the destination is the function's return pointer
/// (inferred error set accumulation and the `.ret_ptr` AIR tag).
fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const zir_tags = sema.code.instructions.items(.tag);
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ptr = try sema.resolveInst(extra.lhs);
    const operand = try sema.resolveInst(extra.rhs);

    // Is the destination the function's return pointer?
    const is_ret = blk: {
        const ptr_index = extra.lhs.toIndex() orelse break :blk false;
        break :blk zir_tags[@intFromEnum(ptr_index)] == .ret_ptr;
    };

    // Check for the possibility of this pattern:
    //   %a = ret_ptr
    //   %b = store(%a, %c)
    // Where %c is an error union or error set. In such case we need to add
    // to the current function's inferred error set, if any.
    if (is_ret and sema.fn_ret_ty_ies != null) {
        switch (sema.typeOf(operand).zigTypeTag(mod)) {
            .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand),
            else => {},
        }
    }

    const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
    const air_tag: Air.Inst.Tag = tag: {
        if (is_ret) break :tag .ret_ptr;
        break :tag if (block.wantSafety()) .store_safe else .store;
    };
    return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
|
|
|
|
/// Analyzes a ZIR string literal instruction by interning its bytes.
/// The ZIR string table lives outside the InternPool, so no copy is needed.
fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const str_bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code);
    return sema.addStrLitNoAlias(str_bytes);
}
|
|
|
|
/// Like `addStrLitNoAlias`, but first copies `bytes` into the Sema arena so
/// the caller's buffer may alias the InternPool.
fn addStrLit(sema: *Sema, bytes: []const u8) CompileError!Air.Inst.Ref {
    const bytes_copy = try sema.arena.dupe(u8, bytes);
    return sema.addStrLitNoAlias(bytes_copy);
}
|
|
|
|
/// Safe to call when `bytes` does not point into `InternPool`.
/// Interns `bytes` as a null-terminated `u8` array and returns a constant
/// pointer to the resulting anonymous value.
fn addStrLitNoAlias(sema: *Sema, bytes: []const u8) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    // String literals are sentinel-terminated arrays of u8.
    const str_ty = try mod.arrayType(.{
        .len = bytes.len,
        .sentinel = .zero_u8,
        .child = .u8_type,
    });
    const interned = try mod.intern(.{ .aggregate = .{
        .ty = str_ty.toIntern(),
        .storage = .{ .bytes = bytes },
    } });
    return sema.anonDeclRef(interned);
}
|
|
|
|
/// Interns a constant pointer to `val` and returns it as an AIR ref.
fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref {
    const ptr_val = try sema.refValue(val);
    return Air.internedToRef(ptr_val);
}
|
|
|
|
/// Interns a `*const` pointer (generic address space, natural alignment)
/// pointing at the anonymous value `val`.
fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
    const mod = sema.mod;
    const pointee_ty = mod.intern_pool.typeOf(val);
    const ptr_ty = (try sema.ptrType(.{
        .child = pointee_ty,
        .flags = .{
            .is_const = true,
            .alignment = .none,
            .address_space = .generic,
        },
    })).toIntern();
    return mod.intern(.{ .ptr = .{
        .ty = ptr_ty,
        .addr = .{ .anon_decl = .{
            .orig_ty = ptr_ty,
            .val = val,
        } },
    } });
}
|
|
|
|
/// Analyzes a small integer literal, producing a `comptime_int` value.
fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();
    _ = block;

    const value = sema.code.instructions.items(.data)[@intFromEnum(inst)].int;
    return sema.mod.intRef(Type.comptime_int, value);
}
|
|
|
|
/// Analyzes a big integer literal whose limbs are stored in the ZIR string
/// table, producing a `comptime_int` value.
fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();
    _ = block;

    const mod = sema.mod;
    const str = sema.code.instructions.items(.data)[@intFromEnum(inst)].str;
    const limb_count = str.len;
    const raw_limb_bytes = sema.code.string_bytes[@intFromEnum(str.start)..][0 .. limb_count * @sizeOf(std.math.big.Limb)];

    // TODO: this allocation and copy is only needed because the limbs may be unaligned.
    // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these
    // two lines can be removed.
    const limbs = try sema.arena.alloc(std.math.big.Limb, limb_count);
    @memcpy(mem.sliceAsBytes(limbs), raw_limb_bytes);

    return Air.internedToRef((try mod.intValue_big(Type.comptime_int, .{
        .limbs = limbs,
        .positive = true,
    })).toIntern());
}
|
|
|
|
/// Analyzes a float literal, producing a `comptime_float` value.
fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const float_num = sema.code.instructions.items(.data)[@intFromEnum(inst)].float;
    const val = try sema.mod.floatValue(Type.comptime_float, float_num);
    return Air.internedToRef(val.toIntern());
}
|
|
|
|
/// Analyzes a 128-bit float literal (stored in extra data), producing a
/// `comptime_float` value.
fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
    const val = try sema.mod.floatValue(Type.comptime_float, extra.get());
    return Air.internedToRef(val.toIntern());
}
|
|
|
|
/// Implements `@compileError`: resolves the comptime-known message string and
/// fails the compilation with it.
fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.src_node };
    const text = try sema.resolveConstString(block, operand_src, un_node.operand, .{
        .needed_comptime_reason = "compile error string must be comptime-known",
    });
    return sema.fail(block, src, "{s}", .{text});
}
|
|
|
|
/// Implements `@compileLog`: formats each argument (with its type) into the
/// module-wide compile log buffer and records one source location per Decl.
fn zirCompileLog(
    sema: *Sema,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    var managed = mod.compile_log_text.toManaged(sema.gpa);
    defer sema.mod.compile_log_text = managed.moveToUnmanaged();
    const writer = managed.writer();

    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
    const src_node = extra.data.src_node;
    const args = sema.code.refSlice(extra.end, extended.small);

    for (args, 0..) |arg_ref, arg_i| {
        // Comma-separate the arguments.
        if (arg_i != 0) try writer.print(", ", .{});

        const arg = try sema.resolveInst(arg_ref);
        const arg_ty = sema.typeOf(arg);
        if (try sema.resolveValueResolveLazy(arg)) |val| {
            try writer.print("@as({}, {})", .{ arg_ty.fmt(mod), val.fmtValue(arg_ty, mod) });
        } else {
            try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)});
        }
    }
    try writer.print("\n", .{});

    // Remember the first @compileLog source location per Decl.
    const decl_index = if (sema.func_index != .none)
        mod.funcOwnerDeclIndex(sema.func_index)
    else
        sema.owner_decl_index;
    const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
    if (!gop.found_existing) gop.value_ptr.* = src_node;

    return .void_value;
}
|
|
|
|
/// Implements `@panic`: coerces the message to `[]const u8` and emits a
/// runtime panic; at comptime this is a compile error instead.
fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const uncoerced_msg = try sema.resolveInst(un_node.operand);

    // `panicWithMsg` would perform this coercion for us, but we can get a better
    // source location if we do it here.
    const msg_operand = try sema.coerce(block, Type.slice_const_u8, uncoerced_msg, .{ .node_offset_builtin_call_arg0 = un_node.src_node });

    if (block.is_comptime) {
        return sema.fail(block, src, "encountered @panic at comptime", .{});
    }
    try sema.panicWithMsg(block, src, msg_operand, .@"@panic");
    return always_noreturn;
}
|
|
|
|
/// Implements `@trap`: emits a `trap` AIR instruction; at comptime this is a
/// compile error instead.
fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node;
    const src = LazySrcLoc.nodeOffset(node);
    if (block.is_comptime) {
        return sema.fail(block, src, "encountered @trap at comptime", .{});
    }
    _ = try block.addNoOp(.trap);
    return always_noreturn;
}
|
|
|
|
/// Analyzes a ZIR `loop` instruction, producing an AIR `block` wrapping an
/// AIR `loop`. The block exists so `break` can target the loop's result; the
/// loop instruction itself is elided if the body ends in `noreturn`.
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);
    const gpa = sema.gpa;

    // AIR expects a block outside the loop block too.
    // Reserve space for a Loop instruction so that generated Break instructions can
    // point to it, even if it doesn't end up getting used because the code ends up being
    // comptime evaluated.
    // NOTE: the two indices are consecutive by construction; nothing else may
    // append to air_instructions between these two lines and the appends below.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    const loop_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(block_inst) + 1);
    try sema.air_instructions.ensureUnusedCapacity(gpa, 2);
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .block,
        .data = undefined, // Filled in by analyzeBlockBody.
    });
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .loop,
        .data = .{ .ty_pl = .{
            .ty = .noreturn_type,
            .payload = undefined, // Filled in below once the body length is known.
        } },
    });
    // Label allows `break` inside the loop to target `block_inst`.
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };
    var child_block = parent_block.makeSubBlock();
    child_block.label = &label;
    child_block.runtime_cond = null;
    // Mark that code below runs at runtime because of this loop.
    child_block.runtime_loop = mod.declPtr(child_block.src_decl).toSrcLoc(src, mod);
    child_block.runtime_index.increment();
    const merges = &child_block.label.?.merges;

    defer child_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    var loop_block = child_block.makeSubBlock();
    defer loop_block.instructions.deinit(gpa);

    try sema.analyzeBody(&loop_block, body);

    const loop_block_len = loop_block.instructions.items.len;
    if (loop_block_len > 0 and sema.typeOf(loop_block.instructions.items[loop_block_len - 1].toRef()).isNoReturn(mod)) {
        // If the loop ended with a noreturn terminator, then there is no way for it to loop,
        // so we can just use the block instead.
        try child_block.instructions.appendSlice(gpa, loop_block.instructions.items);
    } else {
        try child_block.instructions.append(gpa, loop_inst);

        // Store the loop body in air_extra and point the reserved loop
        // instruction's payload at it.
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len);
        sema.air_instructions.items(.data)[@intFromEnum(loop_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(
            Air.Block{ .body_len = @intCast(loop_block_len) },
        );
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(loop_block.instructions.items));
    }
    return sema.analyzeBlockBody(parent_block, src, &child_block, merges, false);
}
|
|
|
|
/// Implements `@cImport`: comptime-evaluates the body to build the C source
/// buffer, runs translate-c on it, then imports and analyzes the resulting
/// Zig file, returning its root decl value.
fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const comp = mod.comp;
    const gpa = sema.gpa;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);

    // we check this here to avoid undefined symbols
    if (!build_options.have_llvm)
        return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{});

    // Buffer that the @cInclude/@cDefine/etc. instructions write C code into.
    var c_import_buf = std.ArrayList(u8).init(gpa);
    defer c_import_buf.deinit();

    var comptime_reason: Block.ComptimeReason = .{ .c_import = .{
        .block = parent_block,
        .src = src,
    } };
    // The body is always evaluated at comptime, with c_import_buf attached.
    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .inlining = parent_block.inlining,
        .is_comptime = true,
        .comptime_reason = &comptime_reason,
        .c_import_buf = &c_import_buf,
        .runtime_cond = parent_block.runtime_cond,
        .runtime_loop = parent_block.runtime_loop,
        .runtime_index = parent_block.runtime_index,
    };
    defer child_block.instructions.deinit(gpa);

    // Ignore the result, all the relevant operations have written to c_import_buf already.
    _ = try sema.analyzeBodyBreak(&child_block, body);

    // Run translate-c on the accumulated C source.
    var c_import_res = comp.cImport(c_import_buf.items, parent_block.ownerModule()) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
    defer c_import_res.deinit(gpa);

    if (c_import_res.errors.errorMessageCount() != 0) {
        const msg = msg: {
            const msg = try sema.errMsg(&child_block, src, "C import failed", .{});
            errdefer msg.destroy(gpa);

            if (!comp.config.link_libc)
                try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{});

            // Stash the translate-c error bundle on the module, transferring
            // ownership out of c_import_res so its deinit doesn't free it.
            const gop = try mod.cimport_errors.getOrPut(gpa, sema.owner_decl_index);
            if (!gop.found_existing) {
                gop.value_ptr.* = c_import_res.errors;
                c_import_res.errors = std.zig.ErrorBundle.empty;
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(&child_block, msg);
    }
    const parent_mod = parent_block.ownerModule();
    // Wrap the translated output file in a new module that mirrors the
    // parent module's configuration.
    const c_import_mod = Package.Module.create(comp.arena, .{
        .global_cache_directory = comp.global_cache_directory,
        .paths = .{
            .root = .{
                .root_dir = Compilation.Directory.cwd(),
                .sub_path = std.fs.path.dirname(c_import_res.out_zig_path) orelse "",
            },
            .root_src_path = std.fs.path.basename(c_import_res.out_zig_path),
        },
        .fully_qualified_name = c_import_res.out_zig_path,
        .cc_argv = parent_mod.cc_argv,
        .inherited = .{},
        .global = comp.config,
        .parent = parent_mod,
        .builtin_mod = parent_mod.getBuiltinDependency(),
    }) catch |err| switch (err) {
        // None of these are possible because we are creating a package with
        // the exact same configuration as the parent package, which already
        // passed these checks.
        error.ValgrindUnsupportedOnTarget => unreachable,
        error.TargetRequiresSingleThreaded => unreachable,
        error.BackendRequiresSingleThreaded => unreachable,
        error.TargetRequiresPic => unreachable,
        error.PieRequiresPic => unreachable,
        error.DynamicLinkingRequiresPic => unreachable,
        error.TargetHasNoRedZone => unreachable,
        error.StackCheckUnsupportedByTarget => unreachable,
        error.StackProtectorUnsupportedByTarget => unreachable,
        error.StackProtectorUnavailableWithoutLibC => unreachable,

        else => |e| return e,
    };

    const result = mod.importPkg(c_import_mod) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});

    mod.astGenFile(result.file) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});

    // Semantically analyze the translated file and return its root decl.
    try mod.semaFile(result.file);
    const file_root_decl_index = result.file.root_decl.unwrap().?;
    return sema.analyzeDeclVal(parent_block, src, file_root_decl_index);
}
|
|
|
|
/// `suspend` blocks are unsupported: async is not currently implemented, so
/// analyzing one always reports a "use of async" error at the block's source
/// location. Never returns normally.
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    return sema.failWithUseOfAsync(parent_block, pl_node.src());
}
|
|
|
|
/// Analyzes a ZIR `block` instruction: reserves an AIR `block` instruction up
/// front (so nested `break`s have something to target), builds a labeled child
/// `Block` inheriting the parent's analysis state, then delegates body analysis
/// and merge handling to `resolveBlockBody`.
fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_comptime: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);
    const gpa = sema.gpa;

    // Reserve space for a Block instruction so that generated Break instructions can
    // point to it, even if it doesn't end up getting used because the code ends up being
    // comptime evaluated or is an unlabeled block.
    // NOTE: the index must be captured *before* the append.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined, // patched later once the body and result type are known
    });

    // Label lets `zirBreak` find this block by its ZIR index and record merge results.
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    // Child block inherits all analysis-context state from the parent; comptime-ness
    // may additionally be forced by the caller (`force_comptime`).
    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = parent_block.inlining,
        .is_comptime = parent_block.is_comptime or force_comptime,
        .comptime_reason = parent_block.comptime_reason,
        .is_typeof = parent_block.is_typeof,
        .want_safety = parent_block.want_safety,
        .float_mode = parent_block.float_mode,
        .c_import_buf = parent_block.c_import_buf,
        .runtime_cond = parent_block.runtime_cond,
        .runtime_loop = parent_block.runtime_loop,
        .runtime_index = parent_block.runtime_index,
        .error_return_trace_index = parent_block.error_return_trace_index,
    };

    defer child_block.instructions.deinit(gpa);
    defer label.merges.deinit(gpa);

    return sema.resolveBlockBody(parent_block, src, &child_block, body, inst, &label.merges);
}
|
|
|
|
/// Analyzes `body` inside `child_block` and merges the result into
/// `parent_block`. In a comptime scope the body is resolved directly;
/// otherwise runtime instructions are analyzed and `analyzeBlockBody`
/// finalizes the reserved AIR block. A `ComptimeBreak` thrown from within the
/// body is either consumed here (if it targets `body_inst`) or re-thrown so
/// an enclosing block can handle it.
fn resolveBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    body: []const Zir.Inst.Index,
    /// This is the instruction that a break instruction within `body` can
    /// use to return from the body.
    body_inst: Zir.Inst.Index,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    if (child_block.is_comptime) {
        return sema.resolveBody(child_block, body, body_inst);
    } else {
        // Debug-scope requests from nested `dbg_var` instructions are reported
        // through this flag; see `addDbgVar`.
        var need_debug_scope = false;
        child_block.need_debug_scope = &need_debug_scope;
        if (sema.analyzeBodyInner(child_block, body)) |_| {
            return sema.analyzeBlockBody(parent_block, src, child_block, merges, need_debug_scope);
        } else |err| switch (err) {
            error.ComptimeBreak => {
                // Comptime control flow is happening, however child_block may still contain
                // runtime instructions which need to be copied to the parent block.
                if (need_debug_scope and child_block.instructions.items.len > 0) {
                    // We need a runtime block for scoping reasons.
                    _ = try child_block.addBr(merges.block_inst, .void_value);
                    try parent_block.instructions.append(sema.gpa, merges.block_inst);
                    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len +
                        child_block.instructions.items.len);
                    // Patch the reserved block instruction with the now-known body.
                    sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
                        .ty = .void_type,
                        .payload = sema.addExtraAssumeCapacity(Air.Block{
                            .body_len = @intCast(child_block.instructions.items.len),
                        }),
                    } };
                    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));
                } else {
                    // We can copy instructions directly to the parent block.
                    try parent_block.instructions.appendSlice(sema.gpa, child_block.instructions.items);
                }

                // Determine whether the comptime break targets *this* block; if
                // so resolve its operand, otherwise propagate it outward.
                const break_inst = sema.comptime_break_inst;
                const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
                const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
                if (extra.block_inst == body_inst) {
                    return try sema.resolveInst(break_data.operand);
                } else {
                    return error.ComptimeBreak;
                }
            },
            else => |e| return e,
        }
    }
}
|
|
|
|
/// Finalizes a runtime block after its body has been analyzed: decides whether
/// the reserved AIR `block` instruction is actually needed, resolves the peer
/// type of all `break` results, and retro-fits type coercions onto the break
/// operands. `child_block.instructions` must end in a `noreturn` instruction.
fn analyzeBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    merges: *Block.Merges,
    need_debug_scope: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const mod = sema.mod;

    // Blocks must terminate with noreturn instruction.
    assert(child_block.instructions.items.len != 0);
    assert(sema.typeOf(child_block.instructions.items[child_block.instructions.items.len - 1].toRef()).isNoReturn(mod));

    if (merges.results.items.len == 0) {
        // No need for a block instruction. We can put the new instructions
        // directly into the parent block.
        if (need_debug_scope) {
            // The code following this block is unreachable, as the block has no
            // merges, so we don't necessarily need to emit this as an AIR block.
            // However, we need a block *somewhere* to make the scoping correct,
            // so forward this request to the parent block.
            if (parent_block.need_debug_scope) |ptr| ptr.* = true;
        }
        try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
        return child_block.instructions.items[child_block.instructions.items.len - 1].toRef();
    }
    if (merges.results.items.len == 1) {
        // If the `break` is trailing, we may be able to elide the AIR block here
        // by appending the new instructions directly to the parent block.
        if (!need_debug_scope) {
            const last_inst_index = child_block.instructions.items.len - 1;
            const last_inst = child_block.instructions.items[last_inst_index];
            if (sema.getBreakBlock(last_inst)) |br_block| {
                if (br_block == merges.block_inst) {
                    // Great, the last instruction is the break! Put the instructions
                    // directly into the parent block.
                    try parent_block.instructions.appendSlice(gpa, child_block.instructions.items[0..last_inst_index]);
                    return merges.results.items[0];
                }
            }
        }
        // Okay, we need a runtime block. If the value is comptime-known, the
        // block should just return void, and we return the merge result
        // directly. Otherwise, we can defer to the logic below.
        if (try sema.resolveValue(merges.results.items[0])) |result_val| {
            // Create a block containing all instruction from the body.
            try parent_block.instructions.append(gpa, merges.block_inst);
            try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
                child_block.instructions.items.len);
            sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
                .ty = .void_type,
                .payload = sema.addExtraAssumeCapacity(Air.Block{
                    .body_len = @intCast(child_block.instructions.items.len),
                }),
            } };
            sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));
            // Rewrite the break to just give value {}; the value is
            // comptime-known and will be returned directly.
            sema.air_instructions.items(.data)[@intFromEnum(merges.br_list.items[0])].br.operand = .void_value;
            return Air.internedToRef(result_val.toIntern());
        }
    }
    // It is impossible to have the number of results be > 1 in a comptime scope.
    assert(!child_block.is_comptime); // Should have already produced a compile error in the condbr condition.

    // Note that we'll always create an AIR block here, so `need_debug_scope` is irrelevant.

    // Need to set the type and emit the Block instruction. This allows machine code generation
    // to emit a jump instruction to after the block when it encounters the break.
    try parent_block.instructions.append(gpa, merges.block_inst);
    const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .{ .override = merges.src_locs.items });
    // TODO add note "missing else causes void value"

    const type_src = src; // TODO: better source location
    if (try sema.typeRequiresComptime(resolved_ty)) {
        // A comptime-only result type cannot depend on runtime control flow;
        // emit an error pointing at the runtime branch/loop responsible.
        const msg = msg: {
            const msg = try sema.errMsg(child_block, type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);

            const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?;
            try mod.errNoteNonLazy(runtime_src, msg, "runtime control flow here", .{});

            const child_src_decl = mod.declPtr(child_block.src_decl);
            try sema.explainWhyTypeIsComptime(msg, child_src_decl.toSrcLoc(type_src, mod), resolved_ty);

            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(child_block, msg);
    }
    const ty_inst = Air.internedToRef(resolved_ty.toIntern());
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
        child_block.instructions.items.len);
    sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
        .ty = ty_inst,
        .payload = sema.addExtraAssumeCapacity(Air.Block{
            .body_len = @intCast(child_block.instructions.items.len),
        }),
    } };
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));
    // Now that the block has its type resolved, we need to go back into all the break
    // instructions, and insert type coercion on the operands.
    for (merges.br_list.items) |br| {
        const br_operand = sema.air_instructions.items(.data)[@intFromEnum(br)].br.operand;
        const br_operand_src = src;
        const br_operand_ty = sema.typeOf(br_operand);
        if (br_operand_ty.eql(resolved_ty, mod)) {
            // No type coercion needed.
            continue;
        }
        var coerce_block = parent_block.makeSubBlock();
        defer coerce_block.instructions.deinit(gpa);
        const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src);
        // If no instructions were produced, such as in the case of a coercion of a
        // constant value to a new type, we can simply point the br operand to it.
        if (coerce_block.instructions.items.len == 0) {
            sema.air_instructions.items(.data)[@intFromEnum(br)].br.operand = coerced_operand;
            continue;
        }
        assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1].toRef() == coerced_operand);

        // Convert the br instruction to a block instruction that has the coercion
        // and then a new br inside that returns the coerced instruction.
        const sub_block_len: u32 = @intCast(coerce_block.instructions.items.len + 1);
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
            sub_block_len);
        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        const sub_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);

        sema.air_instructions.items(.tag)[@intFromEnum(br)] = .block;
        sema.air_instructions.items(.data)[@intFromEnum(br)] = .{ .ty_pl = .{
            .ty = .noreturn_type,
            .payload = sema.addExtraAssumeCapacity(Air.Block{
                .body_len = sub_block_len,
            }),
        } };
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items));
        sema.air_extra.appendAssumeCapacity(@intFromEnum(sub_br_inst));

        sema.air_instructions.appendAssumeCapacity(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = merges.block_inst,
                .operand = coerced_operand,
            } },
        });
    }
    return merges.block_inst.toRef();
}
|
|
|
|
/// Analyzes a ZIR `export` instruction (declaration form): resolves the target
/// declaration by name — optionally within an explicit namespace type — then
/// forwards to `analyzeExport`. If the resolved decl's value is a function,
/// the function's owner decl is exported instead.
fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name));
    const decl_index = if (extra.namespace != .none) index_blk: {
        // Qualified form: look the name up inside the given container type's namespace.
        const container_ty = try sema.resolveType(block, operand_src, extra.namespace);
        const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?;

        const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false);
        break :index_blk maybe_index orelse
            return sema.failWithBadMemberAccess(block, container_ty, operand_src, decl_name);
    } else try sema.lookupIdentifier(block, operand_src, decl_name);
    // First attempt without a source location (cheap path); on
    // `NeededSourceLocation`, retry with the real location to produce the error.
    const options = sema.resolveExportOptions(block, .unneeded, extra.options) catch |err| switch (err) {
        error.NeededSourceLocation => {
            _ = try sema.resolveExportOptions(block, options_src, extra.options);
            unreachable;
        },
        else => |e| return e,
    };
    {
        try mod.ensureDeclAnalyzed(decl_index);
        const exported_decl = mod.declPtr(decl_index);
        // Functions are exported via their owner decl rather than the alias decl.
        if (exported_decl.val.getFunction(mod)) |function| {
            return sema.analyzeExport(block, src, options, function.owner_decl);
        }
    }
    try sema.analyzeExport(block, src, options, decl_index);
}
|
|
|
|
/// Analyzes a ZIR `export_value` instruction (value form of export): the
/// operand must be a comptime-known value. Internal linkage is a no-op;
/// function values are routed through `analyzeExport` on their owner decl,
/// and any other value is registered directly via `addExport`.
fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand = try sema.resolveInstConst(block, operand_src, extra.operand, .{
        .needed_comptime_reason = "export target must be comptime-known",
    });
    const options = try sema.resolveExportOptions(block, options_src, extra.options);
    // Internal linkage means nothing is actually exported.
    if (options.linkage == .Internal)
        return;
    if (operand.val.getFunction(mod)) |function| {
        const decl_index = function.owner_decl;
        return sema.analyzeExport(block, src, options, decl_index);
    }

    try addExport(mod, .{
        .opts = options,
        .src = src,
        .owner_decl = sema.owner_decl_index,
        .src_decl = block.src_decl,
        .exported = .{ .value = operand.val.toIntern() },
        .status = .in_progress,
    });
}
|
|
|
|
/// Validates and registers the export of a declaration: checks the decl's type
/// is extern-compatible, rejects re-exports of extern decls, marks the decl
/// alive, queues function body analysis if applicable, and records the export.
/// Internal linkage is a no-op.
pub fn analyzeExport(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    options: Module.Export.Options,
    exported_decl_index: InternPool.DeclIndex,
) !void {
    const gpa = sema.gpa;
    const mod = sema.mod;

    if (options.linkage == .Internal)
        return;

    try mod.ensureDeclAnalyzed(exported_decl_index);
    const exported_decl = mod.declPtr(exported_decl_index);

    // Exported values must have an extern-compatible type; explain why if not.
    if (!try sema.validateExternType(exported_decl.ty, .other)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)});
            errdefer msg.destroy(gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), exported_decl.ty, .other);

            try sema.addDeclaredHereNote(msg, exported_decl.ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // TODO: some backends might support re-exporting extern decls
    if (exported_decl.isExtern(mod)) {
        return sema.fail(block, src, "export target cannot be extern", .{});
    }

    // This decl is alive no matter what, since it's being exported
    try mod.markDeclAlive(exported_decl);
    try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);

    try addExport(mod, .{
        .opts = options,
        .src = src,
        .owner_decl = sema.owner_decl_index,
        .src_decl = block.src_decl,
        .exported = .{ .decl_index = exported_decl_index },
        .status = .in_progress,
    });
}
|
|
|
|
/// Records a new export in the module's export tables: allocates a
/// `Module.Export`, appends it to the owner decl's export list, and indexes it
/// by either the exported decl or the exported value. Capacity is reserved up
/// front and `errdefer`s roll back partial insertions on OOM.
fn addExport(mod: *Module, export_init: Module.Export) error{OutOfMemory}!void {
    const gpa = mod.gpa;

    // Reserve map capacity first so later getOrPutAssumeCapacity cannot fail.
    try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
    try mod.value_exports.ensureUnusedCapacity(gpa, 1);
    try mod.export_owners.ensureUnusedCapacity(gpa, 1);

    const new_export = try gpa.create(Module.Export);
    errdefer gpa.destroy(new_export);

    new_export.* = export_init;

    const eo_gop = mod.export_owners.getOrPutAssumeCapacity(export_init.owner_decl);
    if (!eo_gop.found_existing) eo_gop.value_ptr.* = .{};
    try eo_gop.value_ptr.append(gpa, new_export);
    // If the second insertion below fails, undo this append.
    errdefer _ = eo_gop.value_ptr.pop();

    switch (export_init.exported) {
        .decl_index => |decl_index| {
            const de_gop = mod.decl_exports.getOrPutAssumeCapacity(decl_index);
            if (!de_gop.found_existing) de_gop.value_ptr.* = .{};
            try de_gop.value_ptr.append(gpa, new_export);
        },
        .value => |value| {
            const ve_gop = mod.value_exports.getOrPutAssumeCapacity(value);
            if (!ve_gop.found_existing) ve_gop.value_ptr.* = .{};
            try ve_gop.value_ptr.append(gpa, new_export);
        },
    }
}
|
|
|
|
/// Analyzes `@setAlignStack`: validates the requested alignment (max 256),
/// rejects use in naked/inline functions and inline calls, rejects a second
/// call in the same function body, and raises the function's recorded stack
/// alignment to at least the requested value.
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const src = LazySrcLoc.nodeOffset(extra.node);
    const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
    if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
        return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
            alignment.toByteUnitsOptional().?,
        });
    }

    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
    switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
        .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
        .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
        else => if (block.inlining != null) {
            return sema.fail(block, src, "@setAlignStack in inline call", .{});
        },
    }

    // Only one @setAlignStack per function body is allowed.
    if (sema.prev_stack_alignment_src) |prev_src| {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, prev_src, msg, "other instance here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    sema.prev_stack_alignment_src = src;

    const ip = &mod.intern_pool;
    const a = ip.funcAnalysis(sema.func_index);
    // NOTE(review): when stack_alignment is `.none` the requested alignment is
    // silently dropped — presumably `.none` means alignment tracking is
    // disabled for this function; confirm against funcAnalysis semantics.
    if (a.stack_alignment != .none) {
        a.stack_alignment = @enumFromInt(@max(
            @intFromEnum(alignment),
            @intFromEnum(a.stack_alignment),
        ));
    }
}
|
|
|
|
/// Analyzes `@setCold`: resolves the comptime-known bool operand and records
/// it on the current function's analysis data. Outside of a function body the
/// builtin has no effect.
fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const arg_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };
    const cold = try sema.resolveConstBool(block, arg_src, un_node.operand, .{
        .needed_comptime_reason = "operand to @setCold must be comptime-known",
    });
    // Does nothing outside a function.
    if (sema.func_index == .none) return;
    const ip = &sema.mod.intern_pool;
    ip.funcAnalysis(sema.func_index).is_cold = cold;
}
|
|
|
|
/// Analyzes `@setFloatMode`: resolves the comptime-known `FloatMode` enum
/// operand and stores it on the current block, affecting subsequent float ops
/// analyzed within this scope.
fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const arg_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };
    const mode = try sema.resolveBuiltinEnum(block, arg_src, un_node.operand, "FloatMode", .{
        .needed_comptime_reason = "operand to @setFloatMode must be comptime-known",
    });
    block.float_mode = mode;
}
|
|
|
|
/// Analyzes `@setRuntimeSafety`: resolves the comptime-known bool operand and
/// stores it on the current block, controlling safety-check emission for the
/// rest of this scope.
fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const arg_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.src_node };
    const want_safety = try sema.resolveConstBool(block, arg_src, un_node.operand, .{
        .needed_comptime_reason = "operand to @setRuntimeSafety must be comptime-known",
    });
    block.want_safety = want_safety;
}
|
|
|
|
/// Analyzes `@fence`: resolves the comptime-known atomic ordering, requires it
/// to be `Acquire` or stricter, and emits an AIR `fence` instruction. A fence
/// in a comptime scope is a no-op.
fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    if (block.is_comptime) return;

    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };
    const order = try sema.resolveAtomicOrder(block, order_src, un_node.operand, .{
        .needed_comptime_reason = "atomic order of @fence must be comptime-known",
    });

    // Orderings weaker than Acquire are meaningless for a fence.
    const too_weak = @intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire);
    if (too_weak) {
        return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
    }

    _ = try block.addInst(.{
        .tag = .fence,
        .data = .{ .fence = order },
    });
}
|
|
|
|
/// Analyzes a ZIR `break` instruction at runtime: walks up the block chain to
/// find the labeled block it targets, emits an AIR `br`, and records the
/// operand/source/br in that block's merge lists so `analyzeBlockBody` can
/// later resolve the peer type. Returns the ZIR instruction index itself.
fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"break";
    const extra = sema.code.extraData(Zir.Inst.Break, inst_data.payload_index).data;
    const operand = try sema.resolveInst(inst_data.operand);
    const zir_block = extra.block_inst;

    var block = start_block;
    while (true) {
        if (block.label) |label| {
            if (label.zir_block == zir_block) {
                // Found the target block; emit the br and record the merge.
                const br_ref = try start_block.addBr(label.merges.block_inst, operand);
                const src_loc = if (extra.operand_src_node != Zir.Inst.Break.no_src_node)
                    LazySrcLoc.nodeOffset(extra.operand_src_node)
                else
                    null;
                try label.merges.src_locs.append(sema.gpa, src_loc);
                try label.merges.results.append(sema.gpa, operand);
                try label.merges.br_list.append(sema.gpa, br_ref.toIndex().?);
                block.runtime_index.increment();
                // Propagate the innermost runtime condition/loop source into
                // the target block if it doesn't have one yet.
                if (block.runtime_cond == null and block.runtime_loop == null) {
                    block.runtime_cond = start_block.runtime_cond orelse start_block.runtime_loop;
                    block.runtime_loop = start_block.runtime_loop;
                }
                return inst;
            }
        }
        // AstGen guarantees the target block exists, so `parent` is non-null
        // until we find it.
        block = block.parent.?;
    }
}
|
|
|
|
/// Analyzes a ZIR `dbg_stmt`: emits an AIR `dbg_stmt` carrying line/column
/// info. Skipped entirely at comptime or when the owning module strips debug
/// info. If the previous AIR instruction is already a `dbg_stmt`, it produced
/// no actual code, so it is updated in place instead of appending another.
fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    if (block.is_comptime or block.ownerModule().strip) return;

    const dbg = sema.code.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;

    if (block.instructions.items.len != 0) {
        const prev = block.instructions.items[block.instructions.items.len - 1];
        const prev_tag = sema.air_instructions.items(.tag)[@intFromEnum(prev)];
        if (prev_tag == .dbg_stmt) {
            // The previous dbg_stmt didn't correspond to any actual code, so replace it.
            sema.air_instructions.items(.data)[@intFromEnum(prev)].dbg_stmt = .{
                .line = dbg.line,
                .column = dbg.column,
            };
            return;
        }
    }

    _ = try block.addInst(.{
        .tag = .dbg_stmt,
        .data = .{ .dbg_stmt = .{
            .line = dbg.line,
            .column = dbg.column,
        } },
    });
}
|
|
|
|
/// Analyzes a ZIR debug-variable instruction: resolves the operand, extracts
/// the variable name from the ZIR string table, and forwards both to
/// `addDbgVar` with the requested AIR tag (`dbg_var_ptr` or `dbg_var_val`).
fn zirDbgVar(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!void {
    const str_op = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_op;
    const var_name = str_op.getStr(sema.code);
    const resolved = try sema.resolveInst(str_op.operand);
    try sema.addDbgVar(block, resolved, air_tag, var_name);
}
|
|
|
|
/// Emits an AIR debug-variable instruction (`dbg_var_ptr`/`dbg_var_val`) for
/// `operand` named `name`. Skipped at comptime, under `strip`, and for values
/// with no runtime bits or comptime-only types. The name is encoded inline
/// into `air_extra` as a null-terminated string packed into u32 elements.
fn addDbgVar(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    air_tag: Air.Inst.Tag,
    name: []const u8,
) CompileError!void {
    if (block.is_comptime or block.ownerModule().strip) return;

    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    // For pointers (`dbg_var_ptr`) the debugged value is the pointee.
    const val_ty = switch (air_tag) {
        .dbg_var_ptr => operand_ty.childType(mod),
        .dbg_var_val => operand_ty,
        else => unreachable,
    };
    if (try sema.typeRequiresComptime(val_ty)) return;
    if (!(try sema.typeHasRuntimeBits(val_ty))) return;

    // To ensure the lexical scoping is known to backends, this alloc must be
    // within a real runtime block. We set a flag which communicates information
    // to the closest lexically enclosing block:
    // * If it is a `block_inline`, communicates to logic in `analyzeBodyInner`
    //   to create a post-hoc block.
    // * Otherwise, communicates to logic in `resolveBlockBody` to create a
    //   real `block` instruction.
    if (block.need_debug_scope) |ptr| ptr.* = true;

    try sema.queueFullTypeResolution(operand_ty);

    // Add the name to the AIR.
    // `name.len / 4 + 1` u32 elements is always enough for the bytes plus a
    // null terminator.
    const name_extra_index: u32 = @intCast(sema.air_extra.items.len);
    const elements_used = name.len / 4 + 1;
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
    const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
    @memcpy(buffer[0..name.len], name);
    buffer[name.len] = 0;
    sema.air_extra.items.len += elements_used;

    _ = try block.addInst(.{
        .tag = air_tag,
        .data = .{ .pl_op = .{
            .payload = name_extra_index,
            .operand = operand,
        } },
    });
}
|
|
|
|
/// Analyzes a ZIR `decl_ref`: resolves the identifier to a declaration,
/// records the reference for dependency tracking, and returns a pointer to
/// the declaration.
fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const str_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const src = str_tok.src();
    const decl_name = try sema.mod.intern_pool.getOrPutString(sema.gpa, str_tok.get(sema.code));
    const decl_index = try sema.lookupIdentifier(block, src, decl_name);
    try sema.addReferencedBy(block, src, decl_index);
    return sema.analyzeDeclRef(decl_index);
}
|
|
|
|
/// Analyzes a ZIR `decl_val`: resolves the identifier to a declaration and
/// returns the declaration's value.
fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const str_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const src = str_tok.src();
    const decl_name = try sema.mod.intern_pool.getOrPutString(sema.gpa, str_tok.get(sema.code));
    const decl_index = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeDeclVal(block, src, decl_index);
}
|
|
|
|
/// Resolves an identifier by searching the current namespace and then each
/// enclosing parent namespace. Does not observe `usingnamespace` decls.
/// AstGen guarantees the identifier exists, so failure to find it is
/// unreachable.
fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.DeclIndex {
    const mod = sema.mod;
    var ns_index = block.namespace;
    while (true) {
        if (try sema.lookupInNamespace(block, src, ns_index, name, false)) |decl_index| return decl_index;
        ns_index = mod.namespacePtr(ns_index).parent.unwrap() orelse break;
    }
    unreachable; // AstGen detects use of undeclared identifiers.
}
|
|
|
|
/// This looks up a member of a specific namespace. It is affected by `usingnamespace` but
/// only for ones in the specified namespace.
/// Returns null if the name is not found. When `observe_usingnamespace` is
/// set, performs a breadth-first traversal of all reachable `usingnamespace`
/// namespaces, collecting candidates and reporting an "ambiguous reference"
/// error if more than one distinct decl matches.
fn lookupInNamespace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace_index: InternPool.NamespaceIndex,
    ident_name: InternPool.NullTerminatedString,
    observe_usingnamespace: bool,
) CompileError!?InternPool.DeclIndex {
    const mod = sema.mod;

    const namespace = mod.namespacePtr(namespace_index);
    const namespace_decl = mod.declPtr(namespace.decl_index);
    if (namespace_decl.analysis == .file_failure) {
        return error.AnalysisFail;
    }

    if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
        const src_file = mod.namespacePtr(block.namespace).file_scope;

        const gpa = sema.gpa;
        // Worklist of namespaces to search; the bool value records whether the
        // namespace is in the same file as the lookup site (affects pub rules).
        var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{};
        defer checked_namespaces.deinit(gpa);

        // Keep track of name conflicts for error notes.
        var candidates: std.ArrayListUnmanaged(InternPool.DeclIndex) = .{};
        defer candidates.deinit(gpa);

        try checked_namespaces.put(gpa, namespace, namespace.file_scope == src_file);
        var check_i: usize = 0;

        // Breadth-first traversal: new namespaces discovered via
        // `usingnamespace` are appended and visited in turn.
        while (check_i < checked_namespaces.count()) : (check_i += 1) {
            const check_ns = checked_namespaces.keys()[check_i];
            if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .zcu = mod })) |decl_index| {
                // Skip decls which are not marked pub, which are in a different
                // file than the `a.b`/`@hasDecl` syntax.
                const decl = mod.declPtr(decl_index);
                if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) {
                    try candidates.append(gpa, decl_index);
                }
            }
            var it = check_ns.usingnamespace_set.iterator();
            while (it.next()) |entry| {
                const sub_usingnamespace_decl_index = entry.key_ptr.*;
                // Skip the decl we're currently analysing.
                if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue;
                const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index);
                const sub_is_pub = entry.value_ptr.*;
                if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) {
                    // Skip usingnamespace decls which are not marked pub, which are in
                    // a different file than the `a.b`/`@hasDecl` syntax.
                    continue;
                }
                try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index);
                const ns_ty = sub_usingnamespace_decl.val.toType();
                const sub_ns = ns_ty.getNamespace(mod).?;
                try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod));
            }
        }

        {
            // The decl currently being analyzed never counts as a candidate.
            var i: usize = 0;
            while (i < candidates.items.len) {
                if (candidates.items[i] == sema.owner_decl_index) {
                    _ = candidates.orderedRemove(i);
                } else {
                    i += 1;
                }
            }
        }

        switch (candidates.items.len) {
            0 => {},
            1 => {
                const decl_index = candidates.items[0];
                return decl_index;
            },
            else => {
                // More than one match: ambiguous reference; note each candidate.
                const msg = msg: {
                    const msg = try sema.errMsg(block, src, "ambiguous reference", .{});
                    errdefer msg.destroy(gpa);
                    for (candidates.items) |candidate_index| {
                        const candidate = mod.declPtr(candidate_index);
                        const src_loc = candidate.srcLoc(mod);
                        try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
                    }
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            },
        }
    } else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .zcu = mod })) |decl_index| {
        return decl_index;
    }

    return null;
}
|
|
|
|
/// Attempts to find the `Decl` which owns the function referred to by `func_inst`,
/// so that error notes can point at its declaration site.
/// Returns null when the operand is not a comptime-known, defined function value
/// (e.g. `undefined`, or a pointer whose target is not a known function).
fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
    const mod = sema.mod;
    const fn_val = (try sema.resolveValue(func_inst)) orelse return null;
    if (fn_val.isUndef(mod)) return null;
    const decl_index = decl_index: {
        switch (mod.intern_pool.indexToKey(fn_val.toIntern())) {
            .extern_func => |extern_func| break :decl_index extern_func.decl,
            .func => |func| break :decl_index func.owner_decl,
            .ptr => |ptr| switch (ptr.addr) {
                // Pointer to a decl: follow it, and use the pointed-to function's owner.
                .decl => |decl| break :decl_index mod.declPtr(decl).val.getFunction(mod).?.owner_decl,
                else => return null,
            },
            else => return null,
        }
    };
    return mod.declPtr(decl_index);
}
|
|
|
|
/// Captures the current length of the error return trace ("save point") so a
/// later pop can restore `StackTrace.index` to it.
/// At comptime the save point is the length of `comptime_err_ret_trace`;
/// at runtime it is a `save_err_return_trace_index` AIR instruction.
/// Returns `.none` when error tracing is disabled for the owner module.
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;

    if (block.is_comptime or block.is_typeof) {
        // Comptime trace is modeled as a list; its current length is the saved index.
        const index_val = try mod.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
        return Air.internedToRef(index_val.toIntern());
    }

    if (!block.ownerModule().error_tracing) return .none;

    // The explicit `catch` switches below narrow the callees' error sets to this
    // function's `SemaError`; the listed errors cannot occur for these calls.
    const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
        error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        else => |e| return e,
    };
    sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) {
        error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        else => |e| return e,
    };
    // Locate the `index` field of std.builtin.StackTrace; a lookup failure means
    // the std.builtin definition itself is broken.
    const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
    const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, .unneeded) catch |err| switch (err) {
        error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.StackTrace is corrupt"),
        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        error.OutOfMemory => |e| return e,
    };

    return try block.addInst(.{
        .tag = .save_err_return_trace_index,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(stack_trace_ty.toIntern()),
            // Payload is the field index of `StackTrace.index`, so backends can
            // address the field directly.
            .payload = @intCast(field_index),
        } },
    });
}
|
|
|
|
/// Add instructions to block to "pop" the error return trace.
/// If `operand` is provided, only pops if operand is non-error.
/// "Popping" means restoring `StackTrace.index` to `saved_error_trace_index`
/// (previously captured via `analyzeSaveErrRetIndex`), discarding trace entries
/// appended since that save point.
fn popErrorReturnTrace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    saved_error_trace_index: Air.Inst.Ref,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    // Tri-state: true => definitely non-error (pop unconditionally);
    // false => definitely an error (leave the trace alone);
    // null => unknown at comptime (emit a runtime conditional pop).
    var is_non_error: ?bool = null;
    var is_non_error_inst: Air.Inst.Ref = undefined;
    if (operand != .none) {
        is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand);
        if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val|
            is_non_error = cond_val.toBool();
    } else is_non_error = true; // no operand means pop unconditionally

    if (is_non_error == true) {
        // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
        // the result is comptime-known to be a non-error. Either way, pop unconditionally.

        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(stack_trace_ty);
        const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
        const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
        const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
        const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true);
        // Store the saved index back into `StackTrace.index`, effecting the pop.
        try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
    } else if (is_non_error == null) {
        // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need
        // to pop any error trace that may have been propagated from our arguments.

        // Emit a wrapper block containing a single cond_br: `if (non-error) pop else {}`.
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len);
        const cond_block_inst = try block.addInstAsIndex(.{
            .tag = .block,
            .data = .{
                .ty_pl = .{
                    .ty = .void_type,
                    .payload = undefined, // updated below
                },
            },
        });

        var then_block = block.makeSubBlock();
        defer then_block.instructions.deinit(gpa);

        // If non-error, then pop the error return trace by restoring the index.
        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(stack_trace_ty);
        const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
        const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
        const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
        const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true);
        try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
        _ = try then_block.addBr(cond_block_inst, .void_value);

        // Otherwise, do nothing
        var else_block = block.makeSubBlock();
        defer else_block.instructions.deinit(gpa);
        _ = try else_block.addBr(cond_block_inst, .void_value);

        // NOTE: the capacity reserved here must cover every *AssumeCapacity
        // append below; keep the two in sync when editing.
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
            then_block.instructions.items.len + else_block.instructions.items.len +
            @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block

        const cond_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{
            .operand = is_non_error_inst,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @intCast(then_block.instructions.items.len),
                .else_body_len = @intCast(else_block.instructions.items.len),
            }),
        } } });
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items));
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items));

        // Back-patch the wrapper block's payload to point at its one-instruction
        // body: the cond_br emitted above.
        sema.air_instructions.items(.data)[@intFromEnum(cond_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 });
        sema.air_extra.appendAssumeCapacity(@intFromEnum(cond_br_inst));
    }
}
|
|
|
|
/// Analyzes a ZIR `call` (direct) or `field_call` (method-call syntax)
/// instruction: resolves the callee (binding the object argument for field
/// calls), checks the argument count, performs the call via `analyzeCall`,
/// and then emits any error-return-trace save/pop bookkeeping required
/// around the call.
fn zirCall(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime kind: enum { direct, field },
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
    const call_src = inst_data.src();
    // The two call forms use different extra-data layouts.
    const ExtraType = switch (kind) {
        .direct => Zir.Inst.Call,
        .field => Zir.Inst.FieldCall,
    };
    const extra = sema.code.extraData(ExtraType, inst_data.payload_index);
    const args_len = extra.data.flags.args_len;

    const modifier: std.builtin.CallModifier = @enumFromInt(extra.data.flags.packed_modifier);
    const ensure_result_used = extra.data.flags.ensure_result_used;
    const pop_error_return_trace = extra.data.flags.pop_error_return_trace;

    // For field calls (`a.b(...)`), resolve the object pointer and bind the
    // field; this may yield a plain function or a method with a bound first arg.
    const callee: ResolvedFieldCallee = switch (kind) {
        .direct => .{ .direct = try sema.resolveInst(extra.data.callee) },
        .field => blk: {
            const object_ptr = try sema.resolveInst(extra.data.obj_ptr);
            const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start));
            const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
            break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src);
        },
    };
    const func: Air.Inst.Ref = switch (callee) {
        .direct => |func_inst| func_inst,
        .method => |method| method.func_inst,
    };

    const callee_ty = sema.typeOf(func);
    // A bound method implicitly supplies its first argument.
    const total_args = args_len + @intFromBool(callee == .method);
    const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, callee == .method);

    // The block index before the call, so we can potentially insert an error trace save here later.
    const block_index: Air.Inst.Index = @enumFromInt(block.instructions.items.len);

    // This will be set by `analyzeCall` to indicate whether any parameter was an error (making the
    // error trace potentially dirty).
    var input_is_error = false;

    // Arguments are resolved lazily inside `analyzeCall` so that argument
    // analysis can interleave with generic instantiation (required for RLS).
    const args_info: CallArgsInfo = .{ .zir_call = .{
        .bound_arg = switch (callee) {
            .direct => .none,
            .method => |method| method.arg0_inst,
        },
        .bound_arg_src = callee_src,
        .call_inst = inst,
        .call_node_offset = inst_data.src_node,
        .num_args = args_len,
        .args_body = @ptrCast(sema.code.extra[extra.end..]),
        .any_arg_is_error = &input_is_error,
    } };

    // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction.
    const call_dbg_node: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);
    const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call);

    if (sema.owner_func_index == .none or
        !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
    {
        // No errorable fn actually called; we have no error return trace
        input_is_error = false;
    }

    if (block.ownerModule().error_tracing and
        !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace))
    {
        const return_ty = sema.typeOf(call_inst);
        if (modifier != .always_tail and return_ty.isNoReturn(mod))
            return call_inst; // call to "fn (...) noreturn", don't pop

        // TODO: we don't fix up the error trace for always_tail correctly, we should be doing it
        // *before* the recursive call. This will be a bit tricky to do and probably requires
        // moving this logic into analyzeCall. But that's probably a good idea anyway.
        if (modifier == .always_tail)
            return call_inst;

        // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
        // need to clean-up our own trace if we were passed to a non-error-handling expression.
        if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
            const stack_trace_ty = try sema.getBuiltinType("StackTrace");
            try sema.resolveTypeFields(stack_trace_ty);
            const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index");
            const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);

            // Insert a save instruction before the arg resolution + call instructions we just generated
            const save_inst = try block.insertInst(block_index, .{
                .tag = .save_err_return_trace_index,
                .data = .{ .ty_pl = .{
                    .ty = Air.internedToRef(stack_trace_ty.toIntern()),
                    .payload = @intCast(field_index),
                } },
            });

            // Pop the error return trace, testing the result for non-error if necessary
            const operand = if (pop_error_return_trace or modifier == .always_tail) .none else call_inst;
            try sema.popErrorReturnTrace(block, call_src, operand, save_inst);
        }

        return call_inst;
    } else {
        return call_inst;
    }
}
|
|
|
|
/// Validates that `callee_ty` is a callable type and was passed the correct
/// number of arguments, returning the resolved function type (dereferencing a
/// single-item function pointer if needed). `total_args` includes the bound
/// object argument when `member_fn` is true; the emitted error message
/// subtracts it so counts match what the user wrote. On mismatch, fails with
/// a compile error that includes a "function declared here" note when the
/// callee's declaration can be located.
fn checkCallArgumentCount(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_src: LazySrcLoc,
    callee_ty: Type,
    total_args: usize,
    member_fn: bool,
) !Type {
    const mod = sema.mod;
    const func_ty = func_ty: {
        switch (callee_ty.zigTypeTag(mod)) {
            .Fn => break :func_ty callee_ty,
            .Pointer => {
                // Only a single-item pointer to a function is callable.
                const ptr_info = callee_ty.ptrInfo(mod);
                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
                    break :func_ty Type.fromInterned(ptr_info.child);
                }
            },
            .Optional => {
                // Special-cased to give a helpful hint when calling an optional
                // function (or optional function pointer) without unwrapping.
                const opt_child = callee_ty.optionalChild(mod);
                if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and
                    opt_child.childType(mod).zigTypeTag(mod) == .Fn))
                {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
                            callee_ty.fmt(mod),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            },
            else => {},
        }
        return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)});
    };

    const func_ty_info = mod.typeToFunc(func_ty).?;
    const fn_params_len = func_ty_info.param_types.len;
    const args_len = total_args - @intFromBool(member_fn);
    if (func_ty_info.is_var_args) {
        // Variadic functions accept any count >= the declared parameter count.
        assert(callConvSupportsVarArgs(func_ty_info.cc));
        if (total_args >= fn_params_len) return func_ty;
    } else if (fn_params_len == total_args) {
        return func_ty;
    }

    // Argument count mismatch: build the error message.
    const maybe_decl = try sema.funcDeclSrc(func);
    const member_str = if (member_fn) "member function " else "";
    const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            func_src,
            "{s}expected {s}{d} argument(s), found {d}",
            .{
                member_str,
                variadic_str,
                // Exclude the implicit bound argument from both counts so the
                // message reflects the user's call syntax.
                fn_params_len - @intFromBool(member_fn),
                args_len,
            },
        );
        errdefer msg.destroy(sema.gpa);

        if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Generates a call to a compiler-supplied builtin function (e.g. a panic
/// handler or error-return helper) at `call_src`, discarding the result.
/// Unlike user-written calls, a malformed callee here indicates a compiler
/// bug, so mismatches panic instead of emitting compile errors.
fn callBuiltin(
    sema: *Sema,
    block: *Block,
    call_src: LazySrcLoc,
    builtin_fn: Air.Inst.Ref,
    modifier: std.builtin.CallModifier,
    args: []const Air.Inst.Ref,
    operation: CallOperation,
) !void {
    const mod = sema.mod;
    const callee_ty = sema.typeOf(builtin_fn);
    // Accept either a function type or a single-item pointer to a function.
    const func_ty = func_ty: {
        switch (callee_ty.zigTypeTag(mod)) {
            .Fn => break :func_ty callee_ty,
            .Pointer => {
                const ptr_info = callee_ty.ptrInfo(mod);
                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
                    break :func_ty Type.fromInterned(ptr_info.child);
                }
            },
            else => {},
        }
        std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)});
    };

    const func_ty_info = mod.typeToFunc(func_ty).?;
    const fn_params_len = func_ty_info.param_types.len;
    // A variadic callee may legally receive extra trailing arguments; a
    // non-variadic one must receive exactly `fn_params_len`. (The previous
    // check compared `args.len != fn_params_len` unconditionally, which made
    // its var-args clause dead code and wrongly rejected variadic builtins
    // called with additional arguments.)
    const arg_count_ok = if (func_ty_info.is_var_args)
        args.len >= fn_params_len
    else
        args.len == fn_params_len;
    if (!arg_count_ok) {
        std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len });
    }

    // Arguments are already resolved; hand them to analyzeCall directly.
    // `ensure_result_used` is false and there is no dbg_stmt node for
    // compiler-generated calls.
    _ = try sema.analyzeCall(
        block,
        builtin_fn,
        func_ty,
        call_src,
        call_src,
        modifier,
        false,
        .{ .resolved = .{ .src = call_src, .args = args } },
        null,
        operation,
    );
}
|
|
|
|
/// Identifies the source construct that caused a call to be analyzed. Used
/// to select error-message wording — e.g. the "runtime {s} not allowed in
/// naked function" error interpolates the tag name, and the safety-check
/// variant gets an extra note about @setRuntimeSafety.
const CallOperation = enum {
    /// Ordinary call syntax: `f(a, b)` or `a.b(...)`.
    call,
    /// The `@call` builtin.
    @"@call",
    /// Compiler-generated call for `@panic`.
    @"@panic",
    /// Compiler-generated call of the panic handler for a failed safety check.
    @"safety check",
    /// Compiler-generated call for error-return handling.
    @"error return",
};
|
|
|
|
/// Describes where a call's arguments come from and how to resolve them.
/// `analyzeCall` consumes this to obtain each argument, coerced to its
/// parameter type, regardless of whether the arguments were pre-resolved
/// or must be analyzed from ZIR on demand.
const CallArgsInfo = union(enum) {
    /// The full list of resolved (but uncoerced) arguments is known ahead of time.
    resolved: struct {
        src: LazySrcLoc,
        args: []const Air.Inst.Ref,
    },

    /// The list of resolved (but uncoerced) arguments is known ahead of time, but
    /// originated from a usage of the @call builtin at the given node offset.
    call_builtin: struct {
        call_node_offset: i32,
        args: []const Air.Inst.Ref,
    },

    /// This call corresponds to a ZIR call instruction. The arguments have not yet been
    /// resolved. They must be resolved by `analyzeCall` so that argument resolution and
    /// generic instantiation may be interleaved. This is required for RLS to work on
    /// generic parameters.
    zir_call: struct {
        /// This may be `none`, in which case it is ignored. Otherwise, it is the
        /// already-resolved value of the first argument, from method call syntax.
        bound_arg: Air.Inst.Ref,
        /// The source location of `bound_arg` if it is not `null`. Otherwise `undefined`.
        bound_arg_src: LazySrcLoc,
        /// The ZIR call instruction. The parameter type is placed at this index while
        /// analyzing arguments.
        call_inst: Zir.Inst.Index,
        /// The node offset of `call_inst`.
        call_node_offset: i32,
        /// The number of arguments to this call, not including `bound_arg`.
        num_args: u32,
        /// The ZIR corresponding to all function arguments (other than `bound_arg`, if it
        /// is not `none`). Format is precisely the same as trailing data of ZIR `call`.
        args_body: []const Zir.Inst.Index,
        /// This bool will be set to true if any argument evaluated turns out to have an error set or error union type.
        /// This is used by the caller to restore the error return trace when necessary.
        any_arg_is_error: *bool,
    },

    /// Returns the total argument count, including the bound argument for
    /// method calls.
    fn count(cai: CallArgsInfo) usize {
        return switch (cai) {
            inline .resolved, .call_builtin => |resolved| resolved.args.len,
            .zir_call => |zir_call| zir_call.num_args + @intFromBool(zir_call.bound_arg != .none),
        };
    }

    /// Returns the source location of the argument at `arg_index` (which
    /// counts the bound argument, if any, as index 0).
    fn argSrc(cai: CallArgsInfo, block: *Block, arg_index: usize) LazySrcLoc {
        return switch (cai) {
            .resolved => |resolved| resolved.src,
            .call_builtin => |call_builtin| .{ .call_arg = .{
                .decl = block.src_decl,
                .call_node_offset = call_builtin.call_node_offset,
                .arg_index = @intCast(arg_index),
            } },
            .zir_call => |zir_call| if (arg_index == 0 and zir_call.bound_arg != .none) {
                return zir_call.bound_arg_src;
            } else .{ .call_arg = .{
                .decl = block.src_decl,
                .call_node_offset = zir_call.call_node_offset,
                // Convert parameter index to syntactic argument index.
                .arg_index = @intCast(arg_index - @intFromBool(zir_call.bound_arg != .none)),
            } },
        };
    }

    /// Analyzes the arg at `arg_index` and coerces it to `param_ty`.
    /// `param_ty` may be `generic_poison` or `var_args_param`.
    /// `func_ty_info` may be the type before instantiation, even if a generic
    /// instantiation has been partially completed.
    fn analyzeArg(
        cai: CallArgsInfo,
        sema: *Sema,
        block: *Block,
        arg_index: usize,
        param_ty: Type,
        func_ty_info: InternPool.Key.FuncType,
        func_inst: Air.Inst.Ref,
    ) CompileError!Air.Inst.Ref {
        const mod = sema.mod;
        const param_count = func_ty_info.param_types.len;
        switch (param_ty.toIntern()) {
            .generic_poison_type, .var_args_param_type => {},
            else => try sema.queueFullTypeResolution(param_ty),
        }
        const uncoerced_arg: Air.Inst.Ref = switch (cai) {
            inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
            .zir_call => |zir_call| arg_val: {
                const has_bound_arg = zir_call.bound_arg != .none;
                if (arg_index == 0 and has_bound_arg) {
                    break :arg_val zir_call.bound_arg;
                }
                const real_arg_idx = arg_index - @intFromBool(has_bound_arg);

                // `args_body` starts with `num_args` end-offsets, one per
                // argument, followed by the concatenated argument bodies.
                const arg_body = if (real_arg_idx == 0) blk: {
                    const start = zir_call.num_args;
                    const end = @intFromEnum(zir_call.args_body[0]);
                    break :blk zir_call.args_body[start..end];
                } else blk: {
                    const start = @intFromEnum(zir_call.args_body[real_arg_idx - 1]);
                    const end = @intFromEnum(zir_call.args_body[real_arg_idx]);
                    break :blk zir_call.args_body[start..end];
                };

                // Generate args to comptime params in comptime block
                const parent_comptime = block.is_comptime;
                defer block.is_comptime = parent_comptime;
                // Note that we are indexing into parameters, not arguments, so use `arg_index` instead of `real_arg_idx`
                // (the @min with 32 mirrors the width of the comptime-bits storage — see paramIsComptime).
                if (arg_index < @min(param_count, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) {
                    block.is_comptime = true;
                    // TODO set comptime_reason
                }
                // Give the arg its result type
                sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(param_ty.toIntern()));
                // Resolve the arg!
                const uncoerced_arg = try sema.resolveBody(block, arg_body, zir_call.call_inst);

                if (sema.typeOf(uncoerced_arg).zigTypeTag(mod) == .NoReturn) {
                    // This terminates resolution of arguments. The caller should
                    // propagate this.
                    return uncoerced_arg;
                }

                if (sema.typeOf(uncoerced_arg).isError(mod)) {
                    zir_call.any_arg_is_error.* = true;
                }

                break :arg_val uncoerced_arg;
            },
        };
        switch (param_ty.toIntern()) {
            // Generic parameter: type not yet known, return uncoerced.
            .generic_poison_type => return uncoerced_arg,
            .var_args_param_type => return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index)),
            else => return sema.coerceExtra(
                block,
                param_ty,
                uncoerced_arg,
                cai.argSrc(block, arg_index),
                .{ .param_src = .{
                    .func_inst = func_inst,
                    .param_i = @intCast(arg_index),
                } },
            ) catch |err| switch (err) {
                error.NotCoercible => unreachable,
                else => |e| return e,
            },
        }
    }
};
|
|
|
|
/// Helper for inline/comptime calls. Such a call needs two `Sema`
/// configurations — one for the caller (its `code`, `fn_ret_ty`, etc.) and
/// one for the callee — but they must share AIR, so two separate `Sema`
/// instances cannot be used. This struct toggles a single `Sema` between the
/// two configurations by swapping the relevant fields in and out. The
/// toggling is needed during argument evaluation, where analyzing arguments
/// (caller context) is interleaved with resolving generic parameter types
/// (callee context).
const InlineCallSema = struct {
    sema: *Sema,
    cur: enum {
        caller,
        callee,
    },

    // Stash for the fields of whichever configuration is *not* currently
    // installed in `sema`.
    other_code: Zir,
    other_func_index: InternPool.Index,
    other_fn_ret_ty: Type,
    other_fn_ret_ty_ies: ?*InferredErrorSet,
    other_inst_map: InstMap,
    other_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
    other_generic_owner: InternPool.Index,
    other_generic_call_src: LazySrcLoc,
    other_generic_call_decl: InternPool.OptionalDeclIndex,

    /// `sema` must currently be configured for the caller and is left
    /// untouched; the given callee data is stashed in the `other_*` fields,
    /// with the remaining callee fields initialized to defaults.
    /// Must call deinit on the result.
    fn init(
        sema: *Sema,
        callee_code: Zir,
        callee_func_index: InternPool.Index,
        callee_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
    ) InlineCallSema {
        return .{
            .sema = sema,
            .cur = .caller,
            .other_code = callee_code,
            .other_func_index = callee_func_index,
            .other_fn_ret_ty = Type.void,
            .other_fn_ret_ty_ies = null,
            .other_inst_map = .{},
            .other_error_return_trace_index_on_fn_entry = callee_error_return_trace_index_on_fn_entry,
            .other_generic_owner = .none,
            .other_generic_call_src = .unneeded,
            .other_generic_call_decl = .none,
        };
    }

    /// Restores the caller configuration if necessary, then frees all
    /// temporary callee state.
    fn deinit(ics: *InlineCallSema) void {
        if (ics.cur == .callee) ics.swap();
        // The callee's inst_map memory belongs to this helper.
        ics.other_inst_map.deinit(ics.sema.gpa);
        ics.* = undefined;
    }

    /// Returns a Sema instance suitable for usage from the caller context.
    fn caller(ics: *InlineCallSema) *Sema {
        if (ics.cur != .caller) ics.swap();
        return ics.sema;
    }

    /// Returns a Sema instance suitable for usage from the callee context.
    fn callee(ics: *InlineCallSema) *Sema {
        if (ics.cur != .callee) ics.swap();
        return ics.sema;
    }

    /// Internal use only. Exchanges the active Sema fields with the stashed
    /// `other_*` set and flips `cur` to the other configuration.
    fn swap(ics: *InlineCallSema) void {
        ics.cur = if (ics.cur == .caller) .callee else .caller;
        std.mem.swap(Zir, &ics.sema.code, &ics.other_code);
        std.mem.swap(InternPool.Index, &ics.sema.func_index, &ics.other_func_index);
        std.mem.swap(Type, &ics.sema.fn_ret_ty, &ics.other_fn_ret_ty);
        std.mem.swap(?*InferredErrorSet, &ics.sema.fn_ret_ty_ies, &ics.other_fn_ret_ty_ies);
        std.mem.swap(InstMap, &ics.sema.inst_map, &ics.other_inst_map);
        std.mem.swap(InternPool.Index, &ics.sema.generic_owner, &ics.other_generic_owner);
        std.mem.swap(LazySrcLoc, &ics.sema.generic_call_src, &ics.other_generic_call_src);
        std.mem.swap(InternPool.OptionalDeclIndex, &ics.sema.generic_call_decl, &ics.other_generic_call_decl);
        std.mem.swap(Air.Inst.Ref, &ics.sema.error_return_trace_index_on_fn_entry, &ics.other_error_return_trace_index_on_fn_entry);
    }
};
|
|
|
|
fn analyzeCall(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
func: Air.Inst.Ref,
|
|
func_ty: Type,
|
|
func_src: LazySrcLoc,
|
|
call_src: LazySrcLoc,
|
|
modifier: std.builtin.CallModifier,
|
|
ensure_result_used: bool,
|
|
args_info: CallArgsInfo,
|
|
call_dbg_node: ?Zir.Inst.Index,
|
|
operation: CallOperation,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const ip = &mod.intern_pool;
|
|
|
|
const callee_ty = sema.typeOf(func);
|
|
const func_ty_info = mod.typeToFunc(func_ty).?;
|
|
const cc = func_ty_info.cc;
|
|
if (try sema.resolveValue(func)) |func_val|
|
|
if (func_val.isUndef(mod))
|
|
return sema.failWithUseOfUndef(block, call_src);
|
|
if (cc == .Naked) {
|
|
const maybe_decl = try sema.funcDeclSrc(func);
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(
|
|
block,
|
|
func_src,
|
|
"unable to call function with naked calling convention",
|
|
.{},
|
|
);
|
|
errdefer msg.destroy(sema.gpa);
|
|
|
|
if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
|
|
const call_tag: Air.Inst.Tag = switch (modifier) {
|
|
.auto,
|
|
.always_inline,
|
|
.compile_time,
|
|
.no_async,
|
|
=> Air.Inst.Tag.call,
|
|
|
|
.never_tail => Air.Inst.Tag.call_never_tail,
|
|
.never_inline => Air.Inst.Tag.call_never_inline,
|
|
.always_tail => Air.Inst.Tag.call_always_tail,
|
|
|
|
.async_kw => return sema.failWithUseOfAsync(block, call_src),
|
|
};
|
|
|
|
if (modifier == .never_inline and func_ty_info.cc == .Inline) {
|
|
return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
|
|
}
|
|
if (modifier == .always_inline and func_ty_info.is_noinline) {
|
|
return sema.fail(block, call_src, "'always_inline' call of noinline function", .{});
|
|
}
|
|
|
|
const gpa = sema.gpa;
|
|
|
|
var is_generic_call = func_ty_info.is_generic;
|
|
var is_comptime_call = block.is_comptime or modifier == .compile_time;
|
|
var comptime_reason: ?*const Block.ComptimeReason = null;
|
|
if (!is_comptime_call) {
|
|
if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| {
|
|
is_comptime_call = ct;
|
|
if (ct) {
|
|
comptime_reason = &.{ .comptime_ret_ty = .{
|
|
.block = block,
|
|
.func = func,
|
|
.func_src = func_src,
|
|
.return_ty = Type.fromInterned(func_ty_info.return_type),
|
|
} };
|
|
}
|
|
} else |err| switch (err) {
|
|
error.GenericPoison => is_generic_call = true,
|
|
else => |e| return e,
|
|
}
|
|
}
|
|
var is_inline_call = is_comptime_call or modifier == .always_inline or
|
|
func_ty_info.cc == .Inline;
|
|
|
|
if (sema.func_is_naked and !is_inline_call and !is_comptime_call) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, call_src, "runtime {s} not allowed in naked function", .{@tagName(operation)});
|
|
errdefer msg.destroy(sema.gpa);
|
|
|
|
switch (operation) {
|
|
.call, .@"@call", .@"@panic", .@"error return" => {},
|
|
.@"safety check" => try sema.errNote(block, call_src, msg, "use @setRuntimeSafety to disable runtime safety", .{}),
|
|
}
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
|
|
if (!is_inline_call and is_generic_call) {
|
|
if (sema.instantiateGenericCall(
|
|
block,
|
|
func,
|
|
func_src,
|
|
call_src,
|
|
ensure_result_used,
|
|
args_info,
|
|
call_tag,
|
|
call_dbg_node,
|
|
)) |some| {
|
|
return some;
|
|
} else |err| switch (err) {
|
|
error.GenericPoison => {
|
|
is_inline_call = true;
|
|
},
|
|
error.ComptimeReturn => {
|
|
is_inline_call = true;
|
|
is_comptime_call = true;
|
|
comptime_reason = &.{ .comptime_ret_ty = .{
|
|
.block = block,
|
|
.func = func,
|
|
.func_src = func_src,
|
|
.return_ty = Type.fromInterned(func_ty_info.return_type),
|
|
} };
|
|
},
|
|
else => |e| return e,
|
|
}
|
|
}
|
|
|
|
if (is_comptime_call and modifier == .never_inline) {
|
|
return sema.fail(block, call_src, "unable to perform 'never_inline' call at compile-time", .{});
|
|
}
|
|
|
|
const result: Air.Inst.Ref = if (is_inline_call) res: {
|
|
const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{
|
|
.needed_comptime_reason = "function being called at comptime must be comptime-known",
|
|
.block_comptime_reason = comptime_reason,
|
|
});
|
|
const prev_fn_index = sema.func_index;
|
|
const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
|
|
.extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{
|
|
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
|
|
}),
|
|
.func => func_val.toIntern(),
|
|
.ptr => |ptr| switch (ptr.addr) {
|
|
.decl => |decl| blk: {
|
|
const func_val_ptr = mod.declPtr(decl).val.toIntern();
|
|
const intern_index = mod.intern_pool.indexToKey(func_val_ptr);
|
|
if (intern_index == .extern_func or (intern_index == .variable and intern_index.variable.is_extern))
|
|
return sema.fail(block, call_src, "{s} call of extern function pointer", .{
|
|
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
|
|
});
|
|
break :blk func_val_ptr;
|
|
},
|
|
else => {
|
|
assert(callee_ty.isPtrAtRuntime(mod));
|
|
return sema.fail(block, call_src, "{s} call of function pointer", .{
|
|
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
|
|
});
|
|
},
|
|
},
|
|
else => unreachable,
|
|
};
|
|
if (func_ty_info.is_var_args) {
|
|
return sema.fail(block, call_src, "{s} call of variadic function", .{
|
|
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
|
|
});
|
|
}
|
|
|
|
// Analyze the ZIR. The same ZIR gets analyzed into a runtime function
|
|
// or an inlined call depending on what union tag the `label` field is
|
|
// set to in the `Block`.
|
|
// This block instruction will be used to capture the return value from the
|
|
// inlined function.
|
|
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
|
|
try sema.air_instructions.append(gpa, .{
|
|
.tag = .block,
|
|
.data = undefined,
|
|
});
|
|
// This one is shared among sub-blocks within the same callee, but not
|
|
// shared among the entire inline/comptime call stack.
|
|
var inlining: Block.Inlining = .{
|
|
.call_block = block,
|
|
.call_src = call_src,
|
|
.has_comptime_args = false,
|
|
.func = module_fn_index,
|
|
.comptime_result = undefined,
|
|
.merges = .{
|
|
.src_locs = .{},
|
|
.results = .{},
|
|
.br_list = .{},
|
|
.block_inst = block_inst,
|
|
},
|
|
};
|
|
|
|
const module_fn = mod.funcInfo(module_fn_index);
|
|
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
|
|
|
|
// We effectively want a child Sema here, but can't literally do that, because we need AIR
|
|
// to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in
|
|
// scope, we should use its `caller`/`callee` methods rather than using `sema` directly
|
|
// whenever performing an operation where the difference matters.
|
|
var ics = InlineCallSema.init(
|
|
sema,
|
|
fn_owner_decl.getFileScope(mod).zir,
|
|
module_fn_index,
|
|
block.error_return_trace_index,
|
|
);
|
|
defer ics.deinit();
|
|
|
|
var child_block: Block = .{
|
|
.parent = null,
|
|
.sema = sema,
|
|
.src_decl = module_fn.owner_decl,
|
|
.namespace = fn_owner_decl.src_namespace,
|
|
.wip_capture_scope = try mod.createCaptureScope(fn_owner_decl.src_scope),
|
|
.instructions = .{},
|
|
.label = null,
|
|
.inlining = &inlining,
|
|
.is_typeof = block.is_typeof,
|
|
.is_comptime = is_comptime_call,
|
|
.comptime_reason = comptime_reason,
|
|
.error_return_trace_index = block.error_return_trace_index,
|
|
.runtime_cond = block.runtime_cond,
|
|
.runtime_loop = block.runtime_loop,
|
|
.runtime_index = block.runtime_index,
|
|
};
|
|
|
|
const merges = &child_block.inlining.?.merges;
|
|
|
|
defer child_block.instructions.deinit(gpa);
|
|
defer merges.deinit(gpa);
|
|
|
|
try sema.emitBackwardBranch(block, call_src);
|
|
|
|
// Whether this call should be memoized, set to false if the call can
|
|
// mutate comptime state.
|
|
var should_memoize = true;
|
|
|
|
// If it's a comptime function call, we need to memoize it as long as no external
|
|
// comptime memory is mutated.
|
|
const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
|
|
|
|
const owner_info = mod.typeToFunc(fn_owner_decl.ty).?;
|
|
const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len);
|
|
var new_fn_info: InternPool.GetFuncTypeKey = .{
|
|
.param_types = new_param_types,
|
|
.return_type = owner_info.return_type,
|
|
.noalias_bits = owner_info.noalias_bits,
|
|
.alignment = if (owner_info.align_is_generic) null else owner_info.alignment,
|
|
.cc = if (owner_info.cc_is_generic) null else owner_info.cc,
|
|
.is_var_args = owner_info.is_var_args,
|
|
.is_noinline = owner_info.is_noinline,
|
|
.section_is_generic = owner_info.section_is_generic,
|
|
.addrspace_is_generic = owner_info.addrspace_is_generic,
|
|
.is_generic = owner_info.is_generic,
|
|
};
|
|
|
|
// This will have return instructions analyzed as break instructions to
|
|
// the block_inst above. Here we are performing "comptime/inline semantic analysis"
|
|
// for a function body, which means we must map the parameter ZIR instructions to
|
|
// the AIR instructions of the callsite. The callee could be a generic function
|
|
// which means its parameter type expressions must be resolved in order and used
|
|
// to successively coerce the arguments.
|
|
const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip));
|
|
try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
|
|
|
|
var arg_i: u32 = 0;
|
|
for (fn_info.param_body) |inst| {
|
|
const opt_noreturn_ref = try analyzeInlineCallArg(
|
|
&ics,
|
|
block,
|
|
&child_block,
|
|
inst,
|
|
new_param_types,
|
|
&arg_i,
|
|
args_info,
|
|
is_comptime_call,
|
|
&should_memoize,
|
|
memoized_arg_values,
|
|
func_ty_info,
|
|
func,
|
|
);
|
|
if (opt_noreturn_ref) |ref| {
|
|
// Analyzing this argument gave a ref of a noreturn type. Terminate argument analysis here.
|
|
return ref;
|
|
}
|
|
}
|
|
|
|
// From here, we only really need to use the callee Sema. Make it the active one, then we
|
|
// can just use `sema` directly.
|
|
_ = ics.callee();
|
|
|
|
if (!inlining.has_comptime_args) {
|
|
if (module_fn.analysis(ip).state == .sema_failure)
|
|
return error.AnalysisFail;
|
|
|
|
var block_it = block;
|
|
while (block_it.inlining) |parent_inlining| {
|
|
if (!parent_inlining.has_comptime_args and parent_inlining.func == module_fn_index) {
|
|
const err_msg = try sema.errMsg(block, call_src, "inline call is recursive", .{});
|
|
return sema.failWithOwnedErrorMsg(null, err_msg);
|
|
}
|
|
block_it = parent_inlining.call_block;
|
|
}
|
|
}
|
|
|
|
// In case it is a generic function with an expression for the return type that depends
|
|
// on parameters, we must now do the same for the return type as we just did with
|
|
// each of the parameters, resolving the return type and providing it to the child
|
|
// `Sema` so that it can be used for the `ret_ptr` instruction.
|
|
const ret_ty_inst = if (fn_info.ret_ty_body.len != 0)
|
|
try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip))
|
|
else
|
|
try sema.resolveInst(fn_info.ret_ty_ref);
|
|
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
|
|
sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
|
|
if (module_fn.analysis(ip).inferred_error_set) {
|
|
// Create a fresh inferred error set type for inline/comptime calls.
|
|
const ies = try sema.arena.create(InferredErrorSet);
|
|
ies.* = .{ .func = .none };
|
|
sema.fn_ret_ty_ies = ies;
|
|
sema.fn_ret_ty = Type.fromInterned((try ip.get(gpa, .{ .error_union_type = .{
|
|
.error_set_type = .adhoc_inferred_error_set_type,
|
|
.payload_type = sema.fn_ret_ty.toIntern(),
|
|
} })));
|
|
}
|
|
|
|
// This `res2` is here instead of directly breaking from `res` due to a stage1
|
|
// bug generating invalid LLVM IR.
|
|
const res2: Air.Inst.Ref = res2: {
|
|
if (should_memoize and is_comptime_call) {
|
|
if (mod.intern_pool.getIfExists(.{ .memoized_call = .{
|
|
.func = module_fn_index,
|
|
.arg_values = memoized_arg_values,
|
|
.result = .none,
|
|
} })) |memoized_call_index| {
|
|
const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call;
|
|
break :res2 Air.internedToRef(memoized_call.result);
|
|
}
|
|
}
|
|
|
|
new_fn_info.return_type = sema.fn_ret_ty.toIntern();
|
|
const new_func_resolved_ty = try mod.funcType(new_fn_info);
|
|
if (!is_comptime_call and !block.is_typeof) {
|
|
try emitDbgInline(block, prev_fn_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
|
|
|
|
const zir_tags = sema.code.instructions.items(.tag);
|
|
for (fn_info.param_body) |param| switch (zir_tags[@intFromEnum(param)]) {
|
|
.param, .param_comptime => {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].pl_tok;
|
|
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
|
|
const param_name = sema.code.nullTerminatedString(extra.data.name);
|
|
const inst = sema.inst_map.get(param).?;
|
|
|
|
try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
|
|
},
|
|
.param_anytype, .param_anytype_comptime => {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].str_tok;
|
|
const param_name = inst_data.get(sema.code);
|
|
const inst = sema.inst_map.get(param).?;
|
|
|
|
try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
|
|
},
|
|
else => continue,
|
|
};
|
|
}
|
|
|
|
if (is_comptime_call and ensure_result_used) {
|
|
try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
|
|
}
|
|
|
|
if (is_comptime_call or block.is_typeof) {
|
|
// Save the error trace as our first action in the function
|
|
// to match the behavior of runtime function calls.
|
|
const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
|
|
sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
|
|
child_block.error_return_trace_index = error_return_trace_index;
|
|
}
|
|
|
|
const result = result: {
|
|
sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) {
|
|
error.ComptimeReturn => break :result inlining.comptime_result,
|
|
else => |e| return e,
|
|
};
|
|
break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges, false);
|
|
};
|
|
|
|
if (!is_comptime_call and !block.is_typeof and
|
|
sema.typeOf(result).zigTypeTag(mod) != .NoReturn)
|
|
{
|
|
try emitDbgInline(
|
|
block,
|
|
module_fn_index,
|
|
prev_fn_index,
|
|
mod.funcOwnerDeclPtr(sema.func_index).ty,
|
|
.dbg_inline_end,
|
|
);
|
|
}
|
|
|
|
if (should_memoize and is_comptime_call) {
|
|
const result_val = try sema.resolveConstValue(block, .unneeded, result, undefined);
|
|
const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
|
|
|
|
// Transform ad-hoc inferred error set types into concrete error sets.
|
|
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
|
|
|
|
// TODO: check whether any external comptime memory was mutated by the
|
|
// comptime function call. If so, then do not memoize the call here.
|
|
_ = try mod.intern(.{ .memoized_call = .{
|
|
.func = module_fn_index,
|
|
.arg_values = memoized_arg_values,
|
|
.result = result_transformed,
|
|
} });
|
|
|
|
break :res2 Air.internedToRef(result_transformed);
|
|
}
|
|
|
|
if (try sema.resolveValue(result)) |result_val| {
|
|
const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
|
|
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
|
|
break :res2 Air.internedToRef(result_transformed);
|
|
}
|
|
|
|
const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
|
|
if (new_ty != .none) {
|
|
// TODO: mutate in place the previous instruction if possible
|
|
// rather than adding a bitcast instruction.
|
|
break :res2 try block.addBitCast(Type.fromInterned(new_ty), result);
|
|
}
|
|
|
|
break :res2 result;
|
|
};
|
|
|
|
break :res res2;
|
|
} else res: {
|
|
assert(!func_ty_info.is_generic);
|
|
|
|
const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count());
|
|
for (args, 0..) |*arg_out, arg_idx| {
|
|
// Non-generic, so param types are already resolved
|
|
const param_ty = if (arg_idx < func_ty_info.param_types.len) ty: {
|
|
break :ty Type.fromInterned(func_ty_info.param_types.get(ip)[arg_idx]);
|
|
} else Type.fromInterned(InternPool.Index.var_args_param_type);
|
|
assert(!param_ty.isGenericPoison());
|
|
arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
|
|
if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) {
|
|
return arg_out.*;
|
|
}
|
|
}
|
|
|
|
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
|
|
|
|
try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
|
|
if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) {
|
|
ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
|
|
}
|
|
|
|
if (try sema.resolveValue(func)) |func_val| {
|
|
if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
|
|
try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
|
|
}
|
|
}
|
|
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
|
|
args.len);
|
|
const func_inst = try block.addInst(.{
|
|
.tag = call_tag,
|
|
.data = .{ .pl_op = .{
|
|
.operand = func,
|
|
.payload = sema.addExtraAssumeCapacity(Air.Call{
|
|
.args_len = @intCast(args.len),
|
|
}),
|
|
} },
|
|
});
|
|
sema.appendRefsAssumeCapacity(args);
|
|
|
|
if (call_tag == .call_always_tail) {
|
|
if (ensure_result_used) {
|
|
try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src);
|
|
}
|
|
return sema.handleTailCall(block, call_src, func_ty, func_inst);
|
|
}
|
|
if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: {
|
|
// Function pointers and extern functions aren't guaranteed to
|
|
// actually be noreturn so we add a safety check for them.
|
|
if (try sema.resolveValue(func)) |func_val| {
|
|
switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
|
|
.func => break :skip_safety,
|
|
.ptr => |ptr| switch (ptr.addr) {
|
|
.decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety,
|
|
else => {},
|
|
},
|
|
else => {},
|
|
}
|
|
}
|
|
try sema.safetyPanic(block, call_src, .noreturn_returned);
|
|
return .unreachable_value;
|
|
}
|
|
if (func_ty_info.return_type == .noreturn_type) {
|
|
_ = try block.addNoOp(.unreach);
|
|
return .unreachable_value;
|
|
}
|
|
break :res func_inst;
|
|
};
|
|
|
|
if (ensure_result_used) {
|
|
try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
/// Lowers the result of a `.call_always_tail` into a `ret` of the callee's
/// result, after verifying that (a) the selected backend supports tail calls
/// on the target and (b) the callee's function type matches the calling
/// function's type. Control never continues past a tail call, so this always
/// returns `.unreachable_value`.
fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
    const mod = sema.mod;
    const backend = mod.comp.getZigBackend();
    const target = mod.getTarget();
    // Tail-call support is a property of the (backend, target) pair.
    if (!target_util.supportsTailCall(target, backend)) {
        return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
            @tagName(backend), @tagName(target.cpu.arch),
        });
    }
    // A tail call reuses the caller's frame, so the two function types must be equal.
    const caller_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
    if (!func_ty.eql(caller_decl.ty, mod)) {
        return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
            func_ty.fmt(mod), caller_decl.ty.fmt(mod),
        });
    }
    _ = try block.addUnOp(.ret, result);
    return .unreachable_value;
}
|
|
|
|
/// Usually, returns null. If an argument was noreturn, returns that ref (which should become the call result).
///
/// Analyzes one `param*` ZIR instruction of an inline/comptime callee:
/// resolves the parameter's type (unless it is `anytype`), analyzes and
/// coerces the corresponding callsite argument, maps the result into the
/// callee's `inst_map`, and — for comptime calls — records the resolved
/// argument value into `memoized_arg_values` for call memoization.
/// `arg_i` is advanced by one for every `param*` instruction handled.
fn analyzeInlineCallArg(
    ics: *InlineCallSema,
    arg_block: *Block,
    param_block: *Block,
    inst: Zir.Inst.Index,
    new_param_types: []InternPool.Index,
    arg_i: *u32,
    args_info: CallArgsInfo,
    is_comptime_call: bool,
    should_memoize: *bool,
    memoized_arg_values: []InternPool.Index,
    func_ty_info: InternPool.Key.FuncType,
    func_inst: Air.Inst.Ref,
) !?Air.Inst.Ref {
    const mod = ics.sema.mod;
    const ip = &mod.intern_pool;
    const zir_tags = ics.callee().code.instructions.items(.tag);
    // An explicitly-`comptime` parameter marks the whole inlining as having
    // comptime args, which the caller uses for inline-recursion detection.
    switch (zir_tags[@intFromEnum(inst)]) {
        .param_comptime, .param_anytype_comptime => param_block.inlining.?.has_comptime_args = true,
        else => {},
    }
    switch (zir_tags[@intFromEnum(inst)]) {
        .param, .param_comptime => {
            // Evaluate the parameter type expression now that previous ones have
            // been mapped, and coerce the corresponding argument to it.
            const pl_tok = ics.callee().code.instructions.items(.data)[@intFromEnum(inst)].pl_tok;
            const param_src = pl_tok.src();
            const extra = ics.callee().code.extraData(Zir.Inst.Param, pl_tok.payload_index);
            const param_body = ics.callee().code.bodySlice(extra.end, extra.data.body_len);
            const param_ty = param_ty: {
                // Fast path: the type is already resolved in the function type.
                const raw_param_ty = func_ty_info.param_types.get(ip)[arg_i.*];
                if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
                // Generic parameter: resolve its type body in the callee's
                // context, where earlier params are already mapped.
                const param_ty_inst = try ics.callee().resolveBody(param_block, param_body, inst);
                const param_ty = try ics.callee().analyzeAsType(param_block, param_src, param_ty_inst);
                break :param_ty param_ty.toIntern();
            };
            new_param_types[arg_i.*] = param_ty;
            // The argument expression is analyzed in the *caller*'s context.
            const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst);
            if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) {
                // Argument analysis diverged; the call result is this ref.
                return casted_arg;
            }
            const arg_src = args_info.argSrc(arg_block, arg_i.*);
            if (try ics.callee().typeRequiresComptime(Type.fromInterned(param_ty))) {
                // Comptime-only parameter type: the argument must be comptime-known.
                _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{
                    .needed_comptime_reason = "argument to parameter with comptime-only type must be comptime-known",
                    .block_comptime_reason = param_block.comptime_reason,
                });
            } else if (!is_comptime_call and zir_tags[@intFromEnum(inst)] == .param_comptime) {
                // `comptime` keyword on the parameter itself.
                _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{
                    .needed_comptime_reason = "parameter is comptime",
                });
            }

            if (is_comptime_call) {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
                const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{
                    .needed_comptime_reason = "argument to function being called at comptime must be comptime-known",
                    .block_comptime_reason = param_block.comptime_reason,
                });
                switch (arg_val.toIntern()) {
                    .generic_poison, .generic_poison_type => {
                        // This function is currently evaluated as part of an as-of-yet unresolvable
                        // parameter or return type.
                        return error.GenericPoison;
                    },
                    else => {},
                }
                // Needed so that lazy values do not trigger
                // assertion due to type not being resolved
                // when the hash function is called.
                const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
                // A value that can mutate comptime var state makes the call
                // unsafe to memoize.
                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
                memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(Type.fromInterned(param_ty), mod);
            } else {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
            }

            // Any argument that resolves to a comptime-known value counts
            // toward `has_comptime_args` (recursion-detection exemption).
            if (try ics.caller().resolveValue(casted_arg)) |_| {
                param_block.inlining.?.has_comptime_args = true;
            }

            arg_i.* += 1;
        },
        .param_anytype, .param_anytype_comptime => {
            // No coercion needed.
            const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst);
            if (ics.caller().typeOf(uncasted_arg).zigTypeTag(mod) == .NoReturn) {
                // Argument analysis diverged; the call result is this ref.
                return uncasted_arg;
            }
            const arg_src = args_info.argSrc(arg_block, arg_i.*);
            // The anytype parameter's type is simply the argument's type.
            new_param_types[arg_i.*] = ics.caller().typeOf(uncasted_arg).toIntern();

            if (is_comptime_call) {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
                const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{
                    .needed_comptime_reason = "argument to function being called at comptime must be comptime-known",
                    .block_comptime_reason = param_block.comptime_reason,
                });
                switch (arg_val.toIntern()) {
                    .generic_poison, .generic_poison_type => {
                        // This function is currently evaluated as part of an as-of-yet unresolvable
                        // parameter or return type.
                        return error.GenericPoison;
                    },
                    else => {},
                }
                // Needed so that lazy values do not trigger
                // assertion due to type not being resolved
                // when the hash function is called.
                const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
                // A value that can mutate comptime var state makes the call
                // unsafe to memoize.
                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
                memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(ics.caller().typeOf(uncasted_arg), mod);
            } else {
                if (zir_tags[@intFromEnum(inst)] == .param_anytype_comptime) {
                    _ = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{
                        .needed_comptime_reason = "parameter is comptime",
                    });
                }
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
            }

            // Any argument that resolves to a comptime-known value counts
            // toward `has_comptime_args` (recursion-detection exemption).
            if (try ics.caller().resolveValue(uncasted_arg)) |_| {
                param_block.inlining.?.has_comptime_args = true;
            }

            arg_i.* += 1;
        },
        // Non-`param*` instructions in the param body are ignored here.
        else => {},
    }

    return null;
}
|
|
|
|
/// Analyzes a call to a generic function. Re-evaluates the generic function's
/// signature with the callsite's comptime argument values plugged in via a
/// child `Sema`, producing a monomorphized `func_instance` (deduplicated via
/// `InternPool`), then emits a runtime call to the instantiation passing only
/// the runtime arguments.
///
/// Returns `error.ComptimeReturn` if the resolved return type turns out to be
/// comptime-only (the caller must redo this as a comptime call), and
/// `error.GenericPoison` if the instantiation is still generic or inline
/// (the caller must redo this as an inline call).
fn instantiateGenericCall(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_src: LazySrcLoc,
    call_src: LazySrcLoc,
    ensure_result_used: bool,
    args_info: CallArgsInfo,
    call_tag: Air.Inst.Tag,
    call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // The generic function itself must be comptime-known to instantiate it.
    const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{
        .needed_comptime_reason = "generic function being called must be comptime-known",
    });
    // The callee may be a function body directly, or a pointer to a decl
    // whose value is the function body.
    const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
        .func => func_val.toIntern(),
        .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(),
        else => unreachable,
    };
    const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
    const generic_owner_ty_info = mod.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?;

    try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst });

    // Even though there may already be a generic instantiation corresponding
    // to this callsite, we must evaluate the expressions of the generic
    // function signature with the values of the callsite plugged in.
    // Importantly, this may include type coercions that determine whether the
    // instantiation is a match of a previous instantiation.
    // The actual monomorphization happens via adding `func_instance` to
    // `InternPool`.

    const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl);
    const namespace_index = fn_owner_decl.src_namespace;
    const namespace = mod.namespacePtr(namespace_index);
    const fn_zir = namespace.file_scope.zir;
    const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip));

    // One entry per argument; stays `.none` for runtime args.
    const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count());
    @memset(comptime_args, .none);

    // We may overestimate the number of runtime args, but this will definitely be sufficient.
    const max_runtime_args = args_info.count() - @popCount(generic_owner_ty_info.comptime_bits);
    var runtime_args = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(sema.arena, max_runtime_args);

    // Re-run the block that creates the function, with the comptime parameters
    // pre-populated inside `inst_map`. This causes `param_comptime` and
    // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
    // new, monomorphized function, with the comptime parameters elided.
    var child_sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = sema.arena,
        .code = fn_zir,
        // We pass the generic callsite's owner decl here because whatever `Decl`
        // dependencies are chased at this point should be attached to the
        // callsite, not the `Decl` associated with the `func_instance`.
        .owner_decl = sema.owner_decl,
        .owner_decl_index = sema.owner_decl_index,
        .func_index = sema.owner_func_index,
        // This may not be known yet, since the calling convention could be generic, but there
        // should be no illegal instructions encountered while creating the function anyway.
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_args = comptime_args,
        .generic_owner = generic_owner,
        .generic_call_src = call_src,
        .generic_call_decl = block.src_decl.toOptional(),
        .branch_quota = sema.branch_quota,
        .branch_count = sema.branch_count,
        .comptime_mutable_decls = sema.comptime_mutable_decls,
        .comptime_err_ret_trace = sema.comptime_err_ret_trace,
    };
    defer child_sema.deinit();

    // Comptime block in the callee's namespace, used to evaluate the
    // signature expressions.
    var child_block: Block = .{
        .parent = null,
        .sema = &child_sema,
        .src_decl = generic_owner_func.owner_decl,
        .namespace = namespace_index,
        .wip_capture_scope = try mod.createCaptureScope(fn_owner_decl.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer child_block.instructions.deinit(gpa);

    try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);

    // Map each `param*` ZIR instruction to either the comptime argument value
    // or a new runtime `arg` AIR instruction.
    for (fn_info.param_body[0..args_info.count()], 0..) |param_inst, arg_index| {
        const param_tag = fn_zir.instructions.items(.tag)[@intFromEnum(param_inst)];

        const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) {
            else => |ty| Type.fromInterned(ty), // parameter is not generic, so type is already resolved
            .generic_poison_type => param_ty: {
                // We have every parameter before this one, so can resolve this parameter's type now.
                // However, first check the param type, since it may be anytype.
                switch (param_tag) {
                    .param_anytype, .param_anytype_comptime => {
                        // The parameter doesn't have a type.
                        break :param_ty Type.generic_poison;
                    },
                    .param, .param_comptime => {
                        // We now know every prior parameter, so can resolve this
                        // parameter's type. The child sema has these types.
                        const param_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok;
                        const param_extra = fn_zir.extraData(Zir.Inst.Param, param_data.payload_index);
                        const param_ty_body = fn_zir.bodySlice(param_extra.end, param_extra.data.body_len);

                        // Make sure any nested instructions don't clobber our work.
                        const prev_params = child_block.params;
                        const prev_no_partial_func_ty = child_sema.no_partial_func_ty;
                        const prev_generic_owner = child_sema.generic_owner;
                        const prev_generic_call_src = child_sema.generic_call_src;
                        const prev_generic_call_decl = child_sema.generic_call_decl;
                        child_block.params = .{};
                        child_sema.no_partial_func_ty = true;
                        child_sema.generic_owner = .none;
                        child_sema.generic_call_src = .unneeded;
                        child_sema.generic_call_decl = .none;
                        defer {
                            child_block.params = prev_params;
                            child_sema.no_partial_func_ty = prev_no_partial_func_ty;
                            child_sema.generic_owner = prev_generic_owner;
                            child_sema.generic_call_src = prev_generic_call_src;
                            child_sema.generic_call_decl = prev_generic_call_decl;
                        }

                        const param_ty_inst = try child_sema.resolveBody(&child_block, param_ty_body, param_inst);
                        break :param_ty try child_sema.analyzeAsType(&child_block, param_data.src(), param_ty_inst);
                    },
                    else => unreachable,
                }
            },
        };
        // The argument itself is analyzed in the caller's context.
        const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func);
        const arg_ty = sema.typeOf(arg_ref);
        if (arg_ty.zigTypeTag(mod) == .NoReturn) {
            // This terminates argument analysis.
            return arg_ref;
        }

        // A parameter is comptime if marked so explicitly, or if its type
        // requires comptime.
        const arg_is_comptime = switch (param_tag) {
            .param_comptime, .param_anytype_comptime => true,
            .param, .param_anytype => try sema.typeRequiresComptime(arg_ty),
            else => unreachable,
        };

        if (arg_is_comptime) {
            if (try sema.resolveValue(arg_ref)) |arg_val| {
                comptime_args[arg_index] = arg_val.toIntern();
                child_sema.inst_map.putAssumeCapacityNoClobber(
                    param_inst,
                    Air.internedToRef(arg_val.toIntern()),
                );
            } else switch (param_tag) {
                // Runtime-known value for a comptime parameter: error, with
                // a note pointing at the parameter declaration.
                .param_comptime,
                .param_anytype_comptime,
                => return sema.failWithOwnedErrorMsg(block, msg: {
                    const arg_src = args_info.argSrc(block, arg_index);
                    const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to comptime parameter", .{});
                    errdefer msg.destroy(sema.gpa);
                    const param_src = switch (param_tag) {
                        .param_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src(),
                        .param_anytype_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src(),
                        else => unreachable,
                    };
                    try child_sema.errNote(&child_block, param_src, msg, "declared comptime here", .{});
                    break :msg msg;
                }),

                // Runtime-known value for a comptime-only parameter type:
                // error, explaining why the type is comptime-only.
                .param,
                .param_anytype,
                => return sema.failWithOwnedErrorMsg(block, msg: {
                    const arg_src = args_info.argSrc(block, arg_index);
                    const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to parameter of comptime-only type", .{});
                    errdefer msg.destroy(sema.gpa);
                    const param_src = switch (param_tag) {
                        .param => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src(),
                        .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src(),
                        else => unreachable,
                    };
                    try child_sema.errNote(&child_block, param_src, msg, "declared here", .{});
                    const src_decl = mod.declPtr(block.src_decl);
                    try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(arg_src, mod), arg_ty);
                    break :msg msg;
                }),

                else => unreachable,
            }
        } else {
            // The parameter is runtime-known.
            try sema.queueFullTypeResolution(arg_ty);
            child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
                .tag = .arg,
                .data = .{ .arg = .{
                    .ty = Air.internedToRef(arg_ty.toIntern()),
                    .src_index = @intCast(arg_index),
                } },
            }));
            const param_name: Zir.NullTerminatedString = switch (param_tag) {
                .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.start,
                .param => name: {
                    const inst_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok;
                    const extra = fn_zir.extraData(Zir.Inst.Param, inst_data.payload_index);
                    break :name extra.data.name;
                },
                else => unreachable,
            };
            try child_block.params.append(sema.arena, .{
                .ty = arg_ty.toIntern(), // This is the type after coercion
                .is_comptime = false, // We're adding only runtime args to the instantiation
                .name = param_name,
            });
            runtime_args.appendAssumeCapacity(arg_ref);
        }
    }

    // We've already handled parameters, so don't resolve the whole body. Instead, just
    // do the instructions after the params (i.e. the func itself).
    const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst);
    const callee_index = (child_sema.resolveConstDefinedValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable).toIntern();

    const callee = mod.funcInfo(callee_index);
    // Propagate the caller's branch quota into the instantiation.
    callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);

    try sema.addReferencedBy(block, call_src, callee.owner_decl);

    // Make a runtime call to the new function, making sure to omit the comptime args.
    const func_ty = Type.fromInterned(callee.ty);
    const func_ty_info = mod.typeToFunc(func_ty).?;

    // If the call evaluated to a return type that requires comptime, never mind
    // our generic instantiation. Instead we need to perform a comptime call.
    if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
        return error.ComptimeReturn;
    }
    // Similarly, if the call evaluated to a generic type we need to instead
    // call it inline.
    if (func_ty_info.is_generic or func_ty_info.cc == .Inline) {
        return error.GenericPoison;
    }

    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));

    if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);

    // Calling an errorable function marks the caller as doing so (used for
    // error return traces).
    if (sema.owner_func_index != .none and
        Type.fromInterned(func_ty_info.return_type).isError(mod))
    {
        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
    }

    try mod.ensureFuncBodyAnalysisQueued(callee_index);

    // Emit the AIR call instruction with only the runtime arguments.
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len);
    const result = try block.addInst(.{
        .tag = call_tag,
        .data = .{ .pl_op = .{
            .operand = Air.internedToRef(callee_index),
            .payload = sema.addExtraAssumeCapacity(Air.Call{
                .args_len = @intCast(runtime_args.items.len),
            }),
        } },
    });
    sema.appendRefsAssumeCapacity(runtime_args.items);

    if (ensure_result_used) {
        try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
    }
    if (call_tag == .call_always_tail) {
        return sema.handleTailCall(block, call_src, func_ty, result);
    }
    // A call to a noreturn function never produces a value.
    if (func_ty.fnReturnType(mod).isNoReturn(mod)) {
        _ = try block.addNoOp(.unreach);
        return .unreachable_value;
    }
    return result;
}
|
|
|
|
/// Recursively resolves lazy values held in the comptime fields of a tuple
/// (anonymous struct) type, including tuples nested in field types. Any other
/// kind of type is ignored.
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const anon_struct = switch (ip.indexToKey(ty.toIntern())) {
        .anon_struct_type => |info| info,
        else => return,
    };
    for (anon_struct.types.get(ip), anon_struct.values.get(ip)) |field_ty, field_val| {
        // A field type may itself be a tuple containing lazy values.
        try sema.resolveTupleLazyValues(block, src, Type.fromInterned(field_ty));
        if (field_val != .none) {
            // TODO: mutate in intern pool
            _ = try sema.resolveLazyValue(Value.fromInterned(field_val));
        }
    }
}
|
|
|
|
/// Appends a debug-info AIR instruction (`dbg_inline_begin` or
/// `dbg_inline_end`, chosen by `tag`) marking the boundary of an inlined
/// call. Emits nothing when debug info is stripped, or when the inline call
/// is recursive (old and new function are the same).
fn emitDbgInline(
    block: *Block,
    old_func: InternPool.Index,
    new_func: InternPool.Index,
    new_func_ty: Type,
    tag: Air.Inst.Tag,
) CompileError!void {
    // Stripped build or recursive inline call: no marker needed.
    if (block.ownerModule().strip or old_func == new_func) return;

    _ = try block.addInst(.{
        .tag = tag,
        .data = .{ .ty_fn = .{
            .ty = Air.internedToRef(new_func_ty.toIntern()),
            .func = new_func,
        } },
    });
}
|
|
|
|
/// Materializes an arbitrary-width integer type (e.g. `u7`, `i32`) from the
/// signedness and bit count encoded directly in the ZIR instruction.
fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const int_type = sema.code.instructions.items(.data)[@intFromEnum(inst)].int_type;
    const int_ty = try sema.mod.intType(int_type.signedness, int_type.bit_count);
    return Air.internedToRef(int_ty.toIntern());
}
|
|
|
|
/// Analyzes `?T`: builds the optional type wrapping the resolved child type.
/// Opaque and `null` child types are rejected with a compile error.
fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
    const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
    switch (child_type.zigTypeTag(mod)) {
        .Opaque => return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)}),
        .Null => return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)}),
        else => {},
    }
    const opt_type = try mod.optionalType(child_type.toIntern());
    return Air.internedToRef(opt_type.toIntern());
}
|
|
|
|
/// Returns the element type used by an array-initialization expression:
/// for a struct/tuple this is the type of field `rhs`, otherwise the
/// indexable's element type.
fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
    const maybe_wrapped_indexable_ty = sema.resolveType(block, .unneeded, bin.lhs) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
    try sema.resolveTypeFields(indexable_ty);
    assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
    const elem_ty = if (indexable_ty.zigTypeTag(mod) == .Struct)
        indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod)
    else
        indexable_ty.elemType2(mod);
    return Air.internedToRef(elem_ty.toIntern());
}
|
|
|
|
/// Returns the child type of a pointer operand. An `anyopaque` child is
/// reported as the generic poison type since its real type is unknown.
fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const maybe_wrapped_ptr_ty = sema.resolveType(block, .unneeded, un_node.operand) catch |err| switch (err) {
        // A type-returning instruction propagates generic poison as the
        // generic poison type rather than a compile error.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
    const elem_ty = ptr_ty.childType(mod);
    if (elem_ty.toIntern() == .anyopaque_type) {
        // The pointer's actual child type is effectively unknown, so it makes
        // sense to represent it with a generic poison.
        return .generic_poison_type;
    }
    return Air.internedToRef(elem_ty.toIntern());
}
|
|
|
|
/// Returns the per-element type obtained by indexing through a pointer:
/// for a single-item pointer (to an array) that is the array's child type,
/// otherwise the pointer's child type directly.
fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const ptr_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) {
        // Generic poison propagates as the generic poison type.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    try sema.checkMemOperand(block, src, ptr_ty);
    const elem_ty = switch (ptr_ty.ptrSize(mod)) {
        .One => ptr_ty.childType(mod).childType(mod),
        .Slice, .Many, .C => ptr_ty.childType(mod),
    };
    return Air.internedToRef(elem_ty.toIntern());
}
|
|
|
|
/// Returns the element type of a vector type operand; fails if the operand
/// does not resolve to a vector type.
fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const vec_ty = sema.resolveType(block, .unneeded, un_node.operand) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    if (!vec_ty.isVector(mod)) {
        return sema.fail(block, un_node.src(), "expected vector type, found '{}'", .{vec_ty.fmt(mod)});
    }
    return Air.internedToRef(vec_ty.childType(mod).toIntern());
}
|
|
|
|
/// Analyzes `@Vector(len, Elem)`: resolves the comptime-known length and
/// the (validated) element type, then interns the vector type.
fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    // The vector length must be a comptime-known u32.
    const vec_len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, .{
        .needed_comptime_reason = "vector length must be comptime-known",
    }));
    const elem_ty = try sema.resolveType(block, elem_type_src, extra.rhs);
    try sema.checkVectorElemType(block, elem_type_src, elem_ty);
    const vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = elem_ty.toIntern(),
    });
    return Air.internedToRef(vec_ty.toIntern());
}
|
|
|
|
/// Analyzes `[len]Elem`: resolves the comptime-known length and the
/// (validated) element type, then interns the array type.
fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
    const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
    const array_len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, .{
        .needed_comptime_reason = "array length must be comptime-known",
    });
    const elem_ty = try sema.resolveType(block, elem_src, extra.rhs);
    // Opaque and noreturn element types are rejected here.
    try sema.validateArrayElemType(block, elem_ty, elem_src);
    const array_ty = try sema.mod.arrayType(.{
        .len = array_len,
        .child = elem_ty.toIntern(),
    });
    return Air.internedToRef(array_ty.toIntern());
}
|
|
|
|
/// Analyzes `[len:sentinel]Elem`: resolves the comptime-known length,
/// validates the element type, coerces the sentinel to the element type and
/// requires it to be comptime-known, then interns the sentineled array type.
fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
    const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
    const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node };
    const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
    const array_len = try sema.resolveInt(block, len_src, extra.len, Type.usize, .{
        .needed_comptime_reason = "array length must be comptime-known",
    });
    const elem_ty = try sema.resolveType(block, elem_src, extra.elem_type);
    try sema.validateArrayElemType(block, elem_ty, elem_src);
    // The sentinel must coerce to the element type and be comptime-known.
    const uncasted_sentinel = try sema.resolveInst(extra.sentinel);
    const sentinel = try sema.coerce(block, elem_ty, uncasted_sentinel, sentinel_src);
    const sentinel_val = try sema.resolveConstDefinedValue(block, sentinel_src, sentinel, .{
        .needed_comptime_reason = "array sentinel value must be comptime-known",
    });
    const array_ty = try sema.mod.arrayType(.{
        .len = array_len,
        .sentinel = sentinel_val.toIntern(),
        .child = elem_ty.toIntern(),
    });
    return Air.internedToRef(array_ty.toIntern());
}
|
|
|
|
/// Rejects element types that cannot form an array: opaque types and
/// `noreturn`. All other types are accepted.
fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
    const mod = sema.mod;
    switch (elem_type.zigTypeTag(mod)) {
        .Opaque => return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)}),
        .NoReturn => return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}),
        else => {},
    }
}
|
|
|
|
/// Analyzes an `anyframe->T` type expression.
/// Async is currently unsupported, so this always reports a compile error;
/// the code after the unconditional return is dead, presumably kept for a
/// future async implementation — confirm before removing.
fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    // Unconditionally reject: async/anyframe is not implemented.
    if (true) {
        return sema.failWithUseOfAsync(block, inst_data.src());
    }
    // Dead code below (unreachable while the guard above remains).
    const mod = sema.mod;
    const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node };
    const return_type = try sema.resolveType(block, operand_src, inst_data.operand);
    const anyframe_type = try mod.anyframeType(return_type);

    return Air.internedToRef(anyframe_type.toIntern());
}
|
|
|
|
/// Analyzes `E!T`: verifies the left operand is an error set type and the
/// payload type is valid, then interns the error union type.
fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
    const payload = try sema.resolveType(block, rhs_src, extra.rhs);

    if (error_set.zigTypeTag(mod) != .ErrorSet) return sema.fail(
        block,
        lhs_src,
        "expected error set type, found '{}'",
        .{error_set.fmt(mod)},
    );
    // Opaque and error-set payload types are rejected here.
    try sema.validateErrorUnionPayloadType(block, payload, rhs_src);
    const err_union_ty = try mod.errorUnionType(error_set, payload);
    return Air.internedToRef(err_union_ty.toIntern());
}
|
|
|
|
/// Rejects payload types that cannot appear in an error union: opaque types
/// and error set types. All other payload types are accepted.
fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
    const mod = sema.mod;
    switch (payload_ty.zigTypeTag(mod)) {
        .Opaque => return sema.fail(
            block,
            payload_src,
            "error union with payload of opaque type '{}' not allowed",
            .{payload_ty.fmt(mod)},
        ),
        .ErrorSet => return sema.fail(
            block,
            payload_src,
            "error union with payload of error set type '{}' not allowed",
            .{payload_ty.fmt(mod)},
        ),
        else => {},
    }
}
|
|
|
|
/// Analyzes `error.Name`: registers the error in the global error set and
/// returns a value of the single-element error set type containing it.
fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    // Ensure the error is registered globally; the numeric value is unused here.
    _ = try mod.getErrorValue(name);
    // Create an error set type with only this error value, and return the value.
    const error_set_type = try mod.singleErrorSetType(name);
    const err_val = try mod.intern(.{ .err = .{
        .ty = error_set_type.toIntern(),
        .name = name,
    } });
    return Air.internedToRef(err_val);
}
|
|
|
|
/// Implements `@intFromError`: converts an error value to the error integer
/// type. Comptime-known operands fold to an integer constant; otherwise the
/// operand's (possibly inferred) error set is examined to fold sets with
/// zero or one member, falling back to a runtime bitcast.
fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
    const err_int_ty = try mod.errorIntType();

    // Comptime-known error value: look up its global integer directly.
    if (try sema.resolveValue(operand)) |val| {
        if (val.isUndef(mod)) {
            return mod.undefRef(err_int_ty);
        }
        const err_name = ip.indexToKey(val.toIntern()).err.name;
        return Air.internedToRef((try mod.intValue(
            err_int_ty,
            try mod.getErrorValue(err_name),
        )).toIntern());
    }

    // Runtime operand: if its error set (after resolving inferred sets) has
    // zero or one member, the result is still comptime-known.
    const op_ty = sema.typeOf(uncasted_operand);
    switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) {
        .anyerror_type => {},
        else => |err_set_ty_index| {
            const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
            switch (names.len) {
                // Empty set: the only representable "error" integer is 0.
                0 => return Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern()),
                // Single-member set: the result must be that error's integer.
                1 => {
                    const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
                    return mod.intRef(err_int_ty, int);
                },
                else => {},
            }
        },
    }

    // General case: reinterpret the error value as its integer representation.
    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addBitCast(err_int_ty, operand);
}
|
|
|
|
/// Implements `@errorFromInt`: converts an error-integer value into an
/// `anyerror`. Comptime-known operands are validated against the global
/// error set; runtime operands get an optional safety check and a bitcast.
fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const err_int_ty = try mod.errorIntType();
    const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src);

    // Comptime-known integer: validate it maps to a registered error.
    if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema));
        // NOTE(review): this bound admits `int == count()`; verify the
        // indexing convention of `global_error_set` (reserved entry at
        // index 0?) so that `keys()[int]` below cannot go out of bounds.
        if (int > mod.global_error_set.count() or int == 0)
            return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
        return Air.internedToRef((try mod.intern(.{ .err = .{
            .ty = .anyerror_type,
            .name = mod.global_error_set.keys()[int],
        } })));
    }
    try sema.requireRuntimeBlock(block, src, operand_src);
    // Runtime operand: emit a safety check that the integer is nonzero and
    // below the number of registered errors.
    if (block.wantSafety()) {
        const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
        const zero_val = Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern());
        const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
        const ok = try block.addBinOp(.bool_and, is_lt_len, is_non_zero);
        try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
    }
    // Reinterpret the integer as an `anyerror` value.
    return block.addInst(.{
        .tag = .bitcast,
        .data = .{ .ty_op = .{
            .ty = .anyerror_type,
            .operand = operand,
        } },
    });
}
|
|
|
|
/// Implements the `||` operator on error set types: both operands must be
/// error set types; the result is their merged error set, or `anyerror` if
/// either side is (or resolves to) `anyerror`.
fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    // Special-cased diagnostic: both operands being `bool` strongly suggests
    // the user meant `or` rather than `||`.
    if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
        const msg = msg: {
            const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "'||' merges error sets; 'or' performs boolean OR", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
    const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
    if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
        return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)});
    if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
        return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)});

    // Anything merged with anyerror is anyerror.
    if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) {
        return .anyerror_type;
    }

    // Inferred error sets must be resolved before merging, since resolution
    // can reveal that a side is actually `anyerror`.
    if (ip.isInferredErrorSetType(lhs_ty.toIntern())) {
        switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .anyerror_type,
            else => {},
        }
    }
    if (ip.isInferredErrorSetType(rhs_ty.toIntern())) {
        switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .anyerror_type,
            else => {},
        }
    }

    const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
    return Air.internedToRef(err_set_ty.toIntern());
}
|
|
|
|
/// Analyzes an enum literal (`.foo`): interns the name and produces an
/// `enum_literal` value.
fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const interned_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    return Air.internedToRef(try mod.intern(.{ .enum_literal = interned_name }));
}
|
|
|
|
/// Implements `@intFromEnum`: converts an enum (or tagged union, via its
/// tag) to its integer tag type. Comptime-known operands fold to constants;
/// runtime operands are lowered to a bitcast.
///
/// Fix: the "untagged union" error previously formatted the source location
/// (`.{src}`) into the message instead of the offending union type.
fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);

    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
        .Enum => operand,
        .Union => blk: {
            try sema.resolveTypeFields(operand_ty);
            const tag_ty = operand_ty.unionTagType(mod) orelse {
                return sema.fail(
                    block,
                    operand_src,
                    "untagged union '{}' cannot be converted to integer",
                    .{operand_ty.fmt(mod)},
                );
            };

            break :blk try sema.unionToTag(block, tag_ty, operand, operand_src);
        },
        else => {
            return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{
                operand_ty.fmt(mod),
            });
        },
    };
    const enum_tag_ty = sema.typeOf(enum_tag);
    const int_tag_ty = enum_tag_ty.intTagType(mod);

    // TODO: use correct solution
    // https://github.com/ziglang/zig/issues/15909
    if (enum_tag_ty.enumFieldCount(mod) == 0 and !enum_tag_ty.isNonexhaustiveEnum(mod)) {
        return sema.fail(block, operand_src, "cannot use @intFromEnum on empty enum '{}'", .{
            enum_tag_ty.fmt(mod),
        });
    }

    // A single-valued enum has a comptime-known result regardless of operand.
    if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
        return Air.internedToRef((try mod.getCoerced(opv, int_tag_ty)).toIntern());
    }

    if (try sema.resolveValue(enum_tag)) |enum_tag_val| {
        if (enum_tag_val.isUndef(mod)) {
            return mod.undefRef(int_tag_ty);
        }

        const val = try enum_tag_val.intFromEnum(enum_tag_ty, mod);
        return Air.internedToRef(val.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addBitCast(int_tag_ty, enum_tag);
}
|
|
|
|
/// Implements `@enumFromInt`: converts an integer to an enum value of the
/// destination type. Comptime-known operands are validated against the
/// enum's tags (or the tag integer range for non-exhaustive enums); runtime
/// operands are lowered to an intcast with an optional safety check.
fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@enumFromInt");
    const operand = try sema.resolveInst(extra.rhs);

    if (dest_ty.zigTypeTag(mod) != .Enum) {
        return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)});
    }
    // Only the type check matters here; the returned type is unused.
    _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));

    // Comptime-known operand: validate and coerce at compile time.
    if (try sema.resolveValue(operand)) |int_val| {
        if (dest_ty.isNonexhaustiveEnum(mod)) {
            // Non-exhaustive enums accept any value that fits the tag type.
            const int_tag_ty = dest_ty.intTagType(mod);
            if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
            }
            return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{
                int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod),
            });
        }
        if (int_val.isUndef(mod)) {
            return sema.failWithUseOfUndef(block, operand_src);
        }
        // Exhaustive enums require the value to match a declared tag.
        if (!(try sema.enumHasInt(dest_ty, int_val))) {
            return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{
                dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod),
            });
        }
        return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
    }

    // A `comptime_int` tag type forces the operand to be comptime-known.
    if (dest_ty.intTagType(mod).zigTypeTag(mod) == .ComptimeInt) {
        return sema.failWithNeededComptime(block, operand_src, .{
            .needed_comptime_reason = "value being casted to enum with 'comptime_int' tag type must be comptime-known",
        });
    }

    if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| {
        const result = Air.internedToRef(opv.toIntern());
        // The operand is runtime-known but the result is comptime-known. In
        // this case we still need a safety check.
        // TODO add a safety check here. we can't use is_named_enum_value -
        // it needs to convert the enum back to int and make sure it equals the operand int.
        return result;
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    const result = try block.addTyOp(.intcast, dest_ty, operand);
    // Safety check only applies to exhaustive enums, and only when the
    // backend can lower `is_named_enum_value`.
    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and
        mod.backendSupportsFeature(.is_named_enum_value))
    {
        const ok = try block.addUnOp(.is_named_enum_value, result);
        try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
    }
    return result;
}
|
|
|
|
/// Pointer in, pointer out.
/// Unwraps a pointer-to-optional into a pointer to the payload, optionally
/// with a null-unwrap safety check.
fn zirOptionalPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const optional_ptr = try sema.resolveInst(inst_data.operand);
    // Not an initializing access; the safety-check request is forwarded.
    return sema.analyzeOptionalPayloadPtr(block, inst_data.src(), optional_ptr, safety_check, false);
}
|
|
|
|
/// Given a pointer to an optional, produces a pointer to the payload.
/// When `initializing`, the access also sets the optional's non-null bit
/// (emitting a runtime instruction when the pointer is not comptime-mutable).
/// When `safety_check` is set and the block wants safety, a null-unwrap
/// check is emitted for the runtime path.
fn analyzeOptionalPayloadPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    optional_ptr: Air.Inst.Ref,
    safety_check: bool,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const optional_ptr_ty = sema.typeOf(optional_ptr);
    assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);

    const opt_type = optional_ptr_ty.childType(mod);
    if (opt_type.zigTypeTag(mod) != .Optional) {
        return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)});
    }

    // Result pointer type: same constness/address space as the input pointer,
    // pointing at the optional's child type.
    const child_type = opt_type.optionalChild(mod);
    const child_pointer = try sema.ptrType(.{
        .child = child_type.toIntern(),
        .flags = .{
            .is_const = optional_ptr_ty.isConstPtr(mod),
            .address_space = optional_ptr_ty.ptrAddressSpace(mod),
        },
    });

    // Comptime-known pointer operand.
    if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
        if (initializing) {
            if (!ptr_val.isComptimeMutablePtr(mod)) {
                // If the pointer resulting from this function was stored at comptime,
                // the optional non-null bit would be set that way. But in this case,
                // we need to emit a runtime instruction to do it.
                const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
                try sema.checkKnownAllocPtr(optional_ptr, opt_payload_ptr);
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = child_pointer.toIntern(),
                .addr = .{ .opt_payload = ptr_val.toIntern() },
            } })));
        }
        if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
            if (val.isNull(mod)) {
                return sema.fail(block, src, "unable to unwrap null", .{});
            }
            // The same Value represents the pointer to the optional and the payload.
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = child_pointer.toIntern(),
                .addr = .{ .opt_payload = ptr_val.toIntern() },
            } })));
        }
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, src, null);
    if (safety_check and block.wantSafety()) {
        const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr);
        try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
    }

    if (initializing) {
        const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
        try sema.checkKnownAllocPtr(optional_ptr, opt_payload_ptr);
        return opt_payload_ptr;
    } else {
        return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr);
    }
}
|
|
|
|
/// Value in, value out.
/// Unwraps an optional value (or a C pointer, which is implicitly optional)
/// to its payload, optionally with a null-unwrap safety check.
fn zirOptionalPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const result_ty = switch (operand_ty.zigTypeTag(mod)) {
        .Optional => operand_ty.optionalChild(mod),
        .Pointer => t: {
            // Only C pointers may be unwrapped like optionals.
            if (operand_ty.ptrSize(mod) != .C) {
                return sema.failWithExpectedOptionalType(block, src, operand_ty);
            }
            // TODO https://github.com/ziglang/zig/issues/6597
            // Deliberately keep the C pointer type as-is for now; the code
            // below (building a non-allowzero pointer type) is dead until
            // the issue above is resolved.
            if (true) break :t operand_ty;
            const ptr_info = operand_ty.ptrInfo(mod);
            break :t try sema.ptrType(.{
                .child = ptr_info.child,
                .flags = .{
                    .alignment = ptr_info.flags.alignment,
                    .is_const = ptr_info.flags.is_const,
                    .is_volatile = ptr_info.flags.is_volatile,
                    .is_allowzero = ptr_info.flags.is_allowzero,
                    .address_space = ptr_info.flags.address_space,
                },
            });
        },
        else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
    };

    // Comptime-known operand: unwrap directly or report the null unwrap.
    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        return if (val.optionalValue(mod)) |payload|
            Air.internedToRef(payload.toIntern())
        else
            sema.fail(block, src, "unable to unwrap null", .{});
    }

    try sema.requireRuntimeBlock(block, src, null);
    if (safety_check and block.wantSafety()) {
        const is_non_null = try block.addUnOp(.is_non_null, operand);
        try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
    }
    return block.addTyOp(.optional_payload, result_ty, operand);
}
|
|
|
|
/// Value in, value out
/// Unwraps an error union value to its payload, failing at the type level
/// if the operand is not an error union. No safety check is requested.
fn zirErrUnionPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src = src;
    const operand = try sema.resolveInst(inst_data.operand);
    const err_union_ty = sema.typeOf(operand);
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return sema.fail(
        block,
        operand_src,
        "expected error union type, found '{}'",
        .{err_union_ty.fmt(mod)},
    );
    return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false);
}
|
|
|
|
/// Unwraps an error union value to its payload. A comptime-known error
/// produces a compile error with a comptime error-return trace; a runtime
/// operand optionally gets an unwrap safety check.
fn analyzeErrUnionPayload(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    err_union_ty: Type,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const payload_ty = err_union_ty.errorUnionPayload(mod);

    // Comptime-known operand: either it holds an error (fail now) or we can
    // extract the payload directly.
    if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| {
        if (operand_val.getErrorName(mod).unwrap()) |err_name| {
            return sema.failWithComptimeErrorRetTrace(block, src, err_name);
        }
        const payload = mod.intern_pool.indexToKey(operand_val.toIntern()).error_union.val.payload;
        return Air.internedToRef(payload);
    }

    try sema.requireRuntimeBlock(block, src, null);

    // If the error set has no fields then no safety check is needed.
    const want_check = safety_check and block.wantSafety() and
        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod);
    if (want_check) {
        try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err);
    }

    return block.addTyOp(.unwrap_errunion_payload, payload_ty, operand);
}
|
|
|
|
/// Pointer in, pointer out.
/// Unwraps a pointer-to-error-union into a pointer to the payload; no
/// safety check and no initialization semantics.
fn zirErrUnionPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const err_union_ptr = try sema.resolveInst(inst_data.operand);
    return sema.analyzeErrUnionPayloadPtr(block, inst_data.src(), err_union_ptr, false, false);
}
|
|
|
|
/// Given a pointer to an error union, produces a pointer to the payload.
/// When `initializing`, the access also marks the union as non-error
/// (emitting a runtime instruction when the pointer is not comptime-mutable).
/// When `safety_check` is set and the block wants safety, an unwrap check is
/// emitted for the runtime path (skipped for empty error sets).
fn analyzeErrUnionPayloadPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    safety_check: bool,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag(mod) == .Pointer);

    if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.childType(mod).fmt(mod),
        });
    }

    // Result pointer type: same constness/address space as the input pointer,
    // pointing at the error union's payload type.
    const err_union_ty = operand_ty.childType(mod);
    const payload_ty = err_union_ty.errorUnionPayload(mod);
    const operand_pointer_ty = try sema.ptrType(.{
        .child = payload_ty.toIntern(),
        .flags = .{
            .is_const = operand_ty.isConstPtr(mod),
            .address_space = operand_ty.ptrAddressSpace(mod),
        },
    });

    // Comptime-known pointer operand.
    if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
        if (initializing) {
            if (!ptr_val.isComptimeMutablePtr(mod)) {
                // If the pointer resulting from this function was stored at comptime,
                // the error union error code would be set that way. But in this case,
                // we need to emit a runtime instruction to do it.
                try sema.requireRuntimeBlock(block, src, null);
                const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
                try sema.checkKnownAllocPtr(operand, eu_payload_ptr);
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = operand_pointer_ty.toIntern(),
                .addr = .{ .eu_payload = ptr_val.toIntern() },
            } })));
        }
        if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
            // A comptime-known error is a compile error at the unwrap site.
            if (val.getErrorName(mod).unwrap()) |name| {
                return sema.failWithComptimeErrorRetTrace(block, src, name);
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = operand_pointer_ty.toIntern(),
                .addr = .{ .eu_payload = ptr_val.toIntern() },
            } })));
        }
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, src, null);

    // If the error set has no fields then no safety check is needed.
    if (safety_check and block.wantSafety() and
        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
    {
        try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr);
    }

    if (initializing) {
        const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
        try sema.checkKnownAllocPtr(operand, eu_payload_ptr);
        return eu_payload_ptr;
    } else {
        return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand);
    }
}
|
|
|
|
/// Value in, value out: extracts the error code from an error union value.
fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    // Decode the `un_node` ZIR payload and forward to the shared analysis routine.
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(un_node.operand);
    return sema.analyzeErrUnionCode(block, un_node.src(), operand);
}
|
|
|
|
/// Shared implementation behind `zirErrUnionCode`: given an error union *value*,
/// produce its error code as a value of the union's error set type.
/// Emits a compile error if `operand` is not of error union type.
fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.fmt(mod),
        });
    }

    // The result type is the error set half of the error union.
    const result_ty = operand_ty.errorUnionSet(mod);

    // Comptime path: the operand value is known, so intern the error value directly.
    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        return Air.internedToRef((try mod.intern(.{ .err = .{
            .ty = result_ty.toIntern(),
            .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
        } })));
    }

    // Runtime path: emit an AIR instruction to unwrap the error code.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.unwrap_errunion_err, result_ty, operand);
}
|
|
|
|
/// Pointer in, value out: extracts the error code from an error union through a pointer.
fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    // Decode the `un_node` ZIR payload and forward to the shared analysis routine.
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(un_node.operand);
    return sema.analyzeErrUnionCodePtr(block, un_node.src(), operand);
}
|
|
|
|
/// Shared implementation behind `zirErrUnionCodePtr`: given a *pointer* to an
/// error union, produce the error code as a value of the union's error set type.
/// The caller guarantees `operand` is a pointer; a compile error is emitted if
/// its pointee is not an error union.
fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag(mod) == .Pointer);

    if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.childType(mod).fmt(mod),
        });
    }

    // Result type is the error set half of the pointed-to error union.
    const result_ty = operand_ty.childType(mod).errorUnionSet(mod);

    // Comptime path: if both the pointer and its pointee are comptime-known,
    // intern the error value directly. A failed deref falls through to runtime.
    if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
        if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
            // The comptime-known error union must actually hold an error here.
            assert(val.getErrorName(mod) != .none);
            return Air.internedToRef((try mod.intern(.{ .err = .{
                .ty = result_ty.toIntern(),
                .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
            } })));
        }
    }

    // Runtime path: emit an AIR instruction that unwraps through the pointer.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand);
}
|
|
|
|
/// Analyzes a `func` ZIR instruction: resolves the return type (which may be a
/// single inline ref or a full comptime body), collects body source locations,
/// chooses a calling convention, and delegates creation to `funcCommon`.
/// `inferred_error_set` is true for `fn f() !T` style declarations.
fn zirFunc(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    inferred_error_set: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
    const target = sema.mod.getTarget();
    const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };

    // Walks the trailing extra data; order of fields matters.
    var extra_index = extra.end;

    const ret_ty: Type = switch (extra.data.ret_body_len) {
        // No return type expression: implicitly `void`.
        0 => Type.void,
        // Single-instruction return type: resolve the ref directly.
        1 => blk: {
            const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
            extra_index += 1;
            if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| {
                break :blk ret_ty;
            } else |err| switch (err) {
                error.GenericPoison => {
                    // Return type depends on generic parameters; resolve at instantiation.
                    break :blk Type.generic_poison;
                },
                else => |e| return e,
            }
        },
        // Multi-instruction return type expression: evaluate it as a generic body.
        else => blk: {
            const ret_ty_body = sema.code.bodySlice(extra_index, extra.data.ret_body_len);
            extra_index += ret_ty_body.len;

            const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, .{
                .needed_comptime_reason = "return type must be comptime-known",
            });
            break :blk ret_ty_val.toType();
        },
    };

    // Source locations are only present (and only read) when there is a body.
    var src_locs: Zir.Inst.Func.SrcLocs = undefined;
    const has_body = extra.data.body_len != 0;
    if (has_body) {
        extra_index += extra.data.body_len;
        src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
    }

    // If this instruction has a body it means it's the type of the `owner_decl`
    // otherwise it's a function type without a `callconv` attribute and should
    // never be `.C`.
    const cc: std.builtin.CallingConvention = if (has_body and mod.declPtr(block.src_decl).is_exported)
        .C
    else
        .Unspecified;

    return sema.funcCommon(
        block,
        inst_data.src_node,
        inst,
        .none, // alignment
        target_util.defaultAddressSpace(target, .function), // address_space
        .default, // section
        cc,
        ret_ty,
        false, // var_args
        inferred_error_set,
        false, // is_extern
        has_body,
        src_locs,
        null, // opt_lib_name
        0, // noalias_bits
        false, // is_noinline
    );
}
|
|
|
|
/// Evaluates a comptime body (e.g. a return type expression) belonging to a
/// function prototype, coercing the result to `dest_ty` and requiring it to be
/// a defined comptime value. If evaluation hits `error.GenericPoison`, returns
/// the appropriate poison sentinel (`generic_poison_type` for type results)
/// instead of propagating, so the caller can defer resolution to instantiation.
fn resolveGenericBody(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    body: []const Zir.Inst.Index,
    func_inst: Zir.Inst.Index,
    dest_ty: Type,
    reason: NeededComptimeReason,
) !Value {
    assert(body.len != 0);

    // On success the inner block returns directly; only errors reach `err`.
    const err = err: {
        // Make sure any nested param instructions don't clobber our work.
        const prev_params = block.params;
        const prev_no_partial_func_type = sema.no_partial_func_ty;
        const prev_generic_owner = sema.generic_owner;
        const prev_generic_call_src = sema.generic_call_src;
        const prev_generic_call_decl = sema.generic_call_decl;
        block.params = .{};
        sema.no_partial_func_ty = true;
        sema.generic_owner = .none;
        sema.generic_call_src = .unneeded;
        sema.generic_call_decl = .none;
        // Restore the saved Sema/Block state on every exit path.
        defer {
            block.params = prev_params;
            sema.no_partial_func_ty = prev_no_partial_func_type;
            sema.generic_owner = prev_generic_owner;
            sema.generic_call_src = prev_generic_call_src;
            sema.generic_call_decl = prev_generic_call_decl;
        }

        const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err;
        const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err;
        const val = sema.resolveConstDefinedValue(block, src, result, reason) catch |err| break :err err;
        return val;
    };
    switch (err) {
        error.GenericPoison => {
            // Result depends on generic parameters: substitute a poison value
            // matching the expected result kind.
            if (dest_ty.toIntern() == .type_type) {
                return Value.generic_poison_type;
            } else {
                return Value.generic_poison;
            }
        },
        else => |e| return e,
    }
}
|
|
|
|
/// Given a library name, examines if the library name should end up in
/// `link.File.Options.system_libs` table (for example, libc is always
/// specified via dedicated flag `link_libc` instead),
/// and puts it there if it doesn't exist.
/// It also dupes the library name which can then be saved as part of the
/// respective `Decl` (either `ExternFn` or `Var`).
/// The liveness of the duped library name is tied to liveness of `Module`.
/// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
fn handleExternLibName(
    sema: *Sema,
    block: *Block,
    src_loc: LazySrcLoc,
    lib_name: []const u8,
) CompileError!void {
    blk: {
        const mod = sema.mod;
        const comp = mod.comp;
        const target = mod.getTarget();
        log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
        // libc is linked via the dedicated `link_libc` flag, never as a system lib.
        if (target.is_libc_lib_name(lib_name)) {
            if (!comp.config.link_libc) {
                return sema.fail(
                    block,
                    src_loc,
                    "dependency on libc must be explicitly specified in the build command",
                    .{},
                );
            }
            break :blk;
        }
        // Likewise libc++ uses the dedicated `link_libcpp` flag.
        if (target.is_libcpp_lib_name(lib_name)) {
            if (!comp.config.link_libcpp) return sema.fail(
                block,
                src_loc,
                "dependency on libc++ must be explicitly specified in the build command",
                .{},
            );
            break :blk;
        }
        // And libunwind uses `link_libunwind`.
        if (mem.eql(u8, lib_name, "unwind")) {
            if (!comp.config.link_libunwind) return sema.fail(
                block,
                src_loc,
                "dependency on libunwind must be explicitly specified in the build command",
                .{},
            );
            break :blk;
        }
        // Any other system library implies dynamic linking, which requires PIC
        // (except on wasm, which is exempted here).
        if (!target.isWasm() and !block.ownerModule().pic) {
            return sema.fail(
                block,
                src_loc,
                "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
                .{ lib_name, lib_name },
            );
        }
        // Register the library with the compilation's link step.
        comp.addLinkLib(lib_name) catch |err| {
            return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
                lib_name, @errorName(err),
            });
        };
    }
}
|
|
|
|
/// These are calling conventions that are confirmed to work with variadic functions.
/// Any calling conventions not included here are either not yet verified to work with variadic
/// functions or there are no more other calling conventions that support variadic functions.
const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention{
    .C,
};
/// Returns whether `cc` is one of the calling conventions known to support
/// variadic functions (membership test over the list above).
fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention) bool {
    for (calling_conventions_supporting_var_args) |supported_cc| {
        if (cc == supported_cc) return true;
    }
    return false;
}
|
|
/// Emits a compile error (with a note listing the supported calling
/// conventions) if `cc` does not support variadic functions.
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention) CompileError!void {
    // Helper type whose `format` renders the supported-CC list as
    // a comma-separated sequence like `'.C'` for the error note below.
    const CallingConventionsSupportingVarArgsList = struct {
        pub fn format(_: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            _ = fmt;
            _ = options;
            for (calling_conventions_supporting_var_args, 0..) |cc_inner, i| {
                if (i != 0)
                    try writer.writeAll(", ");
                try writer.print("'.{s}'", .{@tagName(cc_inner)});
            }
        }
    };

    if (!callConvSupportsVarArgs(cc)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "variadic function does not support '.{s}' calling convention", .{@tagName(cc)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "supported calling conventions: {}", .{CallingConventionsSupportingVarArgsList{}});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
}
|
|
|
|
/// How a function's `linksection` was specified, as passed to `funcCommon`.
const Section = union(enum) {
    // Section is not yet known (depends on generic instantiation); treated
    // like the other "generic poison" prototype pieces in `funcCommon`.
    generic,
    // No explicit section; the default is used.
    default,
    // An explicitly named linker section.
    explicit: InternPool.NullTerminatedString,
};
|
|
|
|
/// Shared back end for creating a function or function type from ZIR (called
/// by `zirFunc` and related instructions). Validates each parameter against
/// the calling convention, computes whether the prototype is generic, then
/// creates the appropriate InternPool entry along one of four paths:
/// * generic instantiation (`sema.generic_owner` set) -> `getFuncInstance`,
/// * declaration with inferred error set -> `getFuncDeclIes`,
/// * extern function -> `getExternFunc`,
/// * declaration with a body -> `getFuncDecl`,
/// * otherwise a bare function type.
/// All paths funnel into `finishFunc` for final validation.
fn funcCommon(
    sema: *Sema,
    block: *Block,
    src_node_offset: i32,
    func_inst: Zir.Inst.Index,
    /// null means generic poison
    alignment: ?Alignment,
    /// null means generic poison
    address_space: ?std.builtin.AddressSpace,
    section: Section,
    /// null means generic poison
    cc: ?std.builtin.CallingConvention,
    /// this might be Type.generic_poison
    bare_return_type: Type,
    var_args: bool,
    inferred_error_set: bool,
    is_extern: bool,
    has_body: bool,
    src_locs: Zir.Inst.Func.SrcLocs,
    opt_lib_name: ?[]const u8,
    noalias_bits: u32,
    is_noinline: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const target = mod.getTarget();
    const ip = &mod.intern_pool;
    const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
    const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
    const func_src = LazySrcLoc.nodeOffset(src_node_offset);

    // The prototype is generic if any of its pieces is still unknown (poison).
    var is_generic = bare_return_type.isGenericPoison() or
        alignment == null or
        address_space == null or
        section == .generic or
        cc == null;

    if (var_args) {
        if (is_generic) {
            return sema.fail(block, func_src, "generic function cannot be variadic", .{});
        }
        try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?);
    }

    // No generic owner means this is the source declaration itself, not a
    // generic instantiation.
    const is_source_decl = sema.generic_owner == .none;

    // In the case of generic calling convention, or generic alignment, we use
    // default values which are only meaningful for the generic function, *not*
    // the instantiation, which can depend on comptime parameters.
    // Related proposal: https://github.com/ziglang/zig/issues/11834
    const cc_resolved = cc orelse .Unspecified;
    var comptime_bits: u32 = 0;
    // Per-parameter validation pass: computes comptime_bits and genericness,
    // and rejects parameter/CC combinations that cannot be lowered.
    for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
        const param_ty = Type.fromInterned(param_ty_ip);
        // noalias bits only cover the first 32 parameters; beyond that the
        // u5 cast fails and the parameter is treated as not-noalias.
        const is_noalias = blk: {
            const index = std.math.cast(u5, i) orelse break :blk false;
            break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
        };
        const param_src: LazySrcLoc = .{ .fn_proto_param = .{
            .decl = block.src_decl,
            .fn_proto_node_offset = src_node_offset,
            .param_index = @intCast(i),
        } };
        const requires_comptime = try sema.typeRequiresComptime(param_ty);
        if (param_is_comptime or requires_comptime) {
            comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
        }
        const this_generic = param_ty.isGenericPoison();
        is_generic = is_generic or this_generic;
        // Comptime and generic parameters are Zig-only concepts; reject them
        // under calling conventions that forbid Zig-specific types.
        if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
            return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
        }
        if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
            return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
        }
        if (!param_ty.isValidParamType(mod)) {
            const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
            return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{
                opaque_str, param_ty.fmt(mod),
            });
        }
        // Non-Zig calling conventions require extern-compatible parameter types.
        if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
                    param_ty.fmt(mod), @tagName(cc_resolved),
                });
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(param_src, mod), param_ty, .param_ty);

                try sema.addDeclaredHereNote(msg, param_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        // Comptime-only parameter types must be marked `comptime` in source
        // (only enforced on the source declaration of a function with a body).
        if (is_source_decl and requires_comptime and !param_is_comptime and has_body and !block.is_comptime) {
            const msg = msg: {
                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
                    param_ty.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(param_src, mod), param_ty);

                try sema.addDeclaredHereNote(msg, param_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        // `noalias` only makes sense on pointers (or pointer-like optionals).
        if (is_source_decl and !this_generic and is_noalias and
            !(param_ty.zigTypeTag(mod) == .Pointer or param_ty.isPtrLikeOptional(mod)))
        {
            return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
        }
    }

    // Determine whether the return type is comptime-only and/or poison;
    // a GenericPoison error here also marks the whole prototype generic.
    var ret_ty_requires_comptime = false;
    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
        ret_ty_requires_comptime = ret_comptime;
        break :rp bare_return_type.isGenericPoison();
    } else |err| switch (err) {
        error.GenericPoison => rp: {
            is_generic = true;
            break :rp true;
        },
        else => |e| return e,
    };
    const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;

    const param_types = block.params.items(.ty);

    // Path 1: generic instantiation. Everything must be fully resolved by now.
    if (!is_source_decl) {
        assert(has_body);
        assert(!is_generic);
        assert(comptime_bits == 0);
        assert(cc != null);
        assert(section != .generic);
        assert(address_space != null);
        assert(!var_args);
        if (inferred_error_set) {
            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
        }
        const func_index = try ip.getFuncInstance(gpa, .{
            .param_types = param_types,
            .noalias_bits = noalias_bits,
            .bare_return_type = bare_return_type.toIntern(),
            .cc = cc_resolved,
            .alignment = alignment.?,
            .section = switch (section) {
                .generic => unreachable,
                .default => .none,
                .explicit => |x| x.toOptional(),
            },
            .is_noinline = is_noinline,
            .inferred_error_set = inferred_error_set,
            .generic_owner = sema.generic_owner,
            .comptime_args = sema.comptime_args,
        });
        return finishFunc(
            sema,
            block,
            func_index,
            .none,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    // extern_func and func_decl functions take ownership of `sema.owner_decl`.
    sema.owner_decl.@"linksection" = switch (section) {
        .generic => .none,
        .default => .none,
        .explicit => |section_name| section_name.toOptional(),
    };
    sema.owner_decl.alignment = alignment orelse .none;
    sema.owner_decl.@"addrspace" = address_space orelse .generic;

    // Path 2: source declaration with an inferred error set.
    if (inferred_error_set) {
        assert(!is_extern);
        assert(has_body);
        if (!ret_poison)
            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
        const func_index = try ip.getFuncDeclIes(gpa, .{
            .owner_decl = sema.owner_decl_index,

            .param_types = param_types,
            .noalias_bits = noalias_bits,
            .comptime_bits = comptime_bits,
            .bare_return_type = bare_return_type.toIntern(),
            .cc = cc,
            .alignment = alignment,
            .section_is_generic = section == .generic,
            .addrspace_is_generic = address_space == null,
            .is_var_args = var_args,
            .is_generic = final_is_generic,
            .is_noinline = is_noinline,

            .zir_body_inst = try ip.trackZir(gpa, block.getFileScope(mod), func_inst),
            .lbrace_line = src_locs.lbrace_line,
            .rbrace_line = src_locs.rbrace_line,
            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
        });
        return finishFunc(
            sema,
            block,
            func_index,
            .none,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    // Remaining paths all need the interned function type.
    const func_ty = try ip.getFuncType(gpa, .{
        .param_types = param_types,
        .noalias_bits = noalias_bits,
        .comptime_bits = comptime_bits,
        .return_type = bare_return_type.toIntern(),
        .cc = cc,
        .alignment = alignment,
        .section_is_generic = section == .generic,
        .addrspace_is_generic = address_space == null,
        .is_var_args = var_args,
        .is_generic = final_is_generic,
        .is_noinline = is_noinline,
    });

    // Path 3: extern function (no body). Must be fully non-generic.
    if (is_extern) {
        assert(comptime_bits == 0);
        assert(cc != null);
        assert(section != .generic);
        assert(address_space != null);
        assert(!is_generic);
        if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, .{
            .node_offset_lib_name = src_node_offset,
        }, lib_name);
        const func_index = try ip.getExternFunc(gpa, .{
            .ty = func_ty,
            .decl = sema.owner_decl_index,
            .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name),
        });
        return finishFunc(
            sema,
            block,
            func_index,
            func_ty,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    // Path 4: ordinary function declaration with a body.
    if (has_body) {
        const func_index = try ip.getFuncDecl(gpa, .{
            .owner_decl = sema.owner_decl_index,
            .ty = func_ty,
            .cc = cc,
            .is_noinline = is_noinline,
            .zir_body_inst = try ip.trackZir(gpa, block.getFileScope(mod), func_inst),
            .lbrace_line = src_locs.lbrace_line,
            .rbrace_line = src_locs.rbrace_line,
            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
        });
        return finishFunc(
            sema,
            block,
            func_index,
            func_ty,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    // Path 5: no body, not extern — this is a bare function *type*.
    return finishFunc(
        sema,
        block,
        .none,
        func_ty,
        ret_poison,
        bare_return_type,
        ret_ty_src,
        cc_resolved,
        is_source_decl,
        ret_ty_requires_comptime,
        func_inst,
        cc_src,
        is_noinline,
        is_generic,
        final_is_generic,
    );
}
|
|
|
|
/// Final validation funnel for every function-creation path in `funcCommon`:
/// checks the return type, calling-convention/target compatibility, the
/// comptime-only-return-type rule, and the noinline/Inline conflict, then
/// returns either the interned function (`opt_func_index`) or, when that is
/// `.none`, the interned function type `func_ty` as the result ref.
fn finishFunc(
    sema: *Sema,
    block: *Block,
    opt_func_index: InternPool.Index,
    func_ty: InternPool.Index,
    ret_poison: bool,
    bare_return_type: Type,
    ret_ty_src: LazySrcLoc,
    cc_resolved: std.builtin.CallingConvention,
    is_source_decl: bool,
    ret_ty_requires_comptime: bool,
    func_inst: Zir.Inst.Index,
    cc_src: LazySrcLoc,
    is_noinline: bool,
    is_generic: bool,
    final_is_generic: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;
    const target = mod.getTarget();

    // Prefer the return type recorded on the interned function; fall back to
    // the bare (possibly poison) type when there is no function or it's poison.
    const return_type: Type = if (opt_func_index == .none or ret_poison)
        bare_return_type
    else
        Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));

    if (!return_type.isValidReturnType(mod)) {
        const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
        return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{
            opaque_str, return_type.fmt(mod),
        });
    }
    // Non-Zig calling conventions require an extern-compatible return type.
    if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
        !try sema.validateExternType(return_type, .ret_ty))
    {
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
                return_type.fmt(mod), @tagName(cc_resolved),
            });
            errdefer msg.destroy(gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(ret_ty_src, mod), return_type, .ret_ty);

            try sema.addDeclaredHereNote(msg, return_type);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // If the return type is comptime-only but not dependent on parameters then
    // all parameter types also need to be comptime.
    if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime and !block.is_comptime) comptime_check: {
        // If every parameter is already comptime, the rule is satisfied.
        for (block.params.items(.is_comptime)) |is_comptime| {
            if (!is_comptime) break;
        } else break :comptime_check;

        const msg = try sema.errMsg(
            block,
            ret_ty_src,
            "function with comptime-only return type '{}' requires all parameters to be comptime",
            .{return_type.fmt(mod)},
        );
        try sema.explainWhyTypeIsComptime(msg, sema.owner_decl.toSrcLoc(ret_ty_src, mod), return_type);

        // Add one note per non-comptime parameter, pointing at its ZIR source.
        const tags = sema.code.instructions.items(.tag);
        const data = sema.code.instructions.items(.data);
        const param_body = sema.code.getParamBody(func_inst);
        for (
            block.params.items(.is_comptime),
            block.params.items(.name),
            param_body[0..block.params.len],
        ) |is_comptime, name_nts, param_index| {
            if (!is_comptime) {
                const param_src = switch (tags[@intFromEnum(param_index)]) {
                    .param => data[@intFromEnum(param_index)].pl_tok.src(),
                    .param_anytype => data[@intFromEnum(param_index)].str_tok.src(),
                    else => unreachable,
                };
                const name = sema.code.nullTerminatedString(name_nts);
                if (name.len != 0) {
                    try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{name});
                } else {
                    try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
                }
            }
        }
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Reject calling conventions that are not available on the target
    // architecture; a non-null string names the architectures that allow it.
    const arch = target.cpu.arch;
    if (@as(?[]const u8, switch (cc_resolved) {
        .Unspecified, .C, .Naked, .Async, .Inline => null,
        .Interrupt => switch (arch) {
            .x86, .x86_64, .avr, .msp430 => null,
            else => "x86, x86_64, AVR, and MSP430",
        },
        .Signal => switch (arch) {
            .avr => null,
            else => "AVR",
        },
        .Stdcall, .Fastcall, .Thiscall => switch (arch) {
            .x86 => null,
            else => "x86",
        },
        .Vectorcall => switch (arch) {
            .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
            else => "x86 and AArch64",
        },
        .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
            .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
            else => "ARM",
        },
        .SysV, .Win64 => switch (arch) {
            .x86_64 => null,
            else => "x86_64",
        },
        .Kernel => switch (arch) {
            .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
            else => "nvptx, amdgcn and SPIR-V",
        },
        .Fragment, .Vertex => switch (arch) {
            .spirv32, .spirv64 => null,
            else => "SPIR-V",
        },
    })) |allowed_platform| {
        return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
            @tagName(cc_resolved),
            allowed_platform,
            @tagName(arch),
        });
    }

    if (cc_resolved == .Inline and is_noinline) {
        return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
    }
    // Callers that forbid partial function types get GenericPoison instead.
    if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;

    if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
        // Make sure that StackTrace's fields are resolved so that the backend can
        // lower this fn type.
        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(unresolved_stack_trace_ty);
    }

    return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
}
|
|
|
|
/// Analyzes a `param` ZIR instruction: evaluates the parameter's type body,
/// appends the parameter to `block.params`, and maps the instruction to either
/// `.generic_poison` (comptime/generic parameters) or a dummy runtime `alloc`.
/// `comptime_syntax` is true when the parameter was written with `comptime`.
fn zirParam(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime_syntax: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_tok;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
    const param_name: Zir.NullTerminatedString = extra.data.name;
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);

    const param_ty = param_ty: {
        // On success the inner block breaks to `param_ty`; only errors reach `err`.
        const err = err: {
            // Make sure any nested param instructions don't clobber our work.
            const prev_params = block.params;
            const prev_no_partial_func_type = sema.no_partial_func_ty;
            const prev_generic_owner = sema.generic_owner;
            const prev_generic_call_src = sema.generic_call_src;
            const prev_generic_call_decl = sema.generic_call_decl;
            block.params = .{};
            sema.no_partial_func_ty = true;
            sema.generic_owner = .none;
            sema.generic_call_src = .unneeded;
            sema.generic_call_decl = .none;
            // Restore the saved Sema/Block state on every exit path.
            defer {
                block.params = prev_params;
                sema.no_partial_func_ty = prev_no_partial_func_type;
                sema.generic_owner = prev_generic_owner;
                sema.generic_call_src = prev_generic_call_src;
                sema.generic_call_decl = prev_generic_call_decl;
            }

            if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
                if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
                    break :param_ty param_ty;
                } else |err| break :err err;
            } else |err| break :err err;
        };
        switch (err) {
            error.GenericPoison => {
                // The type is not available until the generic instantiation.
                // We result the param instruction with a poison value and
                // insert an anytype parameter.
                try block.params.append(sema.arena, .{
                    .ty = .generic_poison_type,
                    .is_comptime = comptime_syntax,
                    .name = param_name,
                });
                sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
                return;
            },
            else => |e| return e,
        }
    };

    // A parameter is comptime if its type requires comptime or it was
    // written with `comptime` syntax.
    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
        error.GenericPoison => {
            // The type is not available until the generic instantiation.
            // We result the param instruction with a poison value and
            // insert an anytype parameter.
            try block.params.append(sema.arena, .{
                .ty = .generic_poison_type,
                .is_comptime = comptime_syntax,
                .name = param_name,
            });
            sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
            return;
        },
        else => |e| return e,
    } or comptime_syntax;

    try block.params.append(sema.arena, .{
        .ty = param_ty.toIntern(),
        .is_comptime = comptime_syntax,
        .name = param_name,
    });

    if (is_comptime) {
        // If this is a comptime parameter we can add a constant generic_poison
        // since this is also a generic parameter.
        sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
    } else {
        // Otherwise we need a dummy runtime instruction.
        const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        try sema.air_instructions.append(sema.gpa, .{
            .tag = .alloc,
            .data = .{ .ty = param_ty },
        });
        sema.inst_map.putAssumeCapacityNoClobber(inst, result_index.toRef());
    }
}
|
|
|
|
/// Analyzes a `param_anytype` ZIR instruction: records an `anytype` parameter
/// (generic poison type) in `block.params` and maps the instruction to
/// `.generic_poison`.
fn zirParamAnytype(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime_syntax: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const param_name: Zir.NullTerminatedString = inst_data.start;

    // We are evaluating a generic function without any comptime args provided.

    try block.params.append(sema.arena, .{
        .ty = .generic_poison_type,
        .is_comptime = comptime_syntax,
        .name = param_name,
    });
    sema.inst_map.putAssumeCapacity(inst, .generic_poison);
}
|
|
|
|
/// Analyzes an `as_node` ZIR instruction: coerces an operand to an explicit
/// destination type (the `@as` form).
fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const as_extra = sema.code.extraData(Zir.Inst.As, pl_node.payload_index).data;
    // `false`: coercion to comptime_int is permitted here.
    return sema.analyzeAs(block, pl_node.src(), as_extra.dest_type, as_extra.operand, false);
}
|
|
|
|
/// Analyzes an `as_shift_operand` ZIR instruction: like `zirAsNode`, but with
/// `no_cast_to_comptime_int = true` for the right-hand side of a shift.
fn zirAsShiftOperand(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const as_extra = sema.code.extraData(Zir.Inst.As, pl_node.payload_index).data;
    return sema.analyzeAs(block, pl_node.src(), as_extra.dest_type, as_extra.operand, true);
}
|
|
|
|
/// Shared implementation for `@as`-style coercions. Resolves the destination
/// type and coerces `zir_operand` to it. Any `GenericPoison` encountered while
/// resolving the destination type makes this a no-op that returns the operand
/// unchanged; likewise a `var_args_param_type` destination passes the operand
/// through untouched.
fn analyzeAs(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_dest_type: Zir.Inst.Ref,
    zir_operand: Zir.Inst.Ref,
    no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand = try sema.resolveInst(zir_operand);
    // Arguments to variadic parameters are not coerced.
    if (zir_dest_type == .var_args_param_type) return operand;
    const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) {
        error.GenericPoison => return operand,
        else => |e| return e,
    };
    if (operand_air_inst == .var_args_param_type) return operand;
    const dest_ty = sema.analyzeAsType(block, src, operand_air_inst) catch |err| switch (err) {
        error.GenericPoison => return operand,
        else => |e| return e,
    };
    const dest_ty_tag = dest_ty.zigTypeTagOrPoison(mod) catch |err| switch (err) {
        error.GenericPoison => return operand,
    };
    if (dest_ty_tag == .NoReturn) {
        return sema.fail(block, src, "cannot cast to noreturn", .{});
    }
    // A `ret_type` destination means this coercion feeds a return expression,
    // which enables return-specific coercion behavior in `coerceExtra`.
    const is_ret = if (zir_dest_type.toIndex()) |ptr_index|
        sema.code.instructions.items(.tag)[@intFromEnum(ptr_index)] == .ret_type
    else
        false;
    return sema.coerceExtra(block, dest_ty, operand, src, .{ .is_ret = is_ret, .no_cast_to_comptime_int = no_cast_to_comptime_int }) catch |err| switch (err) {
        // NotCoercible is reported as a compile error inside coerceExtra.
        error.NotCoercible => unreachable,
        else => |e| return e,
    };
}
|
|
|
|
/// Analyze an `@intFromPtr` builtin call. Accepts a pointer or a vector of
/// pointers and produces `usize` or a vector of `usize` respectively.
fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    // For vector operands, the pointer checks below apply to the element type.
    const ptr_ty = operand_ty.scalarType(mod);
    const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
    if (!ptr_ty.isPtrAtRuntime(mod)) {
        return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)});
    }
    const pointee_ty = ptr_ty.childType(mod);
    // A pointer to a comptime-only type has no runtime address.
    if (try sema.typeRequiresComptime(ptr_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);
            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(ptr_src, mod), pointee_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    // Comptime path: if the operand value is known and integer-backed,
    // compute the address(es) at compile time. The labeled block is broken
    // out of if any vector element turns out not to be an integer pointer.
    if (try sema.resolveValueIntable(operand)) |operand_val| ct: {
        if (!is_vector) {
            return Air.internedToRef((try mod.intValue(
                Type.usize,
                (try operand_val.getUnsignedIntAdvanced(mod, sema)).?,
            )).toIntern());
        }
        const len = operand_ty.vectorLen(mod);
        const dest_ty = try mod.vectorType(.{ .child = .usize_type, .len = len });
        const new_elems = try sema.arena.alloc(InternPool.Index, len);
        for (new_elems, 0..) |*new_elem, i| {
            const ptr_val = try operand_val.elemValue(mod, i);
            const addr = try ptr_val.getUnsignedIntAdvanced(mod, sema) orelse {
                // A vector element wasn't an integer pointer. This is a runtime operation.
                break :ct;
            };
            new_elem.* = (try mod.intValue(
                Type.usize,
                addr,
            )).toIntern();
        }
        return Air.internedToRef(try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = new_elems },
        } }));
    }
    // Runtime path: emit an `int_from_ptr` instruction per scalar/element.
    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
    if (!is_vector) {
        return block.addUnOp(.int_from_ptr, operand);
    }
    const len = operand_ty.vectorLen(mod);
    const dest_ty = try mod.vectorType(.{ .child = .usize_type, .len = len });
    const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
    for (new_elems, 0..) |*new_elem, i| {
        const idx_ref = try mod.intRef(Type.usize, i);
        const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
        new_elem.* = try block.addUnOp(.int_from_ptr, old_elem);
    }
    return block.addAggregateInit(dest_ty, new_elems);
}
|
|
|
|
/// Analyze a `field_val` ZIR instruction: load the value of `lhs.field` where
/// the field name is encoded directly in the instruction.
fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const name_src: LazySrcLoc = .{ .node_offset_field_name = pl_node.src_node };
    const field = sema.code.extraData(Zir.Inst.Field, pl_node.payload_index).data;
    // Intern the field name string for lookup.
    const interned_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(field.field_name_start));
    const object = try sema.resolveInst(field.lhs);
    return sema.fieldVal(block, pl_node.src(), object, interned_name, name_src);
}
|
|
|
|
/// Analyze a `field_ptr` ZIR instruction: compute a pointer to `lhs.field`
/// where the field name is encoded directly in the instruction.
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const name_src: LazySrcLoc = .{ .node_offset_field_name = pl_node.src_node };
    const field = sema.code.extraData(Zir.Inst.Field, pl_node.payload_index).data;
    // Intern the field name string for lookup.
    const interned_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(field.field_name_start));
    const base_ptr = try sema.resolveInst(field.lhs);
    // `initializing` is false: this is a plain field access, not a struct init.
    return sema.fieldPtr(block, pl_node.src(), base_ptr, interned_name, name_src, false);
}
|
|
|
|
/// Analyze a `struct_init_field_ptr` ZIR instruction: a pointer to a field
/// being assigned inside a struct-initialization expression.
fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const name_src: LazySrcLoc = .{ .node_offset_field_name_init = pl_node.src_node };
    const field = sema.code.extraData(Zir.Inst.Field, pl_node.payload_index).data;
    const interned_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(field.field_name_start));
    const base_ptr = try sema.resolveInst(field.lhs);
    const container_ty = sema.typeOf(base_ptr).childType(mod);
    // Only structs and unions can be initialized with field-init syntax.
    switch (container_ty.zigTypeTag(mod)) {
        .Struct, .Union => {},
        else => return sema.failWithStructInitNotSupported(block, src, container_ty),
    }
    // `initializing` is true: the pointer targets a field being initialized.
    return sema.fieldPtr(block, src, base_ptr, interned_name, name_src, true);
}
|
|
|
|
/// Analyze a `field_val_named` ZIR instruction: `@field(lhs, name)` used as a
/// value, where the name is an arbitrary comptime-known expression.
fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = pl_node.src_node };
    const field = sema.code.extraData(Zir.Inst.FieldNamed, pl_node.payload_index).data;
    const object = try sema.resolveInst(field.lhs);
    // The field name expression must resolve to a comptime-known string.
    const interned_name = try sema.resolveConstStringIntern(block, name_src, field.field_name, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
    return sema.fieldVal(block, pl_node.src(), object, interned_name, name_src);
}
|
|
|
|
/// Analyze a `field_ptr_named` ZIR instruction: a pointer to
/// `@field(lhs, name)`, where the name is a comptime-known expression.
fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = pl_node.src_node };
    const field = sema.code.extraData(Zir.Inst.FieldNamed, pl_node.payload_index).data;
    const base_ptr = try sema.resolveInst(field.lhs);
    // The field name expression must resolve to a comptime-known string.
    const interned_name = try sema.resolveConstStringIntern(block, name_src, field.field_name, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
    // `initializing` is false: this is a plain field access.
    return sema.fieldPtr(block, pl_node.src(), base_ptr, interned_name, name_src, false);
}
|
|
|
|
/// Analyze an `@intCast` builtin call: resolve the destination type and
/// operand, then delegate to `intCast` with runtime safety enabled.
fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = pl_node.src_node };
    const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, bin.lhs, .remove_eu_opt, "@intCast");
    const operand = try sema.resolveInst(bin.rhs);

    // The explicit builtin always requests runtime safety checks.
    return sema.intCast(block, src, dest_ty, src, operand, operand_src, true);
}
|
|
|
|
/// Implementation of integer casting (`@intCast` and implicit int casts).
/// Handles scalars and vectors, comptime-known operands, and — when
/// `runtime_safety` is requested and the block wants safety — emits range
/// checks that panic with `cast_truncated_data` / `negative_to_unsigned`.
fn intCast(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    runtime_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    // Both sides must be integers or vectors of integers (comptime_int ok).
    const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
    const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);

    if (try sema.isComptimeKnown(operand)) {
        // Comptime-known values go through normal coercion, which range-checks.
        return sema.coerce(block, dest_ty, operand, operand_src);
    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
    }

    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;

    if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
        // requirement: intCast(u0, input) iff input == 0
        if (runtime_safety and block.wantSafety()) {
            try sema.requireRuntimeBlock(block, src, operand_src);
            const wanted_info = dest_scalar_ty.intInfo(mod);
            const wanted_bits = wanted_info.bits;

            if (wanted_bits == 0) {
                // A zero-bit destination only admits the value 0; verify the
                // runtime operand is (element-wise) equal to zero.
                const ok = if (is_vector) ok: {
                    const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0));
                    const zero_inst = Air.internedToRef(zeros.toIntern());
                    const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
                    const all_in_range = try block.addInst(.{
                        .tag = .reduce,
                        .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
                    const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst);
                    break :ok is_in_range;
                };
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            }
        }

        // Single-possible-value destination: the result is that value.
        return Air.internedToRef(opv.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    if (runtime_safety and block.wantSafety()) {
        const actual_info = operand_scalar_ty.intInfo(mod);
        const wanted_info = dest_scalar_ty.intInfo(mod);
        const actual_bits = actual_info.bits;
        const wanted_bits = wanted_info.bits;
        // Magnitude bits, excluding the sign bit if present.
        const actual_value_bits = actual_bits - @intFromBool(actual_info.signedness == .signed);
        const wanted_value_bits = wanted_bits - @intFromBool(wanted_info.signedness == .signed);

        // range shrinkage
        // requirement: int value fits into target type
        if (wanted_value_bits < actual_value_bits) {
            // Check via `dest_max - operand` (wrapping): the operand is in
            // range iff the difference is within the destination's range.
            const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
            const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
            const dest_max = Air.internedToRef(dest_max_val.toIntern());
            const diff = try block.addBinOp(.sub_wrap, dest_max, operand);

            if (actual_info.signedness == .signed) {
                // Reinterpret the sign-bit as part of the value. This will make
                // negative differences (`operand` > `dest_max`) appear too big.
                const unsigned_scalar_operand_ty = try mod.intType(.unsigned, actual_bits);
                const unsigned_operand_ty = if (is_vector) try mod.vectorType(.{
                    .len = dest_ty.vectorLen(mod),
                    .child = unsigned_scalar_operand_ty.toIntern(),
                }) else unsigned_scalar_operand_ty;
                const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);

                // If the destination type is signed, then we need to double its
                // range to account for negative values.
                const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
                    const one_scalar = try mod.intValue(unsigned_scalar_operand_ty, 1);
                    const one = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
                        .ty = unsigned_operand_ty.toIntern(),
                        .storage = .{ .repeated_elem = one_scalar.toIntern() },
                    } }))) else one_scalar;
                    // range = (dest_max << 1) + 1, computed at comptime.
                    const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod);
                    break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined);
                } else try mod.getCoerced(dest_max_val, unsigned_operand_ty);
                const dest_range = Air.internedToRef(dest_range_val.toIntern());

                const ok = if (is_vector) ok: {
                    const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
                    const all_in_range = try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = is_in_range,
                            .operation = .And,
                        } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range);
                    break :ok is_in_range;
                };
                // TODO negative_to_unsigned?
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            } else {
                // Unsigned operand: in range iff `dest_max - operand <= dest_max`.
                const ok = if (is_vector) ok: {
                    const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
                    const all_in_range = try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = is_in_range,
                            .operation = .And,
                        } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
                    break :ok is_in_range;
                };
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            }
        } else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) {
            // no shrinkage, yes sign loss
            // requirement: signed to unsigned >= 0
            const ok = if (is_vector) ok: {
                const scalar_zero = try mod.intValue(operand_scalar_ty, 0);
                const zero_val = try sema.splat(operand_ty, scalar_zero);
                const zero_inst = Air.internedToRef(zero_val.toIntern());
                const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
                const all_in_range = try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = is_in_range,
                        .operation = .And,
                    } },
                });
                break :ok all_in_range;
            } else ok: {
                const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
                const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
                break :ok is_in_range;
            };
            try sema.addSafetyCheck(block, src, ok, .negative_to_unsigned);
        }
    }
    return block.addTyOp(.intcast, dest_ty, operand);
}
|
|
|
|
/// Analyze a `@bitCast` builtin call. Validates that both the destination and
/// operand types have a well-defined in-memory layout, emitting error notes
/// that point to the dedicated cast builtin (`@enumFromInt`, `@ptrCast`,
/// `@intFromPtr`, etc.) where one exists; then delegates to `Sema.bitCast`.
fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@bitCast");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    // Validate the destination type.
    switch (dest_ty.zigTypeTag(mod)) {
        .AnyFrame,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .ErrorSet,
        .ErrorUnion,
        .Fn,
        .Frame,
        .NoReturn,
        .Null,
        .Opaque,
        .Optional,
        .Type,
        .Undefined,
        .Void,
        => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}),

        .Enum => {
            // Suggest @enumFromInt when casting from an integer.
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (operand_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },

        .Pointer => {
            // Suggest @ptrFromInt / @ptrCast depending on the operand type.
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (operand_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
                    .Pointer => try sema.errNote(block, src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        // Auto-layout aggregates have no guaranteed bit representation.
        .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
            const container = switch (dest_ty.zigTypeTag(mod)) {
                .Struct => "struct",
                .Union => "union",
                else => unreachable,
            };
            return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{
                dest_ty.fmt(mod), container,
            });
        },

        .Array,
        .Bool,
        .Float,
        .Int,
        .Vector,
        => {},
    }
    // Validate the operand type with the mirror-image checks and suggestions.
    switch (operand_ty.zigTypeTag(mod)) {
        .AnyFrame,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .ErrorSet,
        .ErrorUnion,
        .Fn,
        .Frame,
        .NoReturn,
        .Null,
        .Opaque,
        .Optional,
        .Type,
        .Undefined,
        .Void,
        => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}),

        .Enum => {
            const msg = msg: {
                const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (dest_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        .Pointer => {
            const msg = msg: {
                const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (dest_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(mod)}),
                    .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
            const container = switch (operand_ty.zigTypeTag(mod)) {
                .Struct => "struct",
                .Union => "union",
                else => unreachable,
            };
            return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{
                operand_ty.fmt(mod), container,
            });
        },

        .Array,
        .Bool,
        .Float,
        .Int,
        .Vector,
        => {},
    }
    return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
}
|
|
|
|
/// Analyze a `@floatCast` builtin call. Handles scalars and vectors; a
/// comptime-known operand is converted at compile time, a widening cast is a
/// plain coercion, and a narrowing cast emits `fptrunc` instruction(s).
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatCast");
    const dest_scalar_ty = dest_ty.scalarType(mod);

    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const operand_scalar_ty = operand_ty.scalarType(mod);

    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;

    const target = mod.getTarget();
    // The destination must be a float (or vector thereof); remember whether it
    // is `comptime_float`, which cannot hold a runtime value.
    const dest_is_comptime_float = switch (dest_scalar_ty.zigTypeTag(mod)) {
        .ComptimeFloat => true,
        .Float => false,
        else => return sema.fail(
            block,
            src,
            "expected float or vector type, found '{}'",
            .{dest_ty.fmt(mod)},
        ),
    };

    switch (operand_scalar_ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float, .ComptimeInt => {},
        else => return sema.fail(
            block,
            operand_src,
            "expected float or vector type, found '{}'",
            .{operand_ty.fmt(mod)},
        ),
    }

    // Comptime path: convert the value (element-wise for vectors).
    if (try sema.resolveValue(operand)) |operand_val| {
        if (!is_vector) {
            return Air.internedToRef((try operand_val.floatCast(dest_ty, mod)).toIntern());
        }
        const vec_len = operand_ty.vectorLen(mod);
        const new_elems = try sema.arena.alloc(InternPool.Index, vec_len);
        for (new_elems, 0..) |*new_elem, i| {
            const old_elem = try operand_val.elemValue(mod, i);
            new_elem.* = (try old_elem.floatCast(dest_scalar_ty, mod)).toIntern();
        }
        return Air.internedToRef(try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = new_elems },
        } }));
    }
    if (dest_is_comptime_float) {
        return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{});
    }
    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);

    const src_bits = operand_scalar_ty.floatBits(target);
    const dst_bits = dest_scalar_ty.floatBits(target);
    // Widening (or same-width) casts are lossless and handled by coercion.
    if (dst_bits >= src_bits) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }
    if (!is_vector) {
        return block.addTyOp(.fptrunc, dest_ty, operand);
    }
    // Narrowing vector cast: truncate element-by-element and reassemble.
    const vec_len = operand_ty.vectorLen(mod);
    const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len);
    for (new_elems, 0..) |*new_elem, i| {
        const idx_ref = try mod.intRef(Type.usize, i);
        const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
        new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem);
    }
    return block.addAggregateInit(dest_ty, new_elems);
}
|
|
|
|
/// Analyze an `elem_val` ZIR instruction: indexed load where the index has no
/// dedicated source location and out-of-bounds safety is not requested here.
fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
    const indexable = try sema.resolveInst(bin.lhs);
    const index = try sema.resolveInst(bin.rhs);
    return sema.elemVal(block, src, indexable, index, src, false);
}
|
|
|
|
/// Analyze an `elem_val_node` ZIR instruction: a source-level `lhs[rhs]`
/// rvalue, with the index coerced to `usize` and bounds safety requested.
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const index_src: LazySrcLoc = .{ .node_offset_array_access_index = pl_node.src_node };
    const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
    const indexable = try sema.resolveInst(bin.lhs);
    const raw_index = try sema.resolveInst(bin.rhs);
    const index = try sema.coerce(block, Type.usize, raw_index, index_src);
    return sema.elemVal(block, pl_node.src(), indexable, index, index_src, true);
}
|
|
|
|
/// Analyze an `elem_val_imm` ZIR instruction: indexed load with an immediate
/// (compile-time constant) index baked into the instruction.
fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm;
    const indexable = try sema.resolveInst(data.operand);
    const index = try sema.mod.intRef(Type.usize, data.idx);
    // No meaningful source locations are available for this form.
    return sema.elemVal(block, .unneeded, indexable, index, .unneeded, false);
}
|
|
|
|
/// Analyze an `elem_ptr` ZIR instruction: compute a pointer to `lhs[rhs]`.
/// When the base is not a pointer, the error is phrased as a bad `for` loop
/// pointer capture (this form is emitted for `for` captures — see the
/// `for_capture_from_input` source location).
fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const elem_index = try sema.resolveInst(extra.rhs);
    const indexable_ty = sema.typeOf(array_ptr);
    if (indexable_ty.zigTypeTag(mod) != .Pointer) {
        const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
        const msg = msg: {
            const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
                indexable_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            // If the value itself is indexable, taking its address would work.
            if (indexable_ty.isIndexable(mod)) {
                try sema.errNote(block, src, msg, "consider using '&' here", .{});
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false);
}
|
|
|
|
/// Analyze an `elem_ptr_node` ZIR instruction: a source-level `&lhs[rhs]`
/// style element pointer, with the index coerced to `usize`.
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const index_src: LazySrcLoc = .{ .node_offset_array_access_index = pl_node.src_node };
    const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
    const base_ptr = try sema.resolveInst(bin.lhs);
    const raw_index = try sema.resolveInst(bin.rhs);
    const index = try sema.coerce(block, Type.usize, raw_index, index_src);
    // Not initializing; out-of-bounds safety is requested.
    return sema.elemPtr(block, pl_node.src(), base_ptr, index, index_src, false, true);
}
|
|
|
|
/// Analyze an `array_init_elem_ptr` ZIR instruction: a pointer to one element
/// being assigned inside an array-initialization expression.
fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, pl_node.payload_index).data;
    const base_ptr = try sema.resolveInst(extra.ptr);
    const index_ref = try sema.mod.intRef(Type.usize, extra.index);
    const aggregate_ty = sema.typeOf(base_ptr).childType(mod);
    // Only arrays, vectors, and tuples support array-init syntax.
    const supported = switch (aggregate_ty.zigTypeTag(mod)) {
        .Array, .Vector => true,
        else => aggregate_ty.isTuple(mod),
    };
    if (!supported) return sema.failWithArrayInitNotSupported(block, src, aggregate_ty);
    // `init` is true: the pointer targets an element being initialized.
    return sema.elemPtr(block, src, base_ptr, index_ref, src, true, true);
}
|
|
|
|
/// Analyze a `slice_start` ZIR instruction: `lhs[start..]`.
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.SliceStart, pl_node.payload_index).data;
    const base = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = pl_node.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = pl_node.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = pl_node.src_node };

    // No end bound and no sentinel; `by_length` is false.
    return sema.analyzeSlice(block, pl_node.src(), base, start, .none, .none, .unneeded, ptr_src, start_src, end_src, false);
}
|
|
|
|
/// Analyze a `slice_end` ZIR instruction: `lhs[start..end]`.
fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.SliceEnd, pl_node.payload_index).data;
    const base = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const end = try sema.resolveInst(extra.end);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = pl_node.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = pl_node.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = pl_node.src_node };

    // No sentinel; `by_length` is false.
    return sema.analyzeSlice(block, pl_node.src(), base, start, end, .none, .unneeded, ptr_src, start_src, end_src, false);
}
|
|
|
|
/// Analyze a `slice_sentinel` ZIR instruction: `lhs[start..end :sentinel]`,
/// where the end bound may be absent (`lhs[start.. :sentinel]`).
fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = pl_node.src_node };
    const extra = sema.code.extraData(Zir.Inst.SliceSentinel, pl_node.payload_index).data;
    const base = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const end: Air.Inst.Ref = if (extra.end == .none) .none else try sema.resolveInst(extra.end);
    const sentinel = try sema.resolveInst(extra.sentinel);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = pl_node.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = pl_node.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = pl_node.src_node };

    // `by_length` is false: the third operand (if any) is an end index.
    return sema.analyzeSlice(block, pl_node.src(), base, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src, false);
}
|
|
|
|
/// Analyze a `slice_length` ZIR instruction: slicing by an explicit length
/// (`lhs[start..][0..len]` desugaring), optionally with a sentinel.
fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.SliceLength, pl_node.payload_index).data;
    const base = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const len = try sema.resolveInst(extra.len);
    const sentinel: Air.Inst.Ref = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = pl_node.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = pl_node.src_node };
    const sentinel_src: LazySrcLoc = if (sentinel == .none)
        .unneeded
    else
        .{ .node_offset_slice_sentinel = pl_node.src_node };

    // `by_length` is true: the third slice operand is a length, not an end index.
    return sema.analyzeSlice(block, pl_node.src(), base, start, len, sentinel, sentinel_src, ptr_src, start_src, end_src, true);
}
|
|
|
|
/// Holds common data used when analyzing or resolving switch prong bodies,
|
|
/// including setting up captures.
|
|
const SwitchProngAnalysis = struct {
|
|
sema: *Sema,
|
|
/// The block containing the `switch_block` itself.
|
|
parent_block: *Block,
|
|
/// The raw switch operand value (*not* the condition). Always defined.
|
|
operand: Air.Inst.Ref,
|
|
/// May be `undefined` if no prong has a by-ref capture.
|
|
operand_ptr: Air.Inst.Ref,
|
|
/// The switch condition value. For unions, `operand` is the union and `cond` is its tag.
|
|
cond: Air.Inst.Ref,
|
|
/// If this switch is on an error set, this is the type to assign to the
|
|
/// `else` prong. If `null`, the prong should be unreachable.
|
|
else_error_ty: ?Type,
|
|
/// The index of the `switch_block` instruction itself.
|
|
switch_block_inst: Zir.Inst.Index,
|
|
/// The dummy index into which inline tag captures should be placed. May be
|
|
/// undefined if no prong has a tag capture.
|
|
tag_capture_inst: Zir.Inst.Index,
|
|
|
|
/// Resolve a switch prong which is determined at comptime to have no peers.
/// Uses `resolveBlockBody`. Sets up captures as needed.
///
/// Captures are communicated to the prong body by temporarily mapping the
/// relevant ZIR indices in `sema.inst_map`; the `defer`s below guarantee the
/// entries are removed again on every exit path.
fn resolveProngComptime(
    spa: SwitchProngAnalysis,
    child_block: *Block,
    prong_type: enum { normal, special },
    prong_body: []const Zir.Inst.Index,
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    /// Must use the `scalar_capture`, `special_capture`, or `multi_capture` union field.
    raw_capture_src: Module.SwitchProngSrc,
    /// The set of all values which can reach this prong. May be undefined
    /// if the prong is special or contains ranges.
    case_vals: []const Air.Inst.Ref,
    /// The inline capture of this prong. If this is not an inline prong,
    /// this is `.none`.
    inline_case_capture: Air.Inst.Ref,
    /// Whether this prong has an inline tag capture. If `true`, then
    /// `inline_case_capture` cannot be `.none`.
    has_tag_capture: bool,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    const sema = spa.sema;
    const src = sema.code.instructions.items(.data)[@intFromEnum(spa.switch_block_inst)].pl_node.src();

    if (has_tag_capture) {
        // Map the dummy tag-capture instruction to the resolved tag value for
        // the duration of the prong body analysis.
        const tag_ref = try spa.analyzeTagCapture(child_block, raw_capture_src, inline_case_capture);
        sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
    }
    defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));

    switch (capture) {
        .none => {
            return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
        },

        .by_val, .by_ref => {
            const capture_ref = try spa.analyzeCapture(
                child_block,
                capture == .by_ref,
                prong_type == .special,
                raw_capture_src,
                case_vals,
                inline_case_capture,
            );

            if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
                // This prong should be unreachable!
                return .unreachable_value;
            }

            // The prong body refers to its capture via the switch block's own
            // instruction index; map it for the duration of the body.
            sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
            defer assert(sema.inst_map.remove(spa.switch_block_inst));

            return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
        },
    }
}
|
|
|
|
/// Analyze a switch prong which may have peers at runtime.
|
|
/// Uses `analyzeBodyRuntimeBreak`. Sets up captures as needed.
|
|
fn analyzeProngRuntime(
|
|
spa: SwitchProngAnalysis,
|
|
case_block: *Block,
|
|
prong_type: enum { normal, special },
|
|
prong_body: []const Zir.Inst.Index,
|
|
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
|
|
/// Must use the `scalar`, `special`, or `multi_capture` union field.
|
|
raw_capture_src: Module.SwitchProngSrc,
|
|
/// The set of all values which can reach this prong. May be undefined
|
|
/// if the prong is special or contains ranges.
|
|
case_vals: []const Air.Inst.Ref,
|
|
/// The inline capture of this prong. If this is not an inline prong,
|
|
/// this is `.none`.
|
|
inline_case_capture: Air.Inst.Ref,
|
|
/// Whether this prong has an inline tag capture. If `true`, then
|
|
/// `inline_case_capture` cannot be `.none`.
|
|
has_tag_capture: bool,
|
|
) CompileError!void {
|
|
const sema = spa.sema;
|
|
|
|
if (has_tag_capture) {
|
|
const tag_ref = try spa.analyzeTagCapture(case_block, raw_capture_src, inline_case_capture);
|
|
sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
|
|
}
|
|
defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));
|
|
|
|
switch (capture) {
|
|
.none => {
|
|
return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
|
|
},
|
|
|
|
.by_val, .by_ref => {
|
|
const capture_ref = try spa.analyzeCapture(
|
|
case_block,
|
|
capture == .by_ref,
|
|
prong_type == .special,
|
|
raw_capture_src,
|
|
case_vals,
|
|
inline_case_capture,
|
|
);
|
|
|
|
if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
|
|
// No need to analyze any further, the prong is unreachable
|
|
return;
|
|
}
|
|
|
|
sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
|
|
defer assert(sema.inst_map.remove(spa.switch_block_inst));
|
|
|
|
return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
|
|
},
|
|
}
|
|
}
|
|
|
|
fn analyzeTagCapture(
|
|
spa: SwitchProngAnalysis,
|
|
block: *Block,
|
|
raw_capture_src: Module.SwitchProngSrc,
|
|
inline_case_capture: Air.Inst.Ref,
|
|
) CompileError!Air.Inst.Ref {
|
|
const sema = spa.sema;
|
|
const mod = sema.mod;
|
|
const operand_ty = sema.typeOf(spa.operand);
|
|
if (operand_ty.zigTypeTag(mod) != .Union) {
|
|
const zir_datas = sema.code.instructions.items(.data);
|
|
const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node;
|
|
const raw_tag_capture_src: Module.SwitchProngSrc = switch (raw_capture_src) {
|
|
.scalar_capture => |i| .{ .scalar_tag_capture = i },
|
|
.multi_capture => |i| .{ .multi_tag_capture = i },
|
|
.special_capture => .special_tag_capture,
|
|
else => unreachable,
|
|
};
|
|
const capture_src = raw_tag_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, .none);
|
|
return sema.fail(block, capture_src, "cannot capture tag of non-union type '{}'", .{
|
|
operand_ty.fmt(mod),
|
|
});
|
|
}
|
|
assert(inline_case_capture != .none);
|
|
return inline_case_capture;
|
|
}
|
|
|
|
fn analyzeCapture(
|
|
spa: SwitchProngAnalysis,
|
|
block: *Block,
|
|
capture_byref: bool,
|
|
is_special_prong: bool,
|
|
raw_capture_src: Module.SwitchProngSrc,
|
|
case_vals: []const Air.Inst.Ref,
|
|
inline_case_capture: Air.Inst.Ref,
|
|
) CompileError!Air.Inst.Ref {
|
|
const sema = spa.sema;
|
|
const mod = sema.mod;
|
|
const ip = &mod.intern_pool;
|
|
|
|
const zir_datas = sema.code.instructions.items(.data);
|
|
const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node;
|
|
|
|
const operand_ty = sema.typeOf(spa.operand);
|
|
const operand_ptr_ty = if (capture_byref) sema.typeOf(spa.operand_ptr) else undefined;
|
|
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_node_offset };
|
|
|
|
if (inline_case_capture != .none) {
|
|
const item_val = sema.resolveConstDefinedValue(block, .unneeded, inline_case_capture, undefined) catch unreachable;
|
|
if (operand_ty.zigTypeTag(mod) == .Union) {
|
|
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?);
|
|
const union_obj = mod.typeToUnion(operand_ty).?;
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
|
if (capture_byref) {
|
|
const ptr_field_ty = try sema.ptrType(.{
|
|
.child = field_ty.toIntern(),
|
|
.flags = .{
|
|
.is_const = !operand_ptr_ty.ptrIsMutable(mod),
|
|
.is_volatile = operand_ptr_ty.isVolatilePtr(mod),
|
|
.address_space = operand_ptr_ty.ptrAddressSpace(mod),
|
|
},
|
|
});
|
|
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
|
|
return Air.internedToRef((try mod.intern(.{ .ptr = .{
|
|
.ty = ptr_field_ty.toIntern(),
|
|
.addr = .{ .field = .{
|
|
.base = union_ptr.toIntern(),
|
|
.index = field_index,
|
|
} },
|
|
} })));
|
|
}
|
|
return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
|
|
} else {
|
|
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |union_val| {
|
|
const tag_and_val = ip.indexToKey(union_val.toIntern()).un;
|
|
return Air.internedToRef(tag_and_val.val);
|
|
}
|
|
return block.addStructFieldVal(spa.operand, field_index, field_ty);
|
|
}
|
|
} else if (capture_byref) {
|
|
return anonDeclRef(sema, item_val.toIntern());
|
|
} else {
|
|
return inline_case_capture;
|
|
}
|
|
}
|
|
|
|
if (is_special_prong) {
|
|
if (capture_byref) {
|
|
return spa.operand_ptr;
|
|
}
|
|
|
|
switch (operand_ty.zigTypeTag(mod)) {
|
|
.ErrorSet => if (spa.else_error_ty) |ty| {
|
|
return sema.bitCast(block, ty, spa.operand, operand_src, null);
|
|
} else {
|
|
try block.addUnreachable(operand_src, false);
|
|
return .unreachable_value;
|
|
},
|
|
else => return spa.operand,
|
|
}
|
|
}
|
|
|
|
switch (operand_ty.zigTypeTag(mod)) {
|
|
.Union => {
|
|
const union_obj = mod.typeToUnion(operand_ty).?;
|
|
const first_item_val = sema.resolveConstDefinedValue(block, .unneeded, case_vals[0], undefined) catch unreachable;
|
|
|
|
const first_field_index: u32 = mod.unionTagFieldIndex(union_obj, first_item_val).?;
|
|
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]);
|
|
|
|
const field_indices = try sema.arena.alloc(u32, case_vals.len);
|
|
for (case_vals, field_indices) |item, *field_idx| {
|
|
const item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
|
|
field_idx.* = mod.unionTagFieldIndex(union_obj, item_val).?;
|
|
}
|
|
|
|
// Fast path: if all the operands are the same type already, we don't need to hit
|
|
// PTR! This will also allow us to emit simpler code.
|
|
const same_types = for (field_indices[1..]) |field_idx| {
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
if (!field_ty.eql(first_field_ty, sema.mod)) break false;
|
|
} else true;
|
|
|
|
const capture_ty = if (same_types) first_field_ty else capture_ty: {
|
|
// We need values to run PTR on, so make a bunch of undef constants.
|
|
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
|
|
for (dummy_captures, field_indices) |*dummy, field_idx| {
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
dummy.* = try mod.undefRef(field_ty);
|
|
}
|
|
|
|
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
|
|
@memset(case_srcs, .unneeded);
|
|
|
|
break :capture_ty sema.resolvePeerTypes(block, .unneeded, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
// This must be a multi-prong so this must be a `multi_capture` src
|
|
const multi_idx = raw_capture_src.multi_capture;
|
|
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
|
|
for (case_srcs, 0..) |*case_src, i| {
|
|
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(i) } };
|
|
case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
|
|
}
|
|
const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
|
|
_ = sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err1| switch (err1) {
|
|
error.AnalysisFail => {
|
|
const msg = sema.err orelse return error.AnalysisFail;
|
|
try sema.reparentOwnedErrorMsg(block, capture_src, msg, "capture group with incompatible types", .{});
|
|
return error.AnalysisFail;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
};
|
|
|
|
// By-reference captures have some further restrictions which make them easier to emit
|
|
if (capture_byref) {
|
|
const operand_ptr_info = operand_ptr_ty.ptrInfo(mod);
|
|
const capture_ptr_ty = resolve: {
|
|
// By-ref captures of hetereogeneous types are only allowed if all field
|
|
// pointer types are peer resolvable to each other.
|
|
// We need values to run PTR on, so make a bunch of undef constants.
|
|
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
|
|
for (field_indices, dummy_captures) |field_idx, *dummy| {
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
const field_ptr_ty = try sema.ptrType(.{
|
|
.child = field_ty.toIntern(),
|
|
.flags = .{
|
|
.is_const = operand_ptr_info.flags.is_const,
|
|
.is_volatile = operand_ptr_info.flags.is_volatile,
|
|
.address_space = operand_ptr_info.flags.address_space,
|
|
.alignment = union_obj.fieldAlign(ip, field_idx),
|
|
},
|
|
});
|
|
dummy.* = try mod.undefRef(field_ptr_ty);
|
|
}
|
|
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
|
|
@memset(case_srcs, .unneeded);
|
|
|
|
break :resolve sema.resolvePeerTypes(block, .unneeded, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
// This must be a multi-prong so this must be a `multi_capture` src
|
|
const multi_idx = raw_capture_src.multi_capture;
|
|
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
|
|
for (case_srcs, 0..) |*case_src, i| {
|
|
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(i) } };
|
|
case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
|
|
}
|
|
const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
|
|
_ = sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err1| switch (err1) {
|
|
error.AnalysisFail => {
|
|
const msg = sema.err orelse return error.AnalysisFail;
|
|
try sema.errNote(block, capture_src, msg, "this coercion is only possible when capturing by value", .{});
|
|
try sema.reparentOwnedErrorMsg(block, capture_src, msg, "capture group with incompatible types", .{});
|
|
return error.AnalysisFail;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
};
|
|
|
|
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
|
|
if (op_ptr_val.isUndef(mod)) return mod.undefRef(capture_ptr_ty);
|
|
return Air.internedToRef((try mod.intern(.{ .ptr = .{
|
|
.ty = capture_ptr_ty.toIntern(),
|
|
.addr = .{ .field = .{
|
|
.base = op_ptr_val.toIntern(),
|
|
.index = first_field_index,
|
|
} },
|
|
} })));
|
|
}
|
|
|
|
try sema.requireRuntimeBlock(block, operand_src, null);
|
|
return block.addStructFieldPtr(spa.operand_ptr, first_field_index, capture_ptr_ty);
|
|
}
|
|
|
|
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| {
|
|
if (operand_val.isUndef(mod)) return mod.undefRef(capture_ty);
|
|
const union_val = ip.indexToKey(operand_val.toIntern()).un;
|
|
if (Value.fromInterned(union_val.tag).isUndef(mod)) return mod.undefRef(capture_ty);
|
|
const uncoerced = Air.internedToRef(union_val.val);
|
|
return sema.coerce(block, capture_ty, uncoerced, operand_src);
|
|
}
|
|
|
|
try sema.requireRuntimeBlock(block, operand_src, null);
|
|
|
|
if (same_types) {
|
|
return block.addStructFieldVal(spa.operand, first_field_index, capture_ty);
|
|
}
|
|
|
|
// We may have to emit a switch block which coerces the operand to the capture type.
|
|
// If we can, try to avoid that using in-memory coercions.
|
|
const first_non_imc = in_mem: {
|
|
for (field_indices, 0..) |field_idx, i| {
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
|
|
break :in_mem i;
|
|
}
|
|
}
|
|
// All fields are in-memory coercible to the resolved type!
|
|
// Just take the first field and bitcast the result.
|
|
const uncoerced = try block.addStructFieldVal(spa.operand, first_field_index, first_field_ty);
|
|
return block.addBitCast(capture_ty, uncoerced);
|
|
};
|
|
|
|
// By-val capture with heterogeneous types which are not all in-memory coercible to
|
|
// the resolved capture type. We finally have to fall back to the ugly method.
|
|
|
|
// However, let's first track which operands are in-memory coercible. There may well
|
|
// be several, and we can squash all of these cases into the same switch prong using
|
|
// a simple bitcast. We'll make this the 'else' prong.
|
|
|
|
var in_mem_coercible = try std.DynamicBitSet.initFull(sema.arena, field_indices.len);
|
|
in_mem_coercible.unset(first_non_imc);
|
|
{
|
|
const next = first_non_imc + 1;
|
|
for (field_indices[next..], next..) |field_idx, i| {
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
|
|
in_mem_coercible.unset(i);
|
|
}
|
|
}
|
|
}
|
|
|
|
const capture_block_inst = try block.addInstAsIndex(.{
|
|
.tag = .block,
|
|
.data = .{
|
|
.ty_pl = .{
|
|
.ty = Air.internedToRef(capture_ty.toIntern()),
|
|
.payload = undefined, // updated below
|
|
},
|
|
},
|
|
});
|
|
|
|
const prong_count = field_indices.len - in_mem_coercible.count();
|
|
|
|
const estimated_extra = prong_count * 6; // 2 for Case, 1 item, probably 3 insts
|
|
var cases_extra = try std.ArrayList(u32).initCapacity(sema.gpa, estimated_extra);
|
|
defer cases_extra.deinit();
|
|
|
|
{
|
|
// Non-bitcast cases
|
|
var it = in_mem_coercible.iterator(.{ .kind = .unset });
|
|
while (it.next()) |idx| {
|
|
var coerce_block = block.makeSubBlock();
|
|
defer coerce_block.instructions.deinit(sema.gpa);
|
|
|
|
const field_idx = field_indices[idx];
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
|
|
const uncoerced = try coerce_block.addStructFieldVal(spa.operand, field_idx, field_ty);
|
|
const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
const multi_idx = raw_capture_src.multi_capture;
|
|
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
|
|
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(idx) } };
|
|
const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
|
|
_ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
_ = try coerce_block.addBr(capture_block_inst, coerced);
|
|
|
|
try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(coerce_block.instructions.items.len)); // body_len
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); // body
|
|
}
|
|
}
|
|
const else_body_len = len: {
|
|
// 'else' prong uses a bitcast
|
|
var coerce_block = block.makeSubBlock();
|
|
defer coerce_block.instructions.deinit(sema.gpa);
|
|
|
|
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
|
|
const first_imc_field_idx = field_indices[first_imc_item_idx];
|
|
const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
|
|
const uncoerced = try coerce_block.addStructFieldVal(spa.operand, first_imc_field_idx, first_imc_field_ty);
|
|
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
|
|
_ = try coerce_block.addBr(capture_block_inst, coerced);
|
|
|
|
try cases_extra.appendSlice(@ptrCast(coerce_block.instructions.items));
|
|
break :len coerce_block.instructions.items.len;
|
|
};
|
|
|
|
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
|
|
cases_extra.items.len +
|
|
@typeInfo(Air.Block).Struct.fields.len +
|
|
1);
|
|
|
|
const switch_br_inst: u32 = @intCast(sema.air_instructions.len);
|
|
try sema.air_instructions.append(sema.gpa, .{
|
|
.tag = .switch_br,
|
|
.data = .{ .pl_op = .{
|
|
.operand = spa.cond,
|
|
.payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
|
|
.cases_len = @intCast(prong_count),
|
|
.else_body_len = @intCast(else_body_len),
|
|
}),
|
|
} },
|
|
});
|
|
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
|
|
|
|
// Set up block body
|
|
sema.air_instructions.items(.data)[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{
|
|
.body_len = 1,
|
|
});
|
|
sema.air_extra.appendAssumeCapacity(switch_br_inst);
|
|
|
|
return capture_block_inst.toRef();
|
|
},
|
|
.ErrorSet => {
|
|
if (capture_byref) {
|
|
const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, .none);
|
|
return sema.fail(
|
|
block,
|
|
capture_src,
|
|
"error set cannot be captured by reference",
|
|
.{},
|
|
);
|
|
}
|
|
|
|
if (case_vals.len == 1) {
|
|
const item_val = sema.resolveConstDefinedValue(block, .unneeded, case_vals[0], undefined) catch unreachable;
|
|
const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?);
|
|
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
|
|
}
|
|
|
|
var names: InferredErrorSet.NameMap = .{};
|
|
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
|
|
for (case_vals) |err| {
|
|
const err_val = sema.resolveConstDefinedValue(block, .unneeded, err, undefined) catch unreachable;
|
|
names.putAssumeCapacityNoClobber(err_val.getErrorName(mod).unwrap().?, {});
|
|
}
|
|
const error_ty = try mod.errorSetFromUnsortedNames(names.keys());
|
|
return sema.bitCast(block, error_ty, spa.operand, operand_src, null);
|
|
},
|
|
else => {
|
|
// In this case the capture value is just the passed-through value
|
|
// of the switch condition.
|
|
if (capture_byref) {
|
|
return spa.operand_ptr;
|
|
} else {
|
|
return spa.operand;
|
|
}
|
|
},
|
|
}
|
|
}
|
|
};
|
|
|
|
/// Given the raw operand of a `switch`, produce the value that is actually
/// switched on. For most scalar-like types this is the operand itself (or its
/// single possible value, when the type has one); for a tagged union it is the
/// union's tag. Fails with a compile error for types that cannot be switched on.
fn switchCond(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const cond_ty = sema.typeOf(operand);
    switch (cond_ty.zigTypeTag(mod)) {
        .Type,
        .Void,
        .Bool,
        .Int,
        .Float,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Pointer,
        .Fn,
        .ErrorSet,
        .Enum,
        => {
            // Slices are pointer-like but not a valid switch condition.
            if (cond_ty.isSlice(mod)) {
                return sema.fail(block, src, "switch on type '{}'", .{cond_ty.fmt(mod)});
            }
            // With only one possible value, no runtime condition is needed.
            if (try sema.typeHasOnePossibleValue(cond_ty)) |opv| {
                return Air.internedToRef(opv.toIntern());
            }
            return operand;
        },

        .Union => {
            try sema.resolveTypeFields(cond_ty);
            const enum_ty = cond_ty.unionTagType(mod) orelse {
                // Untagged unions cannot be switched on; build an error that
                // also points at the union declaration when we can find it.
                const msg = blk: {
                    const err_msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{});
                    // Destroy the message only if adding the note fails;
                    // `failWithOwnedErrorMsg` takes ownership afterwards.
                    errdefer err_msg.destroy(sema.gpa);
                    if (cond_ty.declSrcLocOrNull(mod)) |union_src| {
                        try mod.errNoteNonLazy(union_src, err_msg, "consider 'union(enum)' here", .{});
                    }
                    break :blk err_msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            };
            // The condition for a tagged union is its tag value.
            return sema.unionToTag(block, enum_ty, operand, src);
        },

        .ErrorUnion,
        .NoReturn,
        .Array,
        .Struct,
        .Undefined,
        .Null,
        .Optional,
        .Opaque,
        .Vector,
        .Frame,
        .AnyFrame,
        => return sema.fail(block, src, "switch on type '{}'", .{cond_ty.fmt(mod)}),
    }
}
|
|
|
|
/// Maps each error name appearing in a `switch` to the source location of the
/// prong that matched it (presumably so duplicate cases can be diagnosed —
/// populated by `validateErrSetSwitch`; verify against that function).
const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc);
|
|
|
|
/// Analyze a ZIR `switch_block_err_union` instruction: the lowering of
/// `if (x) |payload| ... else |err| switch (err) { ... }` style constructs.
/// The non-error branch runs `non_error_case.body`; the error branches are a
/// full error-set switch over the unwrapped error code. Resolves entirely at
/// comptime when the operand is comptime-known; otherwise emits a `cond_br`
/// on `is_non_err` whose else branch contains the runtime switch.
fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const switch_src = inst_data.src();
    const switch_src_node_offset = inst_data.src_node;
    const switch_operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_src_node_offset };
    const else_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = switch_src_node_offset };
    const extra = sema.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index);
    const main_operand_src: LazySrcLoc = .{ .node_offset_if_cond = extra.data.main_src_node_offset };
    const main_src: LazySrcLoc = .{ .node_offset_main_token = extra.data.main_src_node_offset };

    const raw_operand_val = try sema.resolveInst(extra.data.operand);

    // AstGen guarantees that the instruction immediately preceding
    // switch_block_err_union is a dbg_stmt
    const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);

    // Walk the variable-length trailing data of the instruction in order:
    // optional multi_cases_len, optional err_capture_inst, then the prongs.
    var header_extra_index: usize = extra.end;

    const scalar_cases_len = extra.data.bits.scalar_cases_len;
    const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
        const multi_cases_len = sema.code.extra[header_extra_index];
        header_extra_index += 1;
        break :blk multi_cases_len;
    } else 0;

    const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: {
        const err_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]);
        header_extra_index += 1;
        // SwitchProngAnalysis wants inst_map to have space for the tag capture.
        // Note that the normal capture is referred to via the switch block
        // index, which there is already necessarily space for.
        try sema.inst_map.ensureSpaceForInstructions(gpa, &.{err_capture_inst});
        break :blk err_capture_inst;
    } else undefined;

    var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
    defer case_vals.deinit(gpa);

    // The non-error (payload) branch of the construct.
    const NonError = struct {
        body: []const Zir.Inst.Index,
        end: usize,
        capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    };

    const non_error_case: NonError = non_error: {
        const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]);
        const extra_body_start = header_extra_index + 1;
        break :non_error .{
            .body = sema.code.bodySlice(extra_body_start, info.body_len),
            .end = extra_body_start + info.body_len,
            .capture = info.capture,
        };
    };

    // The `else` prong of the error switch, if present.
    const Else = struct {
        body: []const Zir.Inst.Index,
        end: usize,
        is_inline: bool,
        has_capture: bool,
    };

    const else_case: Else = if (!extra.data.bits.has_else) .{
        .body = &.{},
        .end = non_error_case.end,
        .is_inline = false,
        .has_capture = false,
    } else special: {
        const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[non_error_case.end]);
        const extra_body_start = non_error_case.end + 1;
        assert(info.capture != .by_ref);
        assert(!info.has_tag_capture);
        break :special .{
            .body = sema.code.bodySlice(extra_body_start, info.body_len),
            .end = extra_body_start + info.body_len,
            .is_inline = info.is_inline,
            .has_capture = info.capture != .none,
        };
    };

    var seen_errors = SwitchErrorSet.init(gpa);
    defer seen_errors.deinit();

    const operand_ty = sema.typeOf(raw_operand_val);
    // When payload_is_ref, the operand is a pointer to the error union.
    const operand_err_set = if (extra.data.bits.payload_is_ref)
        operand_ty.childType(mod)
    else
        operand_ty;

    if (operand_err_set.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, switch_src, "expected error union type, found '{}'", .{
            operand_ty.fmt(mod),
        });
    }

    const operand_err_set_ty = operand_err_set.errorUnionSet(mod);

    // Reserve the AIR block that the prong results break to; its data is
    // filled in by `analyzeBlockBody`.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = block.inlining,
        .is_comptime = block.is_comptime,
        .comptime_reason = block.comptime_reason,
        .is_typeof = block.is_typeof,
        .c_import_buf = block.c_import_buf,
        .runtime_cond = block.runtime_cond,
        .runtime_loop = block.runtime_loop,
        .runtime_index = block.runtime_index,
        .error_return_trace_index = block.error_return_trace_index,
        .want_safety = block.want_safety,
    };
    const merges = &child_block.label.?.merges;
    defer child_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    // If the error set is empty, only the non-error branch can ever run.
    const resolved_err_set = try sema.resolveInferredErrorSetTy(block, main_src, operand_err_set_ty.toIntern());
    if (Type.fromInterned(resolved_err_set).errorSetIsEmpty(mod)) {
        return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
    }

    const else_error_ty: ?Type = try validateErrSetSwitch(
        sema,
        block,
        &seen_errors,
        &case_vals,
        operand_err_set_ty,
        inst_data,
        scalar_cases_len,
        multi_cases_len,
        .{ .body = else_case.body, .end = else_case.end, .src = else_prong_src },
        extra.data.bits.has_else,
    );

    var spa: SwitchProngAnalysis = .{
        .sema = sema,
        .parent_block = block,
        .operand = undefined, // must be set to the unwrapped error code before use
        .operand_ptr = .none,
        .cond = raw_operand_val,
        .else_error_ty = else_error_ty,
        .switch_block_inst = inst,
        .tag_capture_inst = undefined,
    };

    // Comptime-known operand: resolve either the non-error branch or the
    // matching error prong directly, with no runtime control flow.
    if (try sema.resolveDefinedValue(&child_block, main_src, raw_operand_val)) |ov| {
        const operand_val = if (extra.data.bits.payload_is_ref)
            (try sema.pointerDeref(&child_block, main_src, ov, operand_ty)).?
        else
            ov;

        if (operand_val.errorUnionIsPayload(mod)) {
            return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
        } else {
            const err_val = Value.fromInterned(try mod.intern(.{
                .err = .{
                    .ty = operand_err_set_ty.toIntern(),
                    .name = operand_val.getErrorName(mod).unwrap().?,
                },
            }));
            spa.operand = if (extra.data.bits.payload_is_ref)
                try sema.analyzeErrUnionCodePtr(block, switch_operand_src, raw_operand_val)
            else
                try sema.analyzeErrUnionCode(block, switch_operand_src, raw_operand_val);

            if (extra.data.bits.any_uses_err_capture) {
                sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand);
            }
            defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));

            return resolveSwitchComptime(
                sema,
                spa,
                &child_block,
                try sema.switchCond(block, switch_operand_src, spa.operand),
                err_val,
                operand_err_set_ty,
                .{
                    .body = else_case.body,
                    .end = else_case.end,
                    .capture = if (else_case.has_capture) .by_val else .none,
                    .is_inline = else_case.is_inline,
                    .has_tag_capture = false,
                },
                case_vals,
                scalar_cases_len,
                multi_cases_len,
                true,
                false,
            );
        }
    }

    // With no explicit cases and an empty else error set, the error branch is
    // impossible; behave like the non-error branch alone.
    if (scalar_cases_len + multi_cases_len == 0) {
        if (else_error_ty) |ty| if (ty.errorSetIsEmpty(mod)) {
            return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
        };
    }

    if (child_block.is_comptime) {
        // Runtime-known operand inside a comptime scope is always an error;
        // this call is expected to fail with that diagnostic.
        _ = try sema.resolveConstDefinedValue(&child_block, main_operand_src, raw_operand_val, .{
            .needed_comptime_reason = "condition in comptime switch must be comptime-known",
            .block_comptime_reason = child_block.comptime_reason,
        });
        unreachable;
    }

    // Runtime lowering: cond_br on `is_non_err`, then-branch is the payload
    // body, else-branch is the error switch.
    const cond = if (extra.data.bits.payload_is_ref) blk: {
        try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(mod));
        const loaded = try sema.analyzeLoad(block, main_src, raw_operand_val, main_src);
        break :blk try sema.analyzeIsNonErr(block, main_src, loaded);
    } else blk: {
        try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val));
        break :blk try sema.analyzeIsNonErr(block, main_src, raw_operand_val);
    };

    var sub_block = child_block.makeSubBlock();
    sub_block.runtime_loop = null;
    sub_block.runtime_cond = mod.declPtr(child_block.src_decl).toSrcLoc(main_operand_src, mod);
    sub_block.runtime_index.increment();
    defer sub_block.instructions.deinit(gpa);

    try sema.analyzeBodyRuntimeBreak(&sub_block, non_error_case.body);
    const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
    defer gpa.free(true_instructions);

    // `sub_block` is reused for the else branch from here on.
    spa.operand = if (extra.data.bits.payload_is_ref)
        try sema.analyzeErrUnionCodePtr(&sub_block, switch_operand_src, raw_operand_val)
    else
        try sema.analyzeErrUnionCode(&sub_block, switch_operand_src, raw_operand_val);

    if (extra.data.bits.any_uses_err_capture) {
        sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand);
    }
    defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));
    // Several arguments are `undefined`: per the call below they correspond to
    // union/enum-only parameters that are unused for an error-set switch.
    _ = try sema.analyzeSwitchRuntimeBlock(
        spa,
        &sub_block,
        switch_src,
        try sema.switchCond(block, switch_operand_src, spa.operand),
        operand_err_set_ty,
        switch_operand_src,
        case_vals,
        .{
            .body = else_case.body,
            .end = else_case.end,
            .capture = if (else_case.has_capture) .by_val else .none,
            .is_inline = else_case.is_inline,
            .has_tag_capture = false,
        },
        scalar_cases_len,
        multi_cases_len,
        false,
        undefined,
        true,
        switch_src_node_offset,
        else_prong_src,
        undefined,
        seen_errors,
        undefined,
        undefined,
        undefined,
        cond_dbg_node_index,
        true,
    );

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        true_instructions.len + sub_block.instructions.items.len);

    _ = try child_block.addInst(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = cond,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @intCast(true_instructions.len),
                .else_body_len = @intCast(sub_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));

    return sema.analyzeBlockBody(block, main_src, &child_block, merges, false);
}
|
|
|
|
/// Semantic analysis for a ZIR `switch_block` instruction (or `switch_block_ref`
/// when `operand_is_ref` is true, in which case the ZIR operand is a pointer
/// that is loaded to obtain the switch value).
///
/// Phases, in order:
///   1. Decode the `Zir.Inst.SwitchBlock` header from `sema.code.extra`
///      (multi-case count, optional tag-capture instruction, special prong).
///   2. Validate the prongs per operand type: duplicate items, missing
///      `else` prong, invalid ranges, and misuse of the `_` prong. Resolved
///      case item values are collected into `case_vals` along the way.
///   3. If the operand is comptime-known, dispatch to `resolveSwitchComptime`.
///   4. Otherwise emit a runtime AIR `block` and lower the switch into it via
///      `analyzeSwitchRuntimeBlock`, returning the analyzed block body.
///
/// Errors are reported as compile errors through `sema.fail` /
/// `failWithOwnedErrorMsg`.
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_ref: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const src_node_offset = inst_data.src_node;
    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
    const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
    const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);

    // For `switch_block_ref`, the ZIR operand is a pointer: load it to get the
    // value, and keep the pointer around for by-ref prong captures. Otherwise
    // the pointer half is `undefined` and must not be used.
    const raw_operand_val: Air.Inst.Ref, const raw_operand_ptr: Air.Inst.Ref = blk: {
        const maybe_ptr = try sema.resolveInst(extra.data.operand);
        if (operand_is_ref) {
            const val = try sema.analyzeLoad(block, src, maybe_ptr, operand_src);
            break :blk .{ val, maybe_ptr };
        } else {
            break :blk .{ maybe_ptr, undefined };
        }
    };

    // The value actually switched on (e.g. the tag for a union operand).
    const operand = try sema.switchCond(block, operand_src, raw_operand_val);

    // AstGen guarantees that the instruction immediately preceding
    // switch_block(_ref) is a dbg_stmt
    const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);

    // Walk the variable-length header that follows the fixed SwitchBlock data.
    var header_extra_index: usize = extra.end;

    const scalar_cases_len = extra.data.bits.scalar_cases_len;
    const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
        const multi_cases_len = sema.code.extra[header_extra_index];
        header_extra_index += 1;
        break :blk multi_cases_len;
    } else 0;

    const tag_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_has_tag_capture) blk: {
        const tag_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]);
        header_extra_index += 1;
        // SwitchProngAnalysis wants inst_map to have space for the tag capture.
        // Note that the normal capture is referred to via the switch block
        // index, which there is already necessarily space for.
        try sema.inst_map.ensureSpaceForInstructions(gpa, &.{tag_capture_inst});
        break :blk tag_capture_inst;
    } else undefined;

    // Each scalar case contributes one item; each multi case may contribute
    // items and range endpoints (2 refs per range), hence the capacity guess.
    var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
    defer case_vals.deinit(gpa);

    // Decode the special (`else`/`_`) prong, if any.
    const special_prong = extra.data.bits.specialProng();
    const special: SpecialProng = switch (special_prong) {
        .none => .{
            .body = &.{},
            .end = header_extra_index,
            .capture = .none,
            .is_inline = false,
            .has_tag_capture = false,
        },
        .under, .@"else" => blk: {
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]);
            const extra_body_start = header_extra_index + 1;
            break :blk .{
                .body = sema.code.bodySlice(extra_body_start, info.body_len),
                .end = extra_body_start + info.body_len,
                .capture = info.capture,
                .is_inline = info.is_inline,
                .has_tag_capture = info.has_tag_capture,
            };
        },
    };

    const maybe_union_ty = sema.typeOf(raw_operand_val);
    const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;

    // Duplicate checking variables later also used for `inline else`.
    var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
    var seen_errors = SwitchErrorSet.init(gpa);
    var range_set = RangeSet.init(gpa, mod);
    var true_count: u8 = 0;
    var false_count: u8 = 0;

    defer {
        range_set.deinit();
        gpa.free(seen_enum_fields);
        seen_errors.deinit();
    }

    var empty_enum = false;

    const operand_ty = sema.typeOf(operand);
    const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;

    var else_error_ty: ?Type = null;

    // Validate usage of '_' prongs.
    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                src,
                "'_' prong only allowed when switching on non-exhaustive enums",
                .{},
            );
            errdefer msg.destroy(gpa);
            try sema.errNote(
                block,
                special_prong_src,
                msg,
                "'_' prong here",
                .{},
            );
            try sema.errNote(
                block,
                src,
                msg,
                "consider using 'else'",
                .{},
            );
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Validate for duplicate items, missing else prong, and invalid range.
    switch (operand_ty.zigTypeTag(mod)) {
        .Union => unreachable, // handled in `switchCond`
        .Enum => {
            seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod));
            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod);
            @memset(seen_enum_fields, null);
            // `range_set` is used for non-exhaustive enum values that do not correspond to any tags.

            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
                        block,
                        seen_enum_fields,
                        &range_set,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
                            block,
                            seen_enum_fields,
                            &range_set,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
                        ));
                    }

                    // Ranges are not permitted on enum operands.
                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
            const all_tags_handled = for (seen_enum_fields) |seen_src| {
                if (seen_src == null) break false;
            } else true;

            if (special_prong == .@"else") {
                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail(
                    block,
                    special_prong_src,
                    "unreachable else prong; all cases already handled",
                    .{},
                );
            } else if (!all_tags_handled) {
                // Report every unhandled enumeration value in one error.
                const msg = msg: {
                    const msg = try sema.errMsg(
                        block,
                        src,
                        "switch must handle all possibilities",
                        .{},
                    );
                    errdefer msg.destroy(sema.gpa);
                    for (seen_enum_fields, 0..) |seen_src, i| {
                        if (seen_src != null) continue;

                        const field_name = operand_ty.enumFieldName(i, mod);
                        try sema.addFieldErrNote(
                            operand_ty,
                            i,
                            msg,
                            "unhandled enumeration value: '{}'",
                            .{field_name.fmt(&mod.intern_pool)},
                        );
                    }
                    try mod.errNoteNonLazy(
                        operand_ty.declSrcLoc(mod),
                        msg,
                        "enum '{}' declared here",
                        .{operand_ty.fmt(mod)},
                    );
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                return sema.fail(
                    block,
                    src,
                    "switch on non-exhaustive enum must include 'else' or '_' prong",
                    .{},
                );
            }
        },
        .ErrorSet => else_error_ty = try validateErrSetSwitch(
            sema,
            block,
            &seen_errors,
            &case_vals,
            operand_ty,
            inst_data,
            scalar_cases_len,
            multi_cases_len,
            .{ .body = special.body, .end = special.end, .src = special_prong_src },
            special_prong == .@"else",
        ),
        .Int, .ComptimeInt => {
            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
                        block,
                        &range_set,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    // Note: body is skipped *after* the ranges below, unlike the
                    // other operand types where ranges are disallowed.
                    extra_index += items_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
                            block,
                            &range_set,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
                        ));
                    }

                    // Each range contributes its first and last item value.
                    try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len);
                    var range_i: u32 = 0;
                    while (range_i < ranges_len) : (range_i += 1) {
                        const item_first: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                        extra_index += 1;
                        const item_last: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                        extra_index += 1;

                        const vals = try sema.validateSwitchRange(
                            block,
                            &range_set,
                            item_first,
                            item_last,
                            operand_ty,
                            src_node_offset,
                            .{ .range = .{ .prong = multi_i, .item = range_i } },
                        );
                        case_vals.appendAssumeCapacity(vals[0]);
                        case_vals.appendAssumeCapacity(vals[1]);
                    }

                    extra_index += info.body_len;
                }
            }

            // A fixed-width integer switch is exhaustive if the collected
            // items/ranges span the full [minInt, maxInt] domain; otherwise an
            // `else` prong is required. `comptime_int` always needs `else`.
            check_range: {
                if (operand_ty.zigTypeTag(mod) == .Int) {
                    const min_int = try operand_ty.minInt(mod, operand_ty);
                    const max_int = try operand_ty.maxInt(mod, operand_ty);
                    if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
                        if (special_prong == .@"else") {
                            return sema.fail(
                                block,
                                special_prong_src,
                                "unreachable else prong; all cases already handled",
                                .{},
                            );
                        }
                        break :check_range;
                    }
                }
                if (special_prong != .@"else") {
                    return sema.fail(
                        block,
                        src,
                        "switch must handle all possibilities",
                        .{},
                    );
                }
            }
        },
        .Bool => {
            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
                        block,
                        &true_count,
                        &false_count,
                        item_ref,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
                            block,
                            &true_count,
                            &false_count,
                            item_ref,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
            // Bool exhaustiveness: exactly `true` and `false` must be covered,
            // either by explicit prongs or by `else`.
            switch (special_prong) {
                .@"else" => {
                    if (true_count + false_count == 2) {
                        return sema.fail(
                            block,
                            special_prong_src,
                            "unreachable else prong; all cases already handled",
                            .{},
                        );
                    }
                },
                .under, .none => {
                    if (true_count + false_count < 2) {
                        return sema.fail(
                            block,
                            src,
                            "switch must handle all possibilities",
                            .{},
                        );
                    }
                },
            }
        },
        .EnumLiteral, .Void, .Fn, .Pointer, .Type => {
            // These "sparse" operand types have no enumerable domain, so an
            // `else` prong is always mandatory.
            if (special_prong != .@"else") {
                return sema.fail(
                    block,
                    src,
                    "else prong required when switching on type '{}'",
                    .{operand_ty.fmt(mod)},
                );
            }

            // Duplicate detection by value, since there is no dense index.
            var seen_values = ValueSrcMap{};
            defer seen_values.deinit(gpa);

            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1;
                    extra_index += info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
                        block,
                        &seen_values,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
                            block,
                            &seen_values,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
        },

        .ErrorUnion,
        .NoReturn,
        .Array,
        .Struct,
        .Undefined,
        .Null,
        .Optional,
        .Opaque,
        .Vector,
        .Frame,
        .AnyFrame,
        .ComptimeFloat,
        .Float,
        => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
            operand_ty.fmt(mod),
        }),
    }

    // Shared state passed to the prong-analysis helpers for both the comptime
    // and runtime lowering paths.
    const spa: SwitchProngAnalysis = .{
        .sema = sema,
        .parent_block = block,
        .operand = raw_operand_val,
        .operand_ptr = raw_operand_ptr,
        .cond = operand,
        .else_error_ty = else_error_ty,
        .switch_block_inst = inst,
        .tag_capture_inst = tag_capture_inst,
    };

    // Reserve the AIR `block` instruction; its payload is filled in later by
    // `analyzeBlockBody`.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = block.inlining,
        .is_comptime = block.is_comptime,
        .comptime_reason = block.comptime_reason,
        .is_typeof = block.is_typeof,
        .c_import_buf = block.c_import_buf,
        .runtime_cond = block.runtime_cond,
        .runtime_loop = block.runtime_loop,
        .runtime_index = block.runtime_index,
        .want_safety = block.want_safety,
        .error_return_trace_index = block.error_return_trace_index,
    };
    const merges = &child_block.label.?.merges;
    defer child_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    // Comptime-known operand: evaluate the switch at compile time.
    if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| {
        return resolveSwitchComptime(
            sema,
            spa,
            &child_block,
            operand,
            operand_val,
            operand_ty,
            special,
            case_vals,
            scalar_cases_len,
            multi_cases_len,
            err_set,
            empty_enum,
        );
    }

    // Only the special prong exists (and it is not `inline`): no branching is
    // needed, so resolve that prong directly.
    if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline) {
        if (empty_enum) {
            return .void_value;
        }
        if (special_prong == .none) {
            return sema.fail(block, src, "switch must handle all possibilities", .{});
        }
        if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) {
            return .unreachable_value;
        }
        // Safety check: a runtime enum operand must hold a named tag value.
        if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
            (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
        {
            try sema.zirDbgStmt(block, cond_dbg_node_index);
            const ok = try block.addUnOp(.is_named_enum_value, operand);
            try sema.addSafetyCheck(block, src, ok, .corrupt_switch);
        }

        return spa.resolveProngComptime(
            &child_block,
            .special,
            special.body,
            special.capture,
            .special_capture,
            undefined, // case_vals may be undefined for special prongs
            .none,
            false,
            merges,
        );
    }

    // In a comptime scope a runtime-known operand is a compile error; this
    // call is expected to fail with that diagnostic.
    if (child_block.is_comptime) {
        _ = try sema.resolveConstDefinedValue(&child_block, operand_src, operand, .{
            .needed_comptime_reason = "condition in comptime switch must be comptime-known",
            .block_comptime_reason = child_block.comptime_reason,
        });
        unreachable;
    }

    // Runtime lowering: emit the switch body into `child_block`.
    _ = try sema.analyzeSwitchRuntimeBlock(
        spa,
        &child_block,
        src,
        operand,
        operand_ty,
        operand_src,
        case_vals,
        special,
        scalar_cases_len,
        multi_cases_len,
        union_originally,
        maybe_union_ty,
        err_set,
        src_node_offset,
        special_prong_src,
        seen_enum_fields,
        seen_errors,
        range_set,
        true_count,
        false_count,
        cond_dbg_node_index,
        false,
    );

    return sema.analyzeBlockBody(block, src, &child_block, merges, false);
}
|
|
|
|
/// Decoded form of a switch's special (`else` / `_`) prong, as read out of
/// the ZIR extra data by `zirSwitchBlock`. When the switch has no special
/// prong, `body` is empty and the flag fields are false.
const SpecialProng = struct {
    /// ZIR body instructions of the special prong; empty when absent.
    body: []const Zir.Inst.Index,
    /// Index into `sema.code.extra` one past this prong's data — the point
    /// where decoding of the scalar/multi cases begins (see `special.end`
    /// usages in `zirSwitchBlock`).
    end: usize,
    /// Capture kind of the prong (`.none` when there is no special prong).
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    /// Whether this is an `inline else`-style prong.
    is_inline: bool,
    /// Whether the prong additionally captures the tag value.
    has_tag_capture: bool,
};
|
|
|
|
fn analyzeSwitchRuntimeBlock(
|
|
sema: *Sema,
|
|
spa: SwitchProngAnalysis,
|
|
child_block: *Block,
|
|
src: LazySrcLoc,
|
|
operand: Air.Inst.Ref,
|
|
operand_ty: Type,
|
|
operand_src: LazySrcLoc,
|
|
case_vals: std.ArrayListUnmanaged(Air.Inst.Ref),
|
|
special: SpecialProng,
|
|
scalar_cases_len: usize,
|
|
multi_cases_len: usize,
|
|
union_originally: bool,
|
|
maybe_union_ty: Type,
|
|
err_set: bool,
|
|
src_node_offset: i32,
|
|
special_prong_src: LazySrcLoc,
|
|
seen_enum_fields: []?Module.SwitchProngSrc,
|
|
seen_errors: SwitchErrorSet,
|
|
range_set: RangeSet,
|
|
true_count: u8,
|
|
false_count: u8,
|
|
cond_dbg_node_index: Zir.Inst.Index,
|
|
allow_err_code_unwrap: bool,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
const ip = &mod.intern_pool;
|
|
|
|
const block = child_block.parent.?;
|
|
|
|
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
|
|
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
|
|
var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
|
|
defer cases_extra.deinit(gpa);
|
|
|
|
var case_block = child_block.makeSubBlock();
|
|
case_block.runtime_loop = null;
|
|
case_block.runtime_cond = mod.declPtr(child_block.src_decl).toSrcLoc(operand_src, mod);
|
|
case_block.runtime_index.increment();
|
|
defer case_block.instructions.deinit(gpa);
|
|
|
|
var extra_index: usize = special.end;
|
|
|
|
var scalar_i: usize = 0;
|
|
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
|
|
extra_index += 1;
|
|
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
|
|
extra_index += 1;
|
|
const body = sema.code.bodySlice(extra_index, info.body_len);
|
|
extra_index += info.body_len;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
|
|
|
|
const item = case_vals.items[scalar_i];
|
|
// `item` is already guaranteed to be constant known.
|
|
|
|
const analyze_body = if (union_originally) blk: {
|
|
const unresolved_item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
|
|
const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
|
|
const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
|
|
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
|
|
} else true;
|
|
|
|
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
|
|
// nothing to do here
|
|
} else if (analyze_body) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.normal,
|
|
body,
|
|
info.capture,
|
|
.{ .scalar_capture = @intCast(scalar_i) },
|
|
&.{item},
|
|
if (info.is_inline) item else .none,
|
|
info.has_tag_capture,
|
|
);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
|
|
var is_first = true;
|
|
var prev_cond_br: Air.Inst.Index = undefined;
|
|
var first_else_body: []const Air.Inst.Index = &.{};
|
|
defer gpa.free(first_else_body);
|
|
var prev_then_body: []const Air.Inst.Index = &.{};
|
|
defer gpa.free(prev_then_body);
|
|
|
|
var cases_len = scalar_cases_len;
|
|
var case_val_idx: usize = scalar_cases_len;
|
|
var multi_i: u32 = 0;
|
|
while (multi_i < multi_cases_len) : (multi_i += 1) {
|
|
const items_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const ranges_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
|
|
extra_index += 1 + items_len;
|
|
|
|
const items = case_vals.items[case_val_idx..][0..items_len];
|
|
case_val_idx += items_len;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
// Generate all possible cases as scalar prongs.
|
|
if (info.is_inline) {
|
|
const body_start = extra_index + 2 * ranges_len;
|
|
const body = sema.code.bodySlice(body_start, info.body_len);
|
|
var emit_bb = false;
|
|
|
|
var range_i: u32 = 0;
|
|
while (range_i < ranges_len) : (range_i += 1) {
|
|
const range_items = case_vals.items[case_val_idx..][0..2];
|
|
extra_index += 2;
|
|
case_val_idx += 2;
|
|
|
|
const item_first_ref = range_items[0];
|
|
const item_last_ref = range_items[1];
|
|
|
|
var item = sema.resolveConstDefinedValue(block, .unneeded, item_first_ref, undefined) catch unreachable;
|
|
const item_last = sema.resolveConstDefinedValue(block, .unneeded, item_last_ref, undefined) catch unreachable;
|
|
|
|
while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
|
|
// Previous validation has resolved any possible lazy values.
|
|
item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
|
|
error.Overflow => unreachable,
|
|
else => |e| return e,
|
|
};
|
|
}) {
|
|
cases_len += 1;
|
|
|
|
const item_ref = Air.internedToRef(item.toIntern());
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
const case_src = Module.SwitchProngSrc{
|
|
.range = .{ .prong = multi_i, .item = range_i },
|
|
};
|
|
const decl = mod.declPtr(case_block.src_decl);
|
|
try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
|
|
unreachable;
|
|
},
|
|
else => return err,
|
|
};
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.normal,
|
|
body,
|
|
info.capture,
|
|
.{ .multi_capture = multi_i },
|
|
undefined, // case_vals may be undefined for ranges
|
|
item_ref,
|
|
info.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
|
|
if (item.compareScalar(.eq, item_last, operand_ty, mod)) break;
|
|
}
|
|
}
|
|
|
|
for (items, 0..) |item, item_i| {
|
|
cases_len += 1;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
const analyze_body = if (union_originally) blk: {
|
|
const item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
|
|
const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
|
|
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
|
|
} else true;
|
|
|
|
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
const case_src = Module.SwitchProngSrc{
|
|
.multi = .{ .prong = multi_i, .item = @intCast(item_i) },
|
|
};
|
|
const decl = mod.declPtr(case_block.src_decl);
|
|
try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
|
|
unreachable;
|
|
},
|
|
else => return err,
|
|
};
|
|
emit_bb = true;
|
|
|
|
if (analyze_body) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.normal,
|
|
body,
|
|
info.capture,
|
|
.{ .multi_capture = multi_i },
|
|
&.{item},
|
|
item,
|
|
info.has_tag_capture,
|
|
);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
|
|
extra_index += info.body_len;
|
|
continue;
|
|
}
|
|
|
|
var any_ok: Air.Inst.Ref = .none;
|
|
|
|
// If there are any ranges, we have to put all the items into the
|
|
// else prong. Otherwise, we can take advantage of multiple items
|
|
// mapping to the same body.
|
|
if (ranges_len == 0) {
|
|
cases_len += 1;
|
|
|
|
const analyze_body = if (union_originally)
|
|
for (items) |item| {
|
|
const item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
|
|
const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
|
|
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
|
|
} else false
|
|
else
|
|
true;
|
|
|
|
const body = sema.code.bodySlice(extra_index, info.body_len);
|
|
extra_index += info.body_len;
|
|
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
|
|
// nothing to do here
|
|
} else if (analyze_body) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.normal,
|
|
body,
|
|
info.capture,
|
|
.{ .multi_capture = multi_i },
|
|
items,
|
|
.none,
|
|
false,
|
|
);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
|
|
case_block.instructions.items.len);
|
|
|
|
cases_extra.appendAssumeCapacity(@intCast(items.len));
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
|
|
for (items) |item| {
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item));
|
|
}
|
|
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
} else {
|
|
for (items) |item| {
|
|
const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
|
|
if (any_ok != .none) {
|
|
any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
|
|
} else {
|
|
any_ok = cmp_ok;
|
|
}
|
|
}
|
|
|
|
var range_i: usize = 0;
|
|
while (range_i < ranges_len) : (range_i += 1) {
|
|
const range_items = case_vals.items[case_val_idx..][0..2];
|
|
extra_index += 2;
|
|
case_val_idx += 2;
|
|
|
|
const item_first = range_items[0];
|
|
const item_last = range_items[1];
|
|
|
|
// operand >= first and operand <= last
|
|
const range_first_ok = try case_block.addBinOp(
|
|
if (case_block.float_mode == .Optimized) .cmp_gte_optimized else .cmp_gte,
|
|
operand,
|
|
item_first,
|
|
);
|
|
const range_last_ok = try case_block.addBinOp(
|
|
if (case_block.float_mode == .Optimized) .cmp_lte_optimized else .cmp_lte,
|
|
operand,
|
|
item_last,
|
|
);
|
|
const range_ok = try case_block.addBinOp(
|
|
.bool_and,
|
|
range_first_ok,
|
|
range_last_ok,
|
|
);
|
|
if (any_ok != .none) {
|
|
any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok);
|
|
} else {
|
|
any_ok = range_ok;
|
|
}
|
|
}
|
|
|
|
const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{
|
|
.pl_op = .{
|
|
.operand = any_ok,
|
|
.payload = undefined,
|
|
},
|
|
} });
|
|
var cond_body = try case_block.instructions.toOwnedSlice(gpa);
|
|
defer gpa.free(cond_body);
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
|
|
|
|
const body = sema.code.bodySlice(extra_index, info.body_len);
|
|
extra_index += info.body_len;
|
|
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
|
|
// nothing to do here
|
|
} else {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.normal,
|
|
body,
|
|
info.capture,
|
|
.{ .multi_capture = multi_i },
|
|
items,
|
|
.none,
|
|
false,
|
|
);
|
|
}
|
|
|
|
if (is_first) {
|
|
is_first = false;
|
|
first_else_body = cond_body;
|
|
cond_body = &.{};
|
|
} else {
|
|
try sema.air_extra.ensureUnusedCapacity(
|
|
gpa,
|
|
@typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len,
|
|
);
|
|
|
|
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload =
|
|
sema.addExtraAssumeCapacity(Air.CondBr{
|
|
.then_body_len = @intCast(prev_then_body.len),
|
|
.else_body_len = @intCast(cond_body.len),
|
|
});
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body));
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cond_body));
|
|
}
|
|
gpa.free(prev_then_body);
|
|
prev_then_body = try case_block.instructions.toOwnedSlice(gpa);
|
|
prev_cond_br = new_cond_br;
|
|
}
|
|
}
|
|
|
|
var final_else_body: []const Air.Inst.Index = &.{};
|
|
if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
|
|
var emit_bb = false;
|
|
if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
|
|
.Enum => {
|
|
if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
|
|
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
|
|
operand_ty.fmt(mod),
|
|
});
|
|
}
|
|
for (seen_enum_fields, 0..) |f, i| {
|
|
if (f != null) continue;
|
|
cases_len += 1;
|
|
|
|
const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(i));
|
|
const item_ref = Air.internedToRef(item_val.toIntern());
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
const analyze_body = if (union_originally) blk: {
|
|
const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
|
|
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
|
|
} else true;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
if (analyze_body) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{item_ref},
|
|
item_ref,
|
|
special.has_tag_capture,
|
|
);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
},
|
|
.ErrorSet => {
|
|
if (operand_ty.isAnyError(mod)) {
|
|
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
|
|
operand_ty.fmt(mod),
|
|
});
|
|
}
|
|
const error_names = operand_ty.errorSetNames(mod);
|
|
for (0..error_names.len) |name_index| {
|
|
const error_name = error_names.get(ip)[name_index];
|
|
if (seen_errors.contains(error_name)) continue;
|
|
cases_len += 1;
|
|
|
|
const item_val = try mod.intern(.{ .err = .{
|
|
.ty = operand_ty.toIntern(),
|
|
.name = error_name,
|
|
} });
|
|
const item_ref = Air.internedToRef(item_val);
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{item_ref},
|
|
item_ref,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
},
|
|
.Int => {
|
|
var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set);
|
|
while (try it.next()) |cur| {
|
|
cases_len += 1;
|
|
|
|
const item_ref = Air.internedToRef(cur);
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{item_ref},
|
|
item_ref,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
},
|
|
.Bool => {
|
|
if (true_count == 0) {
|
|
cases_len += 1;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{.bool_true},
|
|
.bool_true,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
if (false_count == 0) {
|
|
cases_len += 1;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{.bool_false},
|
|
.bool_false,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false));
|
|
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
}
|
|
},
|
|
else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
|
|
operand_ty.fmt(mod),
|
|
}),
|
|
};
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
|
|
|
|
if (mod.backendSupportsFeature(.is_named_enum_value) and
|
|
special.body.len != 0 and block.wantSafety() and
|
|
operand_ty.zigTypeTag(mod) == .Enum and
|
|
(!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
|
|
{
|
|
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
|
|
const ok = try case_block.addUnOp(.is_named_enum_value, operand);
|
|
try sema.addSafetyCheck(&case_block, src, ok, .corrupt_switch);
|
|
}
|
|
|
|
const analyze_body = if (union_originally and !special.is_inline)
|
|
for (seen_enum_fields, 0..) |seen_field, index| {
|
|
if (seen_field != null) continue;
|
|
const union_obj = mod.typeToUnion(maybe_union_ty).?;
|
|
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
|
|
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
|
|
} else false
|
|
else
|
|
true;
|
|
if (special.body.len != 0 and err_set and
|
|
try sema.maybeErrorUnwrap(&case_block, special.body, operand, operand_src, allow_err_code_unwrap))
|
|
{
|
|
// nothing to do here
|
|
} else if (special.body.len != 0 and analyze_body and !special.is_inline) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
undefined, // case_vals may be undefined for special prongs
|
|
.none,
|
|
false,
|
|
);
|
|
} else {
|
|
// We still need a terminator in this block, but we have proven
|
|
// that it is unreachable.
|
|
if (case_block.wantSafety()) {
|
|
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
|
|
try sema.safetyPanic(&case_block, src, .corrupt_switch);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
}
|
|
|
|
if (is_first) {
|
|
final_else_body = case_block.instructions.items;
|
|
} else {
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
|
|
@typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);
|
|
|
|
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload =
|
|
sema.addExtraAssumeCapacity(Air.CondBr{
|
|
.then_body_len = @intCast(prev_then_body.len),
|
|
.else_body_len = @intCast(case_block.instructions.items.len),
|
|
});
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body));
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
|
|
final_else_body = first_else_body;
|
|
}
|
|
}
|
|
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
|
|
cases_extra.items.len + final_else_body.len);
|
|
|
|
const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{
|
|
.cases_len = @intCast(cases_len),
|
|
.else_body_len = @intCast(final_else_body.len),
|
|
});
|
|
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items));
|
|
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(final_else_body));
|
|
|
|
return try child_block.addInst(.{
|
|
.tag = .switch_br,
|
|
.data = .{ .pl_op = .{
|
|
.operand = operand,
|
|
.payload = payload_index,
|
|
} },
|
|
});
|
|
}
|
|
|
|
/// Resolves a `switch` whose operand is comptime-known by selecting the single
/// matching prong and analyzing only its body.
/// Prongs are checked in ZIR layout order: scalar items first, then multi-prong
/// items and ranges, falling back to the special (`else`/`_`) prong when nothing
/// matches. Item values in `case_vals` were already validated and resolved by
/// the caller, so re-resolving them here cannot fail.
fn resolveSwitchComptime(
    sema: *Sema,
    spa: SwitchProngAnalysis,
    child_block: *Block,
    cond_operand: Air.Inst.Ref,
    operand_val: Value,
    operand_ty: Type,
    special: SpecialProng,
    case_vals: std.ArrayListUnmanaged(Air.Inst.Ref),
    scalar_cases_len: u32,
    multi_cases_len: u32,
    err_set: bool,
    empty_enum: bool,
) CompileError!Air.Inst.Ref {
    const merges = &child_block.label.?.merges;
    // Range comparison below requires a fully-resolved (non-lazy) value.
    const resolved_operand_val = try sema.resolveLazyValue(operand_val);
    var extra_index: usize = special.end;
    {
        // Scalar prongs: exactly one item each.
        var scalar_i: usize = 0;
        while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
            // Skip the raw item ref in ZIR extra; resolved values live in `case_vals`.
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1;
            const body = sema.code.bodySlice(extra_index, info.body_len);
            extra_index += info.body_len;

            const item = case_vals.items[scalar_i];
            // Validation above ensured this will succeed.
            const item_val = sema.resolveConstDefinedValue(child_block, .unneeded, item, undefined) catch unreachable;
            if (operand_val.eql(item_val, operand_ty, sema.mod)) {
                if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                return spa.resolveProngComptime(
                    child_block,
                    .normal,
                    body,
                    info.capture,
                    .{ .scalar_capture = @intCast(scalar_i) },
                    &.{item},
                    if (info.is_inline) cond_operand else .none,
                    info.has_tag_capture,
                    merges,
                );
            }
        }
    }
    {
        // Multi prongs: a list of items and/or ranges per prong.
        var multi_i: usize = 0;
        var case_val_idx: usize = scalar_cases_len;
        while (multi_i < multi_cases_len) : (multi_i += 1) {
            const items_len = sema.code.extra[extra_index];
            extra_index += 1;
            const ranges_len = sema.code.extra[extra_index];
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1 + items_len;
            // The body follows the range endpoint pairs (2 refs per range).
            const body = sema.code.bodySlice(extra_index + 2 * ranges_len, info.body_len);

            const items = case_vals.items[case_val_idx..][0..items_len];
            case_val_idx += items_len;

            for (items) |item| {
                // Validation above ensured these will succeed.
                const item_val = sema.resolveConstDefinedValue(child_block, .unneeded, item, undefined) catch unreachable;
                if (operand_val.eql(item_val, operand_ty, sema.mod)) {
                    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                    return spa.resolveProngComptime(
                        child_block,
                        .normal,
                        body,
                        info.capture,
                        .{ .multi_capture = @intCast(multi_i) },
                        items,
                        if (info.is_inline) cond_operand else .none,
                        info.has_tag_capture,
                        merges,
                    );
                }
            }

            var range_i: usize = 0;
            while (range_i < ranges_len) : (range_i += 1) {
                const range_items = case_vals.items[case_val_idx..][0..2];
                extra_index += 2;
                case_val_idx += 2;

                // Validation above ensured these will succeed.
                const first_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_items[0], undefined) catch unreachable;
                const last_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_items[1], undefined) catch unreachable;
                // Inclusive range match: first <= operand <= last.
                if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and
                    (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty)))
                {
                    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                    return spa.resolveProngComptime(
                        child_block,
                        .normal,
                        body,
                        info.capture,
                        .{ .multi_capture = @intCast(multi_i) },
                        undefined, // case_vals may be undefined for ranges
                        if (info.is_inline) cond_operand else .none,
                        info.has_tag_capture,
                        merges,
                    );
                }
            }

            extra_index += info.body_len;
        }
    }
    // No explicit prong matched; fall through to the special prong.
    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, special.body, cond_operand);
    if (empty_enum) {
        return .void_value;
    }

    return spa.resolveProngComptime(
        child_block,
        .special,
        special.body,
        special.capture,
        .special_capture,
        undefined, // case_vals may be undefined for special prongs
        if (special.is_inline) cond_operand else .none,
        special.has_tag_capture,
        merges,
    );
}
|
|
|
|
/// Iterates, in ascending order, over all values of an integer type that are
/// not covered by any range in a `RangeSet`. Used to expand `inline else`
/// over an integer operand into one prong per unhandled value.
const RangeSetUnhandledIterator = struct {
    mod: *Module,
    /// Next candidate value to consider, or `null` when iteration is finished.
    cur: ?InternPool.Index,
    /// Maximum value of the integer type; iteration ends once it is passed.
    max: InternPool.Index,
    /// Index of the next unprocessed range in `ranges`.
    range_i: usize,
    /// Ranges already handled by explicit prongs.
    ranges: []const RangeSet.Range,
    /// Arena-allocated scratch limbs for big-int increments when the type is
    /// wider than 128 bits; empty otherwise (a stack buffer is used instead).
    limbs: []math.big.Limb,

    /// Stack-buffer capacity: enough limbs for any integer up to 128 bits.
    const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);

    fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
        const mod = sema.mod;
        const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type;
        const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
        return .{
            .mod = mod,
            .cur = (try ty.minInt(mod, ty)).toIntern(),
            .max = (try ty.maxInt(mod, ty)).toIntern(),
            .range_i = 0,
            .ranges = range_set.ranges.items,
            .limbs = if (needed_limbs > preallocated_limbs)
                try sema.arena.alloc(math.big.Limb, needed_limbs)
            else
                &.{},
        };
    }

    /// Returns `val + 1` as an interned value, or `null` if `val` is the
    /// maximum value of the type.
    fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
        if (val == it.max) return null;
        const int = it.mod.intern_pool.indexToKey(val).int;

        switch (int.storage) {
            // Fast path: the value fits in a machine word and incrementing it
            // does not overflow that word.
            inline .u64, .i64 => |val_int| {
                const next_int = @addWithOverflow(val_int, 1);
                if (next_int[1] == 0)
                    return (try it.mod.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
            },
            .big_int => {},
            .lazy_align, .lazy_size => unreachable,
        }

        // Slow path: increment via big-int arithmetic.
        var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
        const val_bigint = int.storage.toBigInt(&val_space);

        var result_limbs: [preallocated_limbs]math.big.Limb = undefined;
        var result_bigint = math.big.int.Mutable.init(
            if (it.limbs.len > 0) it.limbs else &result_limbs,
            0,
        );

        result_bigint.addScalar(val_bigint, 1);
        return (try it.mod.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
    }

    /// Returns the next unhandled value, or `null` when every remaining value
    /// is covered by `ranges`.
    fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
        var cur = it.cur orelse return null;
        // If the current value is the start of a handled range, jump past the
        // range's end. Repeat in case ranges are adjacent.
        while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) {
            defer it.range_i += 1;
            cur = (try it.addOne(it.ranges[it.range_i].last)) orelse {
                it.cur = null;
                return null;
            };
        }
        it.cur = try it.addOne(cur);
        return cur;
    }
};
|
|
|
|
/// Result of `resolveSwitchItemVal`: a switch prong item both as an AIR
/// reference and as its fully-resolved interned value.
const ResolvedSwitchItem = struct {
    /// AIR reference for the item; refreshed if lazy-value resolution
    /// produced a different interned value than the coerced instruction.
    ref: Air.Inst.Ref,
    /// The comptime-known, non-lazy value of the item.
    val: InternPool.Index,
};
|
|
/// Coerces one switch prong item to `coerce_ty` and resolves it to a
/// comptime-known, non-lazy value, returning both the (possibly refreshed)
/// AIR ref and the interned value.
fn resolveSwitchItemVal(
    sema: *Sema,
    block: *Block,
    item_ref: Zir.Inst.Ref,
    /// Coerce `item_ref` to this type.
    coerce_ty: Type,
    switch_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
    range_expand: Module.SwitchProngSrc.RangeExpand,
) CompileError!ResolvedSwitchItem {
    const mod = sema.mod;
    const uncoerced_item = try sema.resolveInst(item_ref);

    // Constructing a LazySrcLoc is costly because we only have the switch AST node.
    // Only if we know for sure we need to report a compile error do we resolve the
    // full source locations.

    const item = sema.coerce(block, coerce_ty, uncoerced_item, .unneeded) catch |err| switch (err) {
        error.NeededSourceLocation => {
            // Coercion failed; redo it with a real source location so the
            // error message points at the offending prong.
            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
            _ = try sema.coerce(block, coerce_ty, uncoerced_item, src);
            unreachable;
        },
        else => |e| return e,
    };

    const maybe_lazy = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch |err| switch (err) {
        error.NeededSourceLocation => {
            // Same retry pattern: resolve the source location only on failure.
            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
            _ = try sema.resolveConstDefinedValue(block, src, item, .{
                .needed_comptime_reason = "switch prong values must be comptime-known",
            });
            unreachable;
        },
        else => |e| return e,
    };

    const val = try sema.resolveLazyValue(maybe_lazy);
    // If lazy resolution changed the value, refresh the ref to match it.
    const new_item = if (val.toIntern() != maybe_lazy.toIntern()) blk: {
        break :blk Air.internedToRef(val.toIntern());
    } else item;

    return .{ .ref = new_item, .val = val.toIntern() };
}
|
|
|
|
/// Validates a switch over an error-set operand: resolves every prong item,
/// records seen errors in `seen_errors`, and checks exhaustiveness.
/// Returns the error-set type of the `else` prong's capture: `anyerror` when
/// switching on `anyerror`, a new error set of the unhandled names otherwise,
/// or an error when coverage is wrong (missing errors, or an unreachable
/// `else` with a non-trivial body).
fn validateErrSetSwitch(
    sema: *Sema,
    block: *Block,
    seen_errors: *SwitchErrorSet,
    case_vals: *std.ArrayListUnmanaged(Air.Inst.Ref),
    operand_ty: Type,
    inst_data: std.meta.FieldType(Zir.Inst.Data, .pl_node),
    scalar_cases_len: u32,
    multi_cases_len: u32,
    else_case: struct { body: []const Zir.Inst.Index, end: usize, src: LazySrcLoc },
    has_else: bool,
) CompileError!?Type {
    const gpa = sema.gpa;
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    const src_node_offset = inst_data.src_node;
    const src = inst_data.src();

    var extra_index: usize = else_case.end;
    {
        // Validate scalar prongs (one item each).
        var scalar_i: u32 = 0;
        while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
            const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1 + info.body_len;

            case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
                block,
                seen_errors,
                item_ref,
                operand_ty,
                src_node_offset,
                .{ .scalar = scalar_i },
            ));
        }
    }
    {
        // Validate multi prongs (several items; ranges are rejected below).
        var multi_i: u32 = 0;
        while (multi_i < multi_cases_len) : (multi_i += 1) {
            const items_len = sema.code.extra[extra_index];
            extra_index += 1;
            const ranges_len = sema.code.extra[extra_index];
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1;
            const items = sema.code.refSlice(extra_index, items_len);
            extra_index += items_len + info.body_len;

            try case_vals.ensureUnusedCapacity(gpa, items.len);
            for (items, 0..) |item_ref, item_i| {
                case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
                    block,
                    seen_errors,
                    item_ref,
                    operand_ty,
                    src_node_offset,
                    .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
                ));
            }

            // Ranges are never allowed for error sets.
            try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
        }
    }

    switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
        .anyerror_type => {
            // 'anyerror' cannot be enumerated, so an else prong is mandatory.
            if (!has_else) {
                return sema.fail(
                    block,
                    src,
                    "else prong required when switching on type 'anyerror'",
                    .{},
                );
            }
            return Type.anyerror;
        },
        else => |err_set_ty_index| else_validation: {
            const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
            var maybe_msg: ?*Module.ErrorMsg = null;
            errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);

            // Without an else prong, every error name must appear explicitly;
            // collect all missing names into one error message.
            for (error_names.get(ip)) |error_name| {
                if (!seen_errors.contains(error_name) and !has_else) {
                    const msg = maybe_msg orelse blk: {
                        maybe_msg = try sema.errMsg(
                            block,
                            src,
                            "switch must handle all possibilities",
                            .{},
                        );
                        break :blk maybe_msg.?;
                    };

                    try sema.errNote(
                        block,
                        src,
                        msg,
                        "unhandled error value: 'error.{}'",
                        .{error_name.fmt(ip)},
                    );
                }
            }

            if (maybe_msg) |msg| {
                // Ownership of `msg` moves to failWithOwnedErrorMsg; clear the
                // local so the errdefer above does not double-free it.
                maybe_msg = null;
                try sema.addDeclaredHereNote(msg, operand_ty);
                return sema.failWithOwnedErrorMsg(block, msg);
            }

            if (has_else and seen_errors.count() == error_names.len) {
                // In order to enable common patterns for generic code allow simple else bodies
                // else => unreachable,
                // else => return,
                // else => |e| return e,
                // even if all the possible errors were already handled.
                const tags = sema.code.instructions.items(.tag);
                const datas = sema.code.instructions.items(.data);
                for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) {
                    .dbg_stmt,
                    .dbg_var_val,
                    .ret_type,
                    .as_node,
                    .ret_node,
                    .@"unreachable",
                    .@"defer",
                    .defer_err_code,
                    .err_union_code,
                    .ret_err_value_code,
                    .save_err_ret_index,
                    .restore_err_ret_index_unconditional,
                    .restore_err_ret_index_fn_entry,
                    .is_non_err,
                    .ret_is_non_err,
                    .condbr,
                    => {},
                    .extended => switch (datas[@intFromEnum(else_inst)].extended.opcode) {
                        .restore_err_ret_index => {},
                        else => break,
                    },
                    else => break,
                } else break :else_validation;

                return sema.fail(
                    block,
                    else_case.src,
                    "unreachable else prong; all cases already handled",
                    .{},
                );
            }

            // The else capture's type is the set of error names not handled
            // by explicit prongs.
            var names: InferredErrorSet.NameMap = .{};
            try names.ensureUnusedCapacity(sema.arena, error_names.len);
            for (error_names.get(ip)) |error_name| {
                if (seen_errors.contains(error_name)) continue;

                names.putAssumeCapacityNoClobber(error_name, {});
            }
            // No need to keep the hash map metadata correct; here we
            // extract the (sorted) keys only.
            return try mod.errorSetFromUnsortedNames(names.keys());
        },
    }
    return null;
}
|
|
|
|
/// Validates one `a...b` range prong of a switch: resolves both endpoints,
/// rejects empty ranges, and records the range in `range_set`, reporting a
/// "duplicate switch value" error when it overlaps a previous prong.
/// Returns the (possibly refreshed) AIR refs for both endpoints.
fn validateSwitchRange(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    first_ref: Zir.Inst.Ref,
    last_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError![2]Air.Inst.Ref {
    const mod = sema.mod;
    const start = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, src_node_offset, switch_prong_src, .first);
    const end = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, src_node_offset, switch_prong_src, .last);
    const start_val = Value.fromInterned(start.val);
    const end_val = Value.fromInterned(end.val);
    // An inverted range can never match; it is a compile error.
    if (try start_val.compareAll(.gt, end_val, operand_ty, mod)) {
        const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
        return sema.fail(block, src, "range start value is greater than the end value", .{});
    }
    const prev_src = try range_set.add(start.val, end.val, switch_prong_src);
    try sema.validateSwitchDupe(block, prev_src, switch_prong_src, src_node_offset);
    return .{ start.ref, end.ref };
}
|
|
|
|
/// Validates a single integer item prong of a switch: resolves the item and
/// records it in `range_set` (as a one-value range), reporting a duplicate
/// error if it was already covered. Returns the item's AIR ref.
fn validateSwitchItemInt(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    // A scalar item is a degenerate range covering exactly one value.
    const prev_src = try range_set.add(resolved.val, resolved.val, switch_prong_src);
    try sema.validateSwitchDupe(block, prev_src, switch_prong_src, src_node_offset);
    return resolved.ref;
}
|
|
|
|
/// Validates a single enum item prong of a switch over an enum type.
/// Named tag values are tracked per field index in `seen_fields`; tag values
/// with no corresponding named field fall back to integer tracking in
/// `range_set`. Duplicates produce a compile error.
fn validateSwitchItemEnum(
    sema: *Sema,
    block: *Block,
    seen_fields: []?Module.SwitchProngSrc,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const ip = &sema.mod.intern_pool;
    const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const int = ip.indexToKey(item.val).enum_tag.int;
    const field_index = ip.indexToKey(ip.typeOf(item.val)).enum_type.tagValueIndex(ip, int) orelse {
        // No named field for this tag value; dedupe via the integer range set.
        const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
        try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
        return item.ref;
    };
    const maybe_prev_src = seen_fields[field_index];
    seen_fields[field_index] = switch_prong_src;
    try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
    return item.ref;
}
|
|
|
|
/// Validates a single error item prong of a switch over an error set:
/// resolves the item and records its error name in `seen_errors`, reporting a
/// duplicate error if the name was already handled. Returns the item's AIR ref.
fn validateSwitchItemError(
    sema: *Sema,
    block: *Block,
    seen_errors: *SwitchErrorSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const ip = &sema.mod.intern_pool;
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const error_name = ip.indexToKey(resolved.val).err.name;
    // fetchPut returns the previous entry if the name was seen before.
    const previous = try seen_errors.fetchPut(error_name, switch_prong_src);
    try sema.validateSwitchDupe(block, if (previous) |kv| kv.value else null, switch_prong_src, src_node_offset);
    return resolved.ref;
}
|
|
|
|
/// Reports a "duplicate switch value" compile error when `maybe_prev_src` is
/// non-null, attaching a note pointing at the previously seen prong.
/// No-op when `maybe_prev_src` is null.
fn validateSwitchDupe(
    sema: *Sema,
    block: *Block,
    maybe_prev_src: ?Module.SwitchProngSrc,
    switch_prong_src: Module.SwitchProngSrc,
    src_node_offset: i32,
) CompileError!void {
    const prev_prong = maybe_prev_src orelse return;
    const mod = sema.mod;
    const decl = mod.declPtr(block.src_decl);
    const dup_src = switch_prong_src.resolve(mod, decl, src_node_offset, .none);
    const first_src = prev_prong.resolve(mod, decl, src_node_offset, .none);
    const msg = try sema.errMsg(
        block,
        dup_src,
        "duplicate switch value",
        .{},
    );
    {
        // Destroy the message only if attaching the note fails; afterwards
        // ownership passes to failWithOwnedErrorMsg.
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            first_src,
            msg,
            "previous value here",
            .{},
        );
    }
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Validates a single `bool` item prong of a switch: resolves the item,
/// bumps the matching true/false counter, and errors once either value has
/// been handled more than once. Returns the item's AIR ref.
fn validateSwitchItemBool(
    sema: *Sema,
    block: *Block,
    true_count: *u8,
    false_count: *u8,
    item_ref: Zir.Inst.Ref,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, src_node_offset, switch_prong_src, .none);
    const counter = if (Value.fromInterned(resolved.val).toBool()) true_count else false_count;
    counter.* += 1;
    if (true_count.* > 1 or false_count.* > 1) {
        const decl = mod.declPtr(block.src_decl);
        const src = switch_prong_src.resolve(mod, decl, src_node_offset, .none);
        return sema.fail(block, src, "duplicate switch value", .{});
    }
    return resolved.ref;
}
|
|
|
|
/// Maps an interned switch item value to the source location of the prong
/// that first used it; used to detect duplicate prongs for sparse value
/// spaces (see `validateSwitchItemSparse`).
const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);
|
|
|
|
/// Validates a single item prong for operand types whose values are sparse
/// (tracked via a hash map rather than ranges): resolves the item and records
/// it in `seen_values`, erroring on duplicates. Returns the item's AIR ref.
fn validateSwitchItemSparse(
    sema: *Sema,
    block: *Block,
    seen_values: *ValueSrcMap,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const existing = try seen_values.fetchPut(sema.gpa, resolved.val, switch_prong_src);
    if (existing) |kv| {
        // A prior prong used this value: always a compile error, so
        // validateSwitchDupe never returns normally here.
        try sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
        unreachable;
    }
    return resolved.ref;
}
|
|
|
|
/// Emits a compile error if a switch over `operand_ty` contains any range
/// prongs; no-op when `ranges_len` is zero.
fn validateSwitchNoRange(
    sema: *Sema,
    block: *Block,
    ranges_len: u32,
    operand_ty: Type,
    src_node_offset: i32,
) CompileError!void {
    if (ranges_len == 0) return;

    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
    const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset };

    const msg = try sema.errMsg(
        block,
        operand_src,
        "ranges not allowed when switching on type '{}'",
        .{operand_ty.fmt(sema.mod)},
    );
    {
        // Destroy the message only if attaching the note fails; afterwards
        // ownership passes to failWithOwnedErrorMsg.
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            range_src,
            msg,
            "range here",
            .{},
        );
    }
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Recognizes prong bodies of the shape `unreachable` / `@panic(msg)` applied
/// to an error operand and, when safety is enabled, lowers them directly to a
/// call of the relevant panic handler instead of analyzing the body normally.
/// Returns true when the body was fully handled here; false means the caller
/// must analyze the body the usual way.
fn maybeErrorUnwrap(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    allow_err_code_inst: bool,
) !bool {
    const mod = sema.mod;
    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;

    // First pass: bail out unless the body consists solely of instructions
    // we know how to handle below.
    const tags = sema.code.instructions.items(.tag);
    for (body) |inst| {
        switch (tags[@intFromEnum(inst)]) {
            .@"unreachable" => if (!block.wantSafety()) return false,
            .err_union_code => if (!allow_err_code_inst) return false,
            .save_err_ret_index,
            .dbg_stmt,
            .str,
            .as_node,
            .panic,
            .field_val,
            => {},
            else => return false,
        }
    }

    // Second pass: actually lower the instructions. `.@"unreachable"` and
    // `.panic` terminate the body with a call to a panic handler.
    for (body) |inst| {
        const air_inst = switch (tags[@intFromEnum(inst)]) {
            .err_union_code => continue,
            .dbg_stmt => {
                try sema.zirDbgStmt(block, inst);
                continue;
            },
            .save_err_ret_index => {
                try sema.zirSaveErrRetIndex(block, inst);
                continue;
            },
            .str => try sema.zirStr(inst),
            .as_node => try sema.zirAsNode(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .@"unreachable" => {
                if (!mod.comp.formatted_panics) {
                    // Plain panic message; no error name formatting.
                    try sema.safetyPanic(block, operand_src, .unwrap_error);
                    return true;
                }

                const panic_fn = try sema.getBuiltin("panicUnwrapError");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
                return true;
            },
            .panic => {
                const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                const msg_inst = try sema.resolveInst(inst_data.operand);

                const panic_fn = try sema.getBuiltin("panic");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
                return true;
            },
            else => unreachable,
        };
        if (sema.typeOf(air_inst).isNoReturn(mod))
            return true;
        // Map the ZIR instruction to its AIR result so later instructions in
        // the body can refer to it.
        sema.inst_map.putAssumeCapacity(inst, air_inst);
    }
    // The first pass guarantees the body ends in `.@"unreachable"` or
    // `.panic`, both of which return above.
    unreachable;
}
|
|
|
|
/// Inspects the condition of a conditional branch: if it is an `is_non_err` check
/// whose operand is a comptime-known error, delegates to `maybeErrorUnwrapComptime`
/// so a comptime error-return-trace failure can be reported for the error branch
/// `body` instead of analyzing it as runtime code.
fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
    const mod = sema.mod;
    // Only conditions that are ZIR instructions can be an `is_non_err` check.
    const index = cond.toIndex() orelse return;
    if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) return;

    const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
    const err_operand = try sema.resolveInst(err_inst_data.operand);
    const operand_ty = sema.typeOf(err_operand);
    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
        // A bare error-set value is always an error; check the branch body directly.
        try sema.maybeErrorUnwrapComptime(block, body, err_operand);
        return;
    }
    if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
        if (!operand_ty.isError(mod)) return;
        // Only proceed when the comptime-known value actually holds an error
        // (`.none` means the error union holds a payload).
        if (val.getErrorName(mod) == .none) return;
        try sema.maybeErrorUnwrapComptime(block, body, err_operand);
    }
}
|
|
|
|
/// Scans `body` for an `unreachable` instruction preceded only by trivial
/// instructions (`dbg_stmt`, `save_err_ret_index`). If one is found and `operand`
/// is a comptime-known error value, fails the compilation with a comptime error
/// return trace anchored at the `unreachable`'s source location. Otherwise does
/// nothing, leaving the body to be analyzed normally.
fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !void {
    const tags = sema.code.instructions.items(.tag);
    // Find the `unreachable`; bail out on any non-trivial instruction.
    const inst = for (body) |inst| {
        switch (tags[@intFromEnum(inst)]) {
            .dbg_stmt,
            .save_err_ret_index,
            => {},
            .@"unreachable" => break inst,
            else => return,
        }
    } else return;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"unreachable";
    const src = inst_data.src();

    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        if (val.getErrorName(sema.mod).unwrap()) |name| {
            return sema.failWithComptimeErrorRetTrace(block, src, name);
        }
    }
}
|
|
|
|
/// Analyzes `@hasField`. Resolves the type and the comptime-known field name,
/// then answers per type kind: slices have "ptr"/"len"; anonymous structs check
/// the name list (or the numeric index for tuples); structs, unions, and enums
/// consult their field-name tables; arrays have only "len". Any other type is a
/// compile error. Returns `.bool_true` or `.bool_false`.
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const ty = try sema.resolveType(block, ty_src, extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
    // Fields must be resolved before they can be queried.
    try sema.resolveTypeFields(ty);
    const ip = &mod.intern_pool;

    const has_field = hf: {
        switch (ip.indexToKey(ty.toIntern())) {
            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                // Slices expose the synthetic "ptr" and "len" fields.
                .Slice => {
                    if (ip.stringEqlSlice(field_name, "ptr")) break :hf true;
                    if (ip.stringEqlSlice(field_name, "len")) break :hf true;
                    break :hf false;
                },
                else => {},
            },
            .anon_struct_type => |anon_struct| {
                if (anon_struct.names.len != 0) {
                    break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names.get(ip), field_name) != null;
                } else {
                    // Tuple: the "name" is a numeric index; it exists if in bounds.
                    const field_index = field_name.toUnsigned(ip) orelse break :hf false;
                    break :hf field_index < ty.structFieldCount(mod);
                }
            },
            .struct_type => |struct_type| {
                break :hf struct_type.nameIndex(ip, field_name) != null;
            },
            .union_type => |union_type| {
                const union_obj = ip.loadUnionType(union_type);
                break :hf union_obj.nameIndex(ip, field_name) != null;
            },
            .enum_type => |enum_type| {
                break :hf enum_type.nameIndex(ip, field_name) != null;
            },
            // Arrays only expose "len".
            .array_type => break :hf ip.stringEqlSlice(field_name, "len"),
            else => {},
        }
        // Falls through for any type kind not handled above.
        return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
            ty.fmt(mod),
        });
    };
    return if (has_field) .bool_true else .bool_false;
}
|
|
|
|
/// Analyzes `@hasDecl`. Resolves the container type and the comptime-known decl
/// name, records an incremental-compilation dependency on that namespace/name
/// pair, then looks the name up in the container's namespace. Returns
/// `.bool_true` only if the decl exists and is either `pub` or declared in the
/// same file as the caller.
fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
    const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, .{
        .needed_comptime_reason = "decl name must be comptime-known",
    });

    try sema.checkNamespaceType(block, lhs_src, container_type);
    // Register a dependency so this result is re-evaluated if the namespace
    // gains or loses a decl with this name.
    if (container_type.typeDeclInst(mod)) |type_decl_inst| {
        try sema.declareDependency(.{ .namespace_name = .{
            .namespace = type_decl_inst,
            .name = decl_name,
        } });
    }

    const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
        return .bool_false;
    if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
        const decl = mod.declPtr(decl_index);
        // Non-pub decls are only visible from within the same file.
        if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
            return .bool_true;
        }
    }
    return .bool_false;
}
|
|
|
|
/// Analyzes `@import`. Resolves the import target relative to the current file's
/// scope, runs semantic analysis on the imported file, and returns a reference to
/// the imported file's root decl value (its struct type).
fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const operand_src = inst_data.src();
    const operand = inst_data.get(sema.code);

    const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
        error.ImportOutsideModulePath => {
            return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand});
        },
        error.ModuleNotFound => {
            return sema.fail(block, operand_src, "no module named '{s}' available within module {s}", .{
                operand, block.getFileScope(mod).mod.fully_qualified_name,
            });
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
            return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
        },
    };
    try mod.semaFile(result.file);
    // semaFile is expected to populate root_decl, hence the unconditional unwrap.
    const file_root_decl_index = result.file.root_decl.unwrap().?;
    return sema.analyzeDeclVal(block, operand_src, file_root_decl_index);
}
|
|
|
|
/// Analyzes `@embedFile`. Resolves the comptime-known file path, rejects empty
/// paths, loads the file's contents through `Module.embedFile`, and returns the
/// interned value of the embedded bytes.
fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name = try sema.resolveConstString(block, operand_src, inst_data.operand, .{
        .needed_comptime_reason = "file path name must be comptime-known",
    });

    if (name.len == 0) {
        return sema.fail(block, operand_src, "file path name cannot be empty", .{});
    }

    const src_loc = mod.declPtr(block.src_decl).toSrcLoc(operand_src, mod);
    const val = mod.embedFile(block.getFileScope(mod), name, src_loc) catch |err| switch (err) {
        error.ImportOutsideModulePath => {
            // "module path" matches both the error name and the wording of the
            // corresponding `@import` diagnostic (packages were renamed to modules).
            return sema.fail(block, operand_src, "embed of file outside module path: '{s}'", .{name});
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
            return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) });
        },
    };

    return Air.internedToRef(val);
}
|
|
|
|
/// Produces the error value for a `return error.Foo` expression, typed as the
/// single-element error set containing exactly that error.
fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const str_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const err_name = try mod.intern_pool.getOrPutString(sema.gpa, str_tok.get(sema.code));
    // Register the error in the module's global error set; only the side effect matters.
    _ = try mod.getErrorValue(err_name);
    const set_ty = try mod.singleErrorSetType(err_name);
    const interned = try mod.intern(.{ .err = .{
        .ty = set_ty.toIntern(),
        .name = err_name,
    } });
    return Air.internedToRef(interned);
}
|
|
|
|
/// Analyzes the left-shift family of ZIR instructions. `air_tag` selects the
/// semantics: `.shl` (truncating), `.shl_exact` (`@shlExact`, errors on shifted-out
/// bits), or `.shl_sat` (`<<|`, saturating). Handles comptime evaluation, emits
/// shift-amount and overflow safety checks for runtime operands, and reports
/// comptime errors for too-large or negative shift amounts.
fn zirShl(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    const scalar_ty = lhs_ty.scalarType(mod);
    const scalar_rhs_ty = rhs_ty.scalarType(mod);

    // TODO coerce rhs if air_tag is not shl_sat
    const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);

    const maybe_lhs_val = try sema.resolveValueIntable(lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(rhs);

    // Validate a comptime-known RHS up front: undef propagates, zero short-circuits,
    // out-of-range and negative amounts are compile errors.
    if (maybe_rhs_val) |rhs_val| {
        if (rhs_val.isUndef(mod)) {
            return mod.undefRef(sema.typeOf(lhs));
        }
        // If rhs is 0, return lhs without doing any calculations.
        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
            return lhs;
        }
        // Shift amount must be < bit width, except for comptime_int (unbounded)
        // and saturating shift (which clamps instead).
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                var i: usize = 0;
                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                    const rhs_elem = try rhs_val.elemValue(mod, i);
                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                        return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                            rhs_elem.fmtValue(scalar_ty, mod),
                            i,
                            scalar_ty.fmt(mod),
                        });
                    }
                }
            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                    rhs_val.fmtValue(scalar_ty, mod),
                    scalar_ty.fmt(mod),
                });
            }
        }
        // Negative shift amounts are always invalid.
        if (rhs_ty.zigTypeTag(mod) == .Vector) {
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(mod, i);
                if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValue(scalar_ty, mod),
                        i,
                    });
                }
            }
        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
            return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                rhs_val.fmtValue(scalar_ty, mod),
            });
        }
    }

    // If both operands are comptime-known, fold the shift; otherwise record which
    // operand forces runtime evaluation.
    const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
        if (lhs_val.isUndef(mod)) return mod.undefRef(lhs_ty);
        const rhs_val = maybe_rhs_val orelse {
            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
            }
            break :rs rhs_src;
        };
        const val = if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
            try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
        else switch (air_tag) {
            .shl_exact => val: {
                // @shlExact: any shifted-out bit is a compile error at comptime.
                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod);
                if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) {
                    break :val shifted.wrapped_result;
                }
                return sema.fail(block, src, "operation caused overflow", .{});
            },
            .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod),
            .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod),
            else => unreachable,
        };
        return Air.internedToRef(val.toIntern());
    } else lhs_src;

    const new_rhs = if (air_tag == .shl_sat) rhs: {
        // Limit the RHS type for saturating shl to be an integer as small as the LHS.
        if (rhs_is_comptime_int or
            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
        {
            const max_int = Air.internedToRef((try lhs_ty.maxInt(mod, lhs_ty)).toIntern());
            const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
            break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
        } else {
            break :rhs rhs;
        }
    } else rhs;

    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (block.wantSafety()) {
        const bit_count = scalar_ty.intInfo(mod).bits;
        // For power-of-two bit counts the RHS type itself cannot represent an
        // out-of-range shift amount, so the check is only needed otherwise.
        if (!std.math.isPowerOfTwo(bit_count)) {
            const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = lt,
                        .operation = .And,
                    } },
                });
            } else ok: {
                const bit_count_inst = Air.internedToRef(bit_count_val.toIntern());
                break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
            };
            try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big);
        }

        if (air_tag == .shl_exact) {
            // Use shl_with_overflow so the overflow bit drives the safety check,
            // and return the wrapped result element of the tuple.
            const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
            const op_ov = try block.addInst(.{
                .tag = .shl_with_overflow,
                .data = .{ .ty_pl = .{
                    .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()),
                    .payload = try sema.addExtra(Air.Bin{
                        .lhs = lhs,
                        .rhs = rhs,
                    }),
                } },
            });
            const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
            const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
                try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = ov_bit,
                        .operation = .Or,
                    } },
                })
            else
                ov_bit;
            const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
            const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

            try sema.addSafetyCheck(block, src, no_ov, .shl_overflow);
            return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
        }
    }
    return block.addBinOp(air_tag, lhs, new_rhs);
}
|
|
|
|
/// Analyzes the right-shift family of ZIR instructions. `air_tag` selects the
/// semantics: `.shr` or `.shr_exact` (`@shrExact`, errors when 1-bits are shifted
/// out). Folds comptime-known operands, reports comptime errors for too-large or
/// negative shift amounts, and emits shift-amount and exactness safety checks
/// for runtime operands.
fn zirShr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    const scalar_ty = lhs_ty.scalarType(mod);

    const maybe_lhs_val = try sema.resolveValueIntable(lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(rhs);

    const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
        // Undef RHS makes the whole result undef.
        if (rhs_val.isUndef(mod)) {
            return mod.undefRef(lhs_ty);
        }
        // If rhs is 0, return lhs without doing any calculations.
        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
            return lhs;
        }
        // Shift amount must be < bit width (comptime_int is unbounded).
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                var i: usize = 0;
                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                    const rhs_elem = try rhs_val.elemValue(mod, i);
                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                        return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                            rhs_elem.fmtValue(scalar_ty, mod),
                            i,
                            scalar_ty.fmt(mod),
                        });
                    }
                }
            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                    rhs_val.fmtValue(scalar_ty, mod),
                    scalar_ty.fmt(mod),
                });
            }
        }
        // Negative shift amounts are always invalid.
        if (rhs_ty.zigTypeTag(mod) == .Vector) {
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(mod, i);
                if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValue(scalar_ty, mod),
                        i,
                    });
                }
            }
        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
            return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                rhs_val.fmtValue(scalar_ty, mod),
            });
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return mod.undefRef(lhs_ty);
            }
            if (air_tag == .shr_exact) {
                // Detect if any ones would be shifted out.
                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
                if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
                    return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                }
            }
            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod);
            return Air.internedToRef(val.toIntern());
        } else {
            break :rs lhs_src;
        }
    } else rhs_src;

    // comptime_int has no fixed width, so the RHS must be comptime-known.
    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);
    const result = try block.addBinOp(air_tag, lhs, rhs);
    if (block.wantSafety()) {
        const bit_count = scalar_ty.intInfo(mod).bits;
        // For power-of-two bit counts the RHS type itself cannot represent an
        // out-of-range shift amount, so the check is only needed otherwise.
        if (!std.math.isPowerOfTwo(bit_count)) {
            const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count);

            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = lt,
                        .operation = .And,
                    } },
                });
            } else ok: {
                const bit_count_inst = Air.internedToRef(bit_count_val.toIntern());
                break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
            };
            try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big);
        }

        if (air_tag == .shr_exact) {
            // Exactness check: shifting back left must reproduce the original LHS.
            const back = try block.addBinOp(.shl, result, rhs);

            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const eql = try block.addCmpVector(lhs, back, .eq);
                break :ok try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else try block.addBinOp(.cmp_eq, lhs, back);
            try sema.addSafetyCheck(block, src, ok, .shr_overflow);
        }
    }
    return result;
}
|
|
|
|
/// Analyzes the binary bitwise ZIR instructions (`&`, `|`, `^`); `air_tag` is one
/// of `.bit_and`, `.bit_or`, `.xor`. Peer-resolves the operand types, requires an
/// integer (or comptime_int) scalar type, folds when both operands are
/// comptime-known, and otherwise emits the corresponding AIR binary op.
fn zirBitwise(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    // Both operands are coerced to their peer-resolved common type.
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
    const scalar_type = resolved_type.scalarType(mod);
    const scalar_tag = scalar_type.zigTypeTag(mod);

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    if (!is_int) {
        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
    }

    const runtime_src = runtime: {
        // TODO: ask the linker what kind of relocations are available, and
        // in some cases emit a Value that means "this decl's address AND'd with this operand".
        if (try sema.resolveValueIntable(casted_lhs)) |lhs_val| {
            if (try sema.resolveValueIntable(casted_rhs)) |rhs_val| {
                // Both comptime-known: fold and return an interned constant.
                const result_val = switch (air_tag) {
                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod),
                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod),
                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod),
                    else => unreachable,
                };
                return Air.internedToRef(result_val.toIntern());
            } else {
                break :runtime rhs_src;
            }
        } else {
            break :runtime lhs_src;
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes the bitwise-not ZIR instruction (`~`). Requires an integer scalar
/// type; propagates undef, folds comptime-known scalars and vectors
/// element-wise, and otherwise emits an AIR `.not`.
fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const operand = try sema.resolveInst(inst_data.operand);
    const operand_type = sema.typeOf(operand);
    const scalar_type = operand_type.scalarType(mod);

    if (scalar_type.zigTypeTag(mod) != .Int) {
        return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
            operand_type.fmt(mod),
        });
    }

    if (try sema.resolveValue(operand)) |val| {
        if (val.isUndef(mod)) {
            return mod.undefRef(operand_type);
        } else if (operand_type.zigTypeTag(mod) == .Vector) {
            // Fold vectors element by element and intern the aggregate.
            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
            for (elems, 0..) |*elem, i| {
                const elem_val = try val.elemValue(mod, i);
                elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod);
            }
            return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                .ty = operand_type.toIntern(),
                .storage = .{ .elems = elems },
            } })));
        } else {
            const result_val = try val.bitwiseNot(operand_type, sema.arena, mod);
            return Air.internedToRef(result_val.toIntern());
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.not, operand_type, operand);
}
|
|
|
|
/// Concatenates two tuples (`lhs ++ rhs` where both operands are tuples).
/// Builds a new anonymous struct type whose fields are lhs's fields followed by
/// rhs's. If every field value is comptime-known the result is an interned
/// constant; otherwise emits a runtime aggregate initialization from the
/// individual field values.
fn analyzeTupleCat(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const src = LazySrcLoc.nodeOffset(src_node);

    const lhs_len = lhs_ty.structFieldCount(mod);
    const rhs_len = rhs_ty.structFieldCount(mod);
    const dest_fields = lhs_len + rhs_len;

    // Trivial cases: empty result, or one side contributes nothing.
    if (dest_fields == 0) {
        return Air.internedToRef(Value.empty_struct.toIntern());
    }
    if (lhs_len == 0) {
        return rhs;
    }
    if (rhs_len == 0) {
        return lhs;
    }
    const final_len = try sema.usizeCast(block, src, dest_fields);

    const types = try sema.arena.alloc(InternPool.Index, final_len);
    const values = try sema.arena.alloc(InternPool.Index, final_len);

    // Collect field types/values; a field whose default is `unreachable_value`
    // is runtime-known, which forces a runtime aggregate init.
    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        var i: u32 = 0;
        while (i < lhs_len) : (i += 1) {
            types[i] = lhs_ty.structFieldType(i, mod).toIntern();
            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
            values[i] = default_val.toIntern();
            const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
                .array_cat_offset = src_node,
                .elem_index = i,
            } };
            if (default_val.toIntern() == .unreachable_value) {
                runtime_src = operand_src;
                values[i] = .none;
            }
        }
        i = 0;
        while (i < rhs_len) : (i += 1) {
            types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern();
            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
            values[i + lhs_len] = default_val.toIntern();
            const operand_src: LazySrcLoc = .{ .array_cat_rhs = .{
                .array_cat_offset = src_node,
                .elem_index = i,
            } };
            if (default_val.toIntern() == .unreachable_value) {
                runtime_src = operand_src;
                values[i + lhs_len] = .none;
            }
        }
        break :rs runtime_src;
    };

    const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
        .types = types,
        .values = values,
        .names = &.{},
    });

    // All fields comptime-known: intern the whole aggregate as a constant.
    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return Air.internedToRef(tuple_val);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Runtime path: extract every field value and emit an aggregate init.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
    var i: u32 = 0;
    while (i < lhs_len) : (i += 1) {
        const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
            .array_cat_offset = src_node,
            .elem_index = i,
        } };
        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, i, lhs_ty);
    }
    i = 0;
    while (i < rhs_len) : (i += 1) {
        const operand_src: LazySrcLoc = .{ .array_cat_rhs = .{
            .array_cat_offset = src_node,
            .elem_index = i,
        } };
        element_refs[i + lhs_len] =
            try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
    }

    return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
|
|
|
|
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
|
|
const tracy = trace(@src());
|
|
defer tracy.end();
|
|
|
|
const mod = sema.mod;
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
|
|
const lhs = try sema.resolveInst(extra.lhs);
|
|
const rhs = try sema.resolveInst(extra.rhs);
|
|
const lhs_ty = sema.typeOf(lhs);
|
|
const rhs_ty = sema.typeOf(rhs);
|
|
const src = inst_data.src();
|
|
|
|
const lhs_is_tuple = lhs_ty.isTuple(mod);
|
|
const rhs_is_tuple = rhs_ty.isTuple(mod);
|
|
if (lhs_is_tuple and rhs_is_tuple) {
|
|
return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
|
|
}
|
|
|
|
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
|
|
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
|
|
|
|
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
|
|
if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
|
|
return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
|
|
};
|
|
const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
|
|
assert(!rhs_is_tuple);
|
|
return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
|
|
};
|
|
|
|
const resolved_elem_ty = t: {
|
|
var trash_block = block.makeSubBlock();
|
|
trash_block.is_comptime = false;
|
|
defer trash_block.instructions.deinit(sema.gpa);
|
|
|
|
const instructions = [_]Air.Inst.Ref{
|
|
try trash_block.addBitCast(lhs_info.elem_type, .void_value),
|
|
try trash_block.addBitCast(rhs_info.elem_type, .void_value),
|
|
};
|
|
break :t try sema.resolvePeerTypes(block, src, &instructions, .{
|
|
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
|
|
});
|
|
};
|
|
|
|
// When there is a sentinel mismatch, no sentinel on the result.
|
|
// Otherwise, use the sentinel value provided by either operand,
|
|
// coercing it to the peer-resolved element type.
|
|
const res_sent_val: ?Value = s: {
|
|
if (lhs_info.sentinel) |lhs_sent_val| {
|
|
const lhs_sent = Air.internedToRef(lhs_sent_val.toIntern());
|
|
if (rhs_info.sentinel) |rhs_sent_val| {
|
|
const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern());
|
|
const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
|
|
const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
|
|
const lhs_sent_casted_val = (try sema.resolveDefinedValue(block, lhs_src, lhs_sent_casted)).?;
|
|
const rhs_sent_casted_val = (try sema.resolveDefinedValue(block, rhs_src, rhs_sent_casted)).?;
|
|
if (try sema.valuesEqual(lhs_sent_casted_val, rhs_sent_casted_val, resolved_elem_ty)) {
|
|
break :s lhs_sent_casted_val;
|
|
} else {
|
|
break :s null;
|
|
}
|
|
} else {
|
|
const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
|
|
const lhs_sent_casted_val = (try sema.resolveDefinedValue(block, lhs_src, lhs_sent_casted)).?;
|
|
break :s lhs_sent_casted_val;
|
|
}
|
|
} else {
|
|
if (rhs_info.sentinel) |rhs_sent_val| {
|
|
const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern());
|
|
const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
|
|
const rhs_sent_casted_val = (try sema.resolveDefinedValue(block, rhs_src, rhs_sent_casted)).?;
|
|
break :s rhs_sent_casted_val;
|
|
} else {
|
|
break :s null;
|
|
}
|
|
}
|
|
};
|
|
|
|
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
|
|
const rhs_len = try sema.usizeCast(block, rhs_src, rhs_info.len);
|
|
const result_len = std.math.add(usize, lhs_len, rhs_len) catch |err| switch (err) {
|
|
error.Overflow => return sema.fail(
|
|
block,
|
|
src,
|
|
"concatenating arrays of length {d} and {d} produces an array too large for this compiler implementation to handle",
|
|
.{ lhs_len, rhs_len },
|
|
),
|
|
};
|
|
|
|
const result_ty = try mod.arrayType(.{
|
|
.len = result_len,
|
|
.sentinel = if (res_sent_val) |v| v.toIntern() else .none,
|
|
.child = resolved_elem_ty.toIntern(),
|
|
});
|
|
const ptr_addrspace = p: {
|
|
if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
|
|
if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
|
|
break :p null;
|
|
};
|
|
|
|
const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
|
|
.Array, .Struct => try sema.resolveValue(lhs),
|
|
.Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
|
|
else => unreachable,
|
|
}) |lhs_val| rs: {
|
|
if (switch (rhs_ty.zigTypeTag(mod)) {
|
|
.Array, .Struct => try sema.resolveValue(rhs),
|
|
.Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
|
|
else => unreachable,
|
|
}) |rhs_val| {
|
|
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
|
|
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
|
|
else
|
|
lhs_val;
|
|
|
|
const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
|
|
(try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
|
|
else
|
|
rhs_val;
|
|
|
|
const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
|
|
var elem_i: u32 = 0;
|
|
while (elem_i < lhs_len) : (elem_i += 1) {
|
|
const lhs_elem_i = elem_i;
|
|
const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
|
|
const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
|
|
const elem_val_inst = Air.internedToRef(elem_val.toIntern());
|
|
const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = elem_i,
|
|
} };
|
|
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src);
|
|
const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined);
|
|
element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
|
|
}
|
|
while (elem_i < result_len) : (elem_i += 1) {
|
|
const rhs_elem_i = elem_i - lhs_len;
|
|
const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
|
|
const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
|
|
const elem_val_inst = Air.internedToRef(elem_val.toIntern());
|
|
const operand_src: LazySrcLoc = .{ .array_cat_rhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = @intCast(rhs_elem_i),
|
|
} };
|
|
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src);
|
|
const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined);
|
|
element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
|
|
}
|
|
return sema.addConstantMaybeRef(try mod.intern(.{ .aggregate = .{
|
|
.ty = result_ty.toIntern(),
|
|
.storage = .{ .elems = element_vals },
|
|
} }), ptr_addrspace != null);
|
|
} else break :rs rhs_src;
|
|
} else lhs_src;
|
|
|
|
try sema.requireRuntimeBlock(block, src, runtime_src);
|
|
|
|
if (ptr_addrspace) |ptr_as| {
|
|
const alloc_ty = try sema.ptrType(.{
|
|
.child = result_ty.toIntern(),
|
|
.flags = .{ .address_space = ptr_as },
|
|
});
|
|
const alloc = try block.addTy(.alloc, alloc_ty);
|
|
const elem_ptr_ty = try sema.ptrType(.{
|
|
.child = resolved_elem_ty.toIntern(),
|
|
.flags = .{ .address_space = ptr_as },
|
|
});
|
|
|
|
var elem_i: u32 = 0;
|
|
while (elem_i < lhs_len) : (elem_i += 1) {
|
|
const elem_index = try mod.intRef(Type.usize, elem_i);
|
|
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
|
|
const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = elem_i,
|
|
} };
|
|
const init = try sema.elemVal(block, operand_src, lhs, elem_index, src, true);
|
|
try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store);
|
|
}
|
|
while (elem_i < result_len) : (elem_i += 1) {
|
|
const rhs_elem_i = elem_i - lhs_len;
|
|
const elem_index = try mod.intRef(Type.usize, elem_i);
|
|
const rhs_index = try mod.intRef(Type.usize, rhs_elem_i);
|
|
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
|
|
const operand_src: LazySrcLoc = .{ .array_cat_rhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = @intCast(rhs_elem_i),
|
|
} };
|
|
const init = try sema.elemVal(block, operand_src, rhs, rhs_index, src, true);
|
|
try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store);
|
|
}
|
|
if (res_sent_val) |sent_val| {
|
|
const elem_index = try mod.intRef(Type.usize, result_len);
|
|
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
|
|
const init = Air.internedToRef((try mod.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
|
|
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
|
|
}
|
|
|
|
return alloc;
|
|
}
|
|
|
|
const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
|
|
{
|
|
var elem_i: u32 = 0;
|
|
while (elem_i < lhs_len) : (elem_i += 1) {
|
|
const index = try mod.intRef(Type.usize, elem_i);
|
|
const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = elem_i,
|
|
} };
|
|
const init = try sema.elemVal(block, operand_src, lhs, index, src, true);
|
|
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, operand_src);
|
|
}
|
|
while (elem_i < result_len) : (elem_i += 1) {
|
|
const rhs_elem_i = elem_i - lhs_len;
|
|
const index = try mod.intRef(Type.usize, rhs_elem_i);
|
|
const operand_src: LazySrcLoc = .{ .array_cat_rhs = .{
|
|
.array_cat_offset = inst_data.src_node,
|
|
.elem_index = @intCast(rhs_elem_i),
|
|
} };
|
|
const init = try sema.elemVal(block, operand_src, rhs, index, src, true);
|
|
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, operand_src);
|
|
}
|
|
}
|
|
|
|
return block.addAggregateInit(result_ty, element_refs);
|
|
}
|
|
|
|
/// Returns the array-like info (element type, sentinel, length) that `operand`
/// contributes to an array concatenation (`++`), or null if the operand cannot
/// be treated as array-like. `peer_ty` is the type of the *other* concatenation
/// operand; it is consulted only when `operand` is a tuple, since a tuple's
/// element type must be inferred from its indexable peer.
fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .Array => return operand_ty.arrayInfo(mod),
        .Pointer => {
            const ptr_info = operand_ty.ptrInfo(mod);
            switch (ptr_info.flags.size) {
                // TODO: in the Many case here this should only work if the type
                // has a sentinel, and this code should compute the length based
                // on the sentinel value.
                .Slice, .Many => {
                    // The pointee must be comptime-known: the length of a slice
                    // operand determines the result array type.
                    const val = try sema.resolveConstDefinedValue(block, src, operand, .{
                        .needed_comptime_reason = "slice value being concatenated must be comptime-known",
                    });
                    return Type.ArrayInfo{
                        .elem_type = Type.fromInterned(ptr_info.child),
                        .sentinel = switch (ptr_info.sentinel) {
                            .none => null,
                            else => Value.fromInterned(ptr_info.sentinel),
                        },
                        .len = val.sliceLen(mod),
                    };
                },
                .One => {
                    // Single pointer to an array: concatenation operates on the
                    // pointed-to array.
                    if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
                        return Type.fromInterned(ptr_info.child).arrayInfo(mod);
                    }
                },
                .C => {},
            }
        },
        .Struct => {
            // A tuple is array-like only when paired with an indexable peer;
            // the element type comes from the peer because tuple fields may be
            // heterogeneous. Two tuples never reach this path together.
            if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
                assert(!peer_ty.isTuple(mod));
                return .{
                    .elem_type = peer_ty.elemType2(mod),
                    .sentinel = null,
                    .len = operand_ty.arrayLen(mod),
                };
            }
        },
        else => {},
    }
    return null;
}
|
|
|
|
/// Implements `**` (array multiplication) when the lhs is a tuple.
/// `factor` is already comptime-known (enforced by the caller). The result is
/// a new anonymous tuple type whose field types/values repeat the operand's
/// fields `factor` times. Comptime-known fields stay comptime; any field
/// without a comptime value forces a runtime aggregate init.
fn analyzeTupleMul(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    operand: Air.Inst.Ref,
    factor: usize,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    const src = LazySrcLoc.nodeOffset(src_node);
    const len_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

    const tuple_len = operand_ty.structFieldCount(mod);
    const final_len = std.math.mul(usize, tuple_len, factor) catch
        return sema.fail(block, len_src, "operation results in overflow", .{});

    // Multiplying any tuple by 0 (or an empty tuple by anything) yields the
    // empty struct value.
    if (final_len == 0) {
        return Air.internedToRef(Value.empty_struct.toIntern());
    }
    const types = try sema.arena.alloc(InternPool.Index, final_len);
    const values = try sema.arena.alloc(InternPool.Index, final_len);

    // Fill the first `tuple_len` slots from the operand, then replicate them
    // `factor` times. A field whose "default value" is unreachable_value has
    // no comptime value, which marks the whole result as runtime-known.
    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        for (0..tuple_len) |i| {
            types[i] = operand_ty.structFieldType(i, mod).toIntern();
            values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern();
            const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
                .array_cat_offset = src_node,
                .elem_index = @intCast(i),
            } };
            if (values[i] == .unreachable_value) {
                runtime_src = operand_src;
                values[i] = .none; // TODO don't treat unreachable_value as special
            }
        }
        // Replicate the first segment into each subsequent segment. The i == 0
        // iteration copies a slice onto itself, which copyForwards permits.
        for (0..factor) |i| {
            mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
            mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
        }
        break :rs runtime_src;
    };

    const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
        .types = types,
        .values = values,
        .names = &.{},
    });

    // All fields comptime-known: intern the whole aggregate as a constant.
    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return Air.internedToRef(tuple_val);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Runtime path: materialize each field of the operand once, then replicate
    // the refs `factor` times into an aggregate_init.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
    var i: u32 = 0;
    while (i < tuple_len) : (i += 1) {
        const operand_src: LazySrcLoc = .{ .array_cat_lhs = .{
            .array_cat_offset = src_node,
            .elem_index = i,
        } };
        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(i), operand_ty);
    }
    // Segment 0 is already populated; copy it into segments 1..factor.
    i = 1;
    while (i < factor) : (i += 1) {
        @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
    }

    return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
|
|
|
|
/// Analyzes the `**` (array multiplication) operator. The rhs factor must be
/// comptime-known; the lhs may be runtime-known. Tuples are handled separately
/// by `analyzeTupleMul`. For arrays/pointers-to-arrays, the result is either a
/// comptime-interned aggregate, an in-memory alloc filled element-by-element
/// (when the lhs is a pointer), or a runtime aggregate_init.
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ArrayMul, inst_data.payload_index).data;
    const uncoerced_lhs = try sema.resolveInst(extra.lhs);
    const uncoerced_lhs_ty = sema.typeOf(uncoerced_lhs);
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

    const lhs, const lhs_ty = coerced_lhs: {
        // If we have a result type, we might be able to do this more efficiently
        // by coercing the LHS first. Specifically, if we want an array or vector
        // and have a tuple, coerce the tuple immediately.
        no_coerce: {
            if (extra.res_ty == .none) break :no_coerce;
            const res_ty_inst = try sema.resolveInst(extra.res_ty);
            const res_ty = try sema.analyzeAsType(block, src, res_ty_inst);
            if (res_ty.isGenericPoison()) break :no_coerce;
            if (!uncoerced_lhs_ty.isTuple(mod)) break :no_coerce;
            const lhs_len = uncoerced_lhs_ty.structFieldCount(mod);
            // Build the lhs-shaped target type: same element type (and sentinel,
            // for arrays) as the result type, but with the tuple's length.
            const lhs_dest_ty = switch (res_ty.zigTypeTag(mod)) {
                else => break :no_coerce,
                .Array => try mod.arrayType(.{
                    .child = res_ty.childType(mod).toIntern(),
                    .len = lhs_len,
                    .sentinel = if (res_ty.sentinel(mod)) |s| s.toIntern() else .none,
                }),
                .Vector => try mod.vectorType(.{
                    .child = res_ty.childType(mod).toIntern(),
                    .len = lhs_len,
                }),
            };
            // Attempt to coerce to this type, but don't emit an error if it fails. Instead,
            // just exit out of this path and let the usual error happen later, so that error
            // messages are consistent.
            const coerced = sema.coerceExtra(block, lhs_dest_ty, uncoerced_lhs, lhs_src, .{ .report_err = false }) catch |err| switch (err) {
                error.NotCoercible => break :no_coerce,
                else => |e| return e,
            };
            break :coerced_lhs .{ coerced, lhs_dest_ty };
        }
        break :coerced_lhs .{ uncoerced_lhs, uncoerced_lhs_ty };
    };

    if (lhs_ty.isTuple(mod)) {
        // In `**` rhs must be comptime-known, but lhs can be runtime-known
        const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{
            .needed_comptime_reason = "array multiplication factor must be comptime-known",
        });
        const factor_casted = try sema.usizeCast(block, rhs_src, factor);
        return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
    }

    // Analyze the lhs first, to catch the case that someone tried to do exponentiation
    const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);
            switch (lhs_ty.zigTypeTag(mod)) {
                .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
                    try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
                },
                else => {},
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    };

    // In `**` rhs must be comptime-known, but lhs can be runtime-known
    const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{
        .needed_comptime_reason = "array multiplication factor must be comptime-known",
    });

    const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch
        return sema.fail(block, rhs_src, "operation results in overflow", .{});
    const result_len = try sema.usizeCast(block, src, result_len_u64);

    // The result keeps the lhs element type and sentinel.
    const result_ty = try mod.arrayType(.{
        .len = result_len,
        .sentinel = if (lhs_info.sentinel) |s| s.toIntern() else .none,
        .child = lhs_info.elem_type.toIntern(),
    });

    const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
    const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);

    // Comptime path: lhs value is known, so intern the whole result.
    if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
        const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
            (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
        else
            lhs_val;

        const val = v: {
            // Optimization for the common pattern of a single element repeated N times, such
            // as zero-filling a byte array.
            if (lhs_len == 1 and lhs_info.sentinel == null) {
                const elem_val = try lhs_sub_val.elemValue(mod, 0);
                break :v try mod.intern(.{ .aggregate = .{
                    .ty = result_ty.toIntern(),
                    .storage = .{ .repeated_elem = elem_val.toIntern() },
                } });
            }

            // General case: lay out the lhs elements `factor` times.
            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
            var elem_i: usize = 0;
            while (elem_i < result_len) {
                var lhs_i: usize = 0;
                while (lhs_i < lhs_len) : (lhs_i += 1) {
                    const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
                    element_vals[elem_i] = elem_val.toIntern();
                    elem_i += 1;
                }
            }
            break :v try mod.intern(.{ .aggregate = .{
                .ty = result_ty.toIntern(),
                .storage = .{ .elems = element_vals },
            } });
        };
        // When the lhs was a pointer, the result is returned by reference.
        return sema.addConstantMaybeRef(val, ptr_addrspace != null);
    }

    try sema.requireRuntimeBlock(block, src, lhs_src);

    // Grab all the LHS values ahead of time, rather than repeatedly emitting instructions
    // to get the same elem values.
    const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len);
    for (lhs_vals, 0..) |*lhs_val, idx| {
        const idx_ref = try mod.intRef(Type.usize, idx);
        lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false);
    }

    // Pointer lhs: allocate result storage and store each element (and the
    // sentinel, if any) through element pointers.
    if (ptr_addrspace) |ptr_as| {
        const alloc_ty = try sema.ptrType(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        const elem_ptr_ty = try sema.ptrType(.{
            .child = lhs_info.elem_type.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });

        var elem_i: usize = 0;
        while (elem_i < result_len) {
            for (lhs_vals) |lhs_val| {
                const elem_index = try mod.intRef(Type.usize, elem_i);
                const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
                try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store);
                elem_i += 1;
            }
        }
        // The sentinel lives one past the last element.
        if (lhs_info.sentinel) |sent_val| {
            const elem_index = try mod.intRef(Type.usize, result_len);
            const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
            const init = Air.internedToRef(sent_val.toIntern());
            try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
        }

        return alloc;
    }

    // By-value lhs: replicate the element refs and emit an aggregate init.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
    for (0..try sema.usizeCast(block, rhs_src, factor)) |i| {
        @memcpy(element_refs[i * lhs_len ..][0..lhs_len], lhs_vals);
    }
    return block.addAggregateInit(result_ty, element_refs);
}
|
|
|
|
/// Analyzes unary negation (`-x`). Rejects unsigned integers and non-numeric
/// types. Floats are negated directly (not lowered to `0 - x`) so that
/// negative zero is represented correctly in the result bits; everything else
/// is lowered to `0 - x` via `analyzeArithmetic`.
fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const lhs_src = src;
    const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const rhs = try sema.resolveInst(inst_data.operand);
    const rhs_ty = sema.typeOf(rhs);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);

    // Negation is invalid for unsigned integers and for any non-numeric type.
    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
        .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
        else => true,
    }) {
        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
    }

    if (rhs_scalar_ty.isAnyFloat()) {
        // We handle float negation here to ensure negative zero is represented in the bits.
        if (try sema.resolveValue(rhs)) |rhs_val| {
            if (rhs_val.isUndef(mod)) return mod.undefRef(rhs_ty);
            return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern());
        }
        try sema.requireRuntimeBlock(block, src, null);
        return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
    }

    // Integer path: lower -x as (0 - x), with overflow safety enabled.
    const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
    return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
}
|
|
|
|
/// Analyzes wrapping negation (`-%x`). Valid for integer and float types
/// (including their comptime variants); lowered as `0 -% x`.
fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const lhs_src = src;
    const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const rhs = try sema.resolveInst(inst_data.operand);
    const rhs_ty = sema.typeOf(rhs);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);

    // Wrapping negation is defined only for numeric scalar types.
    const is_numeric = switch (rhs_scalar_ty.zigTypeTag(mod)) {
        .Int, .ComptimeInt, .Float, .ComptimeFloat => true,
        else => false,
    };
    if (!is_numeric) {
        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
    }

    // Lower -%x as (0 -% x); `subwrap` never needs the overflow safety check.
    const zero_val = try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0));
    const lhs = Air.internedToRef(zero_val.toIntern());
    return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
}
|
|
|
|
/// Shared entry point for the simple binary arithmetic ZIR instructions.
/// Resolves both operands and delegates to `analyzeArithmetic` with the
/// given tag and safety setting; all type checking happens there.
fn zirArithmetic(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    zir_tag: Zir.Inst.Tag,
    safety: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const node = inst_data.src_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = node };

    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);

    return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, src, lhs_src, rhs_src, safety);
}
|
|
|
|
/// Analyzes the `/` division operator. For unsigned/comptime integers this
/// lowers to `div_trunc`; signed runtime integers are rejected (callers must
/// use @divTrunc/@divFloor/@divExact since the rounding is ambiguous); floats
/// lower to `div_float`. Comptime-known operands are folded here, including
/// all the undef/zero edge cases documented inline.
///
/// Fix: when the *lhs* is undefined (signed case with rhs equal to -1 or
/// runtime-known), the "use of undefined value" error now points at the lhs
/// operand rather than the rhs.
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
    {
        // If it makes a difference whether we coerce to ints or floats before doing the division, error.
        // If lhs % rhs is 0, it doesn't matter.
        // Both operands are comptime types here, so both values are known.
        const lhs_val = maybe_lhs_val orelse unreachable;
        const rhs_val = maybe_rhs_val orelse unreachable;
        const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
        if (!rem.compareAllWithZero(.eq, mod)) {
            return sema.fail(
                block,
                src,
                "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
            );
        }
    }

    // TODO: emit compile error when .div is used on integers and there would be an
    // ambiguous result between div_floor and div_trunc.

    // For integers:
    // If the lhs is zero, then zero is returned regardless of rhs.
    // If the rhs is zero, compile error for division by zero.
    // If the rhs is undefined, compile error because there is a possible
    // value (zero) for which the division would be illegal behavior.
    // If the lhs is undefined:
    //   * if lhs type is signed:
    //     * if rhs is comptime-known and not -1, result is undefined
    //     * if rhs is -1 or runtime-known, compile error because there is a
    //        possible value (-min_int / -1)  for which division would be
    //        illegal behavior.
    //   * if lhs type is unsigned, undef is returned regardless of rhs.
    //
    // For floats:
    // If the rhs is zero:
    //  * comptime_float: compile error for division by zero.
    //  * other float type:
    //    * if the lhs is zero: QNaN
    //    * otherwise: +Inf or -Inf depending on lhs sign
    // If the rhs is undefined:
    //  * comptime_float: compile error because there is a possible
    //    value (zero) for which the division would be illegal behavior.
    //  * other float type: result is undefined
    // If the lhs is undefined, result is undefined.
    switch (scalar_tag) {
        .Int, .ComptimeInt, .ComptimeFloat => {
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        // Zero divided by anything (nonzero) is zero.
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                            else => unreachable,
                        };
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                // TODO: if the RHS is one, return the LHS directly
            }
        },
        else => {},
    }

    const runtime_src = rs: {
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    // The undefined operand is the lhs, so point the error at it.
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else {
                break :rs rhs_src;
            }
        } else {
            break :rs lhs_src;
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = if (is_int) blk: {
        // For runtime signed integers, `/` is ambiguous between trunc and floor.
        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
            return sema.fail(
                block,
                src,
                "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
            );
        }
        break :blk Air.Inst.Tag.div_trunc;
    } else switch (block.float_mode) {
        .Optimized => Air.Inst.Tag.div_float_optimized,
        .Strict => Air.Inst.Tag.div_float,
    };
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes the `@divExact` builtin. Comptime-known operands are folded with a
/// compile error if a remainder is produced; at runtime, when safety is on,
/// lowers to `div_trunc` plus a remainder check (since the `div_exact` AIR
/// instruction is UB on remainder), otherwise to `div_exact`.
///
/// Fix: when the *lhs* is undefined, the "use of undefined value" error now
/// points at the lhs operand rather than the rhs.
fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
        // TODO: emit runtime safety for if there is a remainder
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                // The undefined operand is the lhs, so point the error at it.
                return sema.failWithUseOfUndef(block, lhs_src);
            } else {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    // Zero divided exactly by anything (nonzero) is zero.
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            // TODO: if the RHS is one, return the LHS directly
        }
        if (maybe_lhs_val) |lhs_val| {
            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    // Exactness check before folding the division.
                    const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
                    if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
                    if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Depending on whether safety is enabled, we will have a slightly different strategy
    // here. The `div_exact` AIR instruction causes undefined behavior if a remainder
    // is produced, so in the safety check case, it cannot be used. Instead we do a
    // div_trunc and check for remainder.

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);

        const result = try block.addBinOp(.div_trunc, casted_lhs, casted_rhs);
        const ok = if (!is_int) ok: {
            // Floats: division was exact iff the result is already an integer,
            // i.e. equal to its own floor.
            const floored = try block.addUnOp(.floor, result);

            if (resolved_type.zigTypeTag(mod) == .Vector) {
                const eql = try block.addCmpVector(result, floored, .eq);
                break :ok try block.addInst(.{
                    .tag = switch (block.float_mode) {
                        .Strict => .reduce,
                        .Optimized => .reduce_optimized,
                    },
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else {
                const is_in_range = try block.addBinOp(switch (block.float_mode) {
                    .Strict => .cmp_eq,
                    .Optimized => .cmp_eq_optimized,
                }, result, floored);
                break :ok is_in_range;
            }
        } else ok: {
            // Integers: division was exact iff the remainder is zero.
            const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);

            const scalar_zero = switch (scalar_tag) {
                .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                else => unreachable,
            };
            if (resolved_type.zigTypeTag(mod) == .Vector) {
                const zero_val = try sema.splat(resolved_type, scalar_zero);
                const zero = Air.internedToRef(zero_val.toIntern());
                const eql = try block.addCmpVector(remainder, zero, .eq);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else {
                const zero = Air.internedToRef(scalar_zero.toIntern());
                const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero);
                break :ok is_in_range;
            }
        };
        try sema.addSafetyCheck(block, src, ok, .exact_division_remainder);
        return result;
    }

    return block.addBinOp(airTag(block, is_int, .div_exact, .div_exact_optimized), casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes the ZIR `div_floor` instruction (`@divFloor`): resolves both
/// operands to a common peer type, folds the operation at comptime when both
/// operands are comptime-known, and otherwise emits a runtime `div_floor`
/// AIR instruction, adding overflow and divide-by-zero safety checks when
/// the block has safety enabled.
fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    // Peer-type resolution determines the common type both operands coerce to.
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    // Attempt full comptime evaluation. When that is impossible, `runtime_src`
    // records which operand forced the operation to runtime.
    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined:
        //   * if lhs type is signed:
        //     * if rhs is comptime-known and not -1, result is undefined
        //     * if rhs is -1 or runtime-known, compile error because there is a
        //        possible value (-min_int / -1)  for which division would be
        //        illegal behavior.
        //   * if lhs type is unsigned, undef is returned regardless of rhs.
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        if (maybe_lhs_val) |lhs_val| {
            if (!lhs_val.isUndef(mod)) {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    // 0 / anything == 0 (of the resolved type; splatted for vectors).
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            // TODO: if the RHS is one, return the LHS directly
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    return block.addBinOp(airTag(block, is_int, .div_floor, .div_floor_optimized), casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes the ZIR `div_trunc` instruction (`@divTrunc`): resolves both
/// operands to a common peer type, folds the operation at comptime when both
/// operands are comptime-known (reporting integer overflow when comptime
/// division overflows), and otherwise emits a runtime `div_trunc` AIR
/// instruction with optional safety checks.
fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    // Peer-type resolution determines the common type both operands coerce to.
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    // Attempt full comptime evaluation. When that is impossible, `runtime_src`
    // records which operand forced the operation to runtime.
    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined:
        //   * if lhs type is signed:
        //     * if rhs is comptime-known and not -1, result is undefined
        //     * if rhs is -1 or runtime-known, compile error because there is a
        //        possible value (-min_int / -1)  for which division would be
        //        illegal behavior.
        //   * if lhs type is unsigned, undef is returned regardless of rhs.
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        if (maybe_lhs_val) |lhs_val| {
            if (!lhs_val.isUndef(mod)) {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    // 0 / anything == 0 (of the resolved type; splatted for vectors).
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    // Comptime integer division can overflow (min_int / -1 for
                    // a fixed-width signed type); report it as a compile error.
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    return block.addBinOp(airTag(block, is_int, .div_trunc, .div_trunc_optimized), casted_lhs, casted_rhs);
}
|
|
|
|
/// Emits a runtime safety check guarding signed integer division against the
/// one overflowing case, `min_int / -1`. Returns early without emitting
/// anything whenever comptime information already proves overflow impossible
/// (unsigned LHS, widened LHS, LHS comptime-known != min_int, or RHS
/// comptime-known != -1). The emitted check passes when the LHS is not
/// min_int or the RHS is not -1.
fn addDivIntOverflowSafety(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    resolved_type: Type,
    lhs_scalar_ty: Type,
    maybe_lhs_val: ?Value,
    maybe_rhs_val: ?Value,
    casted_lhs: Air.Inst.Ref,
    casted_rhs: Air.Inst.Ref,
    is_int: bool,
) CompileError!void {
    const mod = sema.mod;
    // Float division cannot trigger this kind of overflow.
    if (!is_int) return;

    // If the LHS is unsigned, it cannot cause overflow.
    if (!lhs_scalar_ty.isSignedInt(mod)) return;

    // If the LHS is widened to a larger integer type, no overflow is possible.
    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
        return;
    }

    const min_int = try resolved_type.minInt(mod, resolved_type);
    const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
    const neg_one = try sema.splat(resolved_type, neg_one_scalar);

    // If the LHS is comptime-known to be not equal to the min int,
    // no overflow is possible.
    if (maybe_lhs_val) |lhs_val| {
        if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
    }

    // If the RHS is comptime-known to not be equal to -1, no overflow is possible.
    if (maybe_rhs_val) |rhs_val| {
        if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
    }

    // Build the "ok" condition from whichever operands are runtime-known;
    // a comptime-known operand that reached this point contributes no check
    // of its own (its comparison above was inconclusive, so the other
    // operand's runtime comparison decides).
    var ok: Air.Inst.Ref = .none;
    if (resolved_type.zigTypeTag(mod) == .Vector) {
        if (maybe_lhs_val == null) {
            const min_int_ref = Air.internedToRef(min_int.toIntern());
            ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
        }
        if (maybe_rhs_val == null) {
            const neg_one_ref = Air.internedToRef(neg_one.toIntern());
            const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
            if (ok == .none) {
                ok = rhs_ok;
            } else {
                ok = try block.addBinOp(.bool_or, ok, rhs_ok);
            }
        }
        assert(ok != .none);
        // Reduce the element-wise results to a single bool: all lanes must be ok.
        ok = try block.addInst(.{
            .tag = .reduce,
            .data = .{ .reduce = .{
                .operand = ok,
                .operation = .And,
            } },
        });
    } else {
        if (maybe_lhs_val == null) {
            const min_int_ref = Air.internedToRef(min_int.toIntern());
            ok = try block.addBinOp(.cmp_neq, casted_lhs, min_int_ref);
        }
        if (maybe_rhs_val == null) {
            const neg_one_ref = Air.internedToRef(neg_one.toIntern());
            const rhs_ok = try block.addBinOp(.cmp_neq, casted_rhs, neg_one_ref);
            if (ok == .none) {
                ok = rhs_ok;
            } else {
                ok = try block.addBinOp(.bool_or, ok, rhs_ok);
            }
        }
        assert(ok != .none);
    }
    try sema.addSafetyCheck(block, src, ok, .integer_overflow);
}
|
|
|
|
/// Emits a runtime divide-by-zero safety check (`rhs != 0`) for a division
/// or remainder operation, unless the check is provably unnecessary:
/// strict-mode float division by zero is well-defined, and a comptime-known
/// RHS that was zero would already have produced a compile error upstream.
fn addDivByZeroSafety(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    resolved_type: Type,
    maybe_rhs_val: ?Value,
    casted_rhs: Air.Inst.Ref,
    is_int: bool,
) CompileError!void {
    // Strict IEEE floats have well-defined division by zero.
    if (!is_int and block.float_mode == .Strict) return;

    // If rhs was comptime-known to be zero a compile error would have been
    // emitted above, so only a runtime-known divisor needs a check.
    if (maybe_rhs_val != null) return;

    const mod = sema.mod;
    const zero_scalar = if (is_int)
        try mod.intValue(resolved_type.scalarType(mod), 0)
    else
        try mod.floatValue(resolved_type.scalarType(mod), 0.0);
    if (resolved_type.zigTypeTag(mod) == .Vector) {
        // Compare each lane against zero, then reduce with And: every lane
        // must be nonzero for the check to pass.
        const zero_vec = try sema.splat(resolved_type, zero_scalar);
        const zero_ref = Air.internedToRef(zero_vec.toIntern());
        const lanes_nonzero = try block.addCmpVector(casted_rhs, zero_ref, .neq);
        const all_nonzero = try block.addInst(.{
            .tag = if (is_int) .reduce else .reduce_optimized,
            .data = .{ .reduce = .{
                .operand = lanes_nonzero,
                .operation = .And,
            } },
        });
        try sema.addSafetyCheck(block, src, all_nonzero, .divide_by_zero);
    } else {
        const zero_ref = Air.internedToRef(zero_scalar.toIntern());
        const nonzero = try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero_ref);
        try sema.addSafetyCheck(block, src, nonzero, .divide_by_zero);
    }
}
|
|
|
|
/// Selects between the normal and float-optimized variant of an AIR
/// instruction tag. Integer operations have no optimized variant, so they
/// always use `normal`; float operations pick based on the block's float mode.
fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag {
    const use_optimized = !is_int and block.float_mode == .Optimized;
    return if (use_optimized) optimized else normal;
}
|
|
|
|
/// Analyzes the ZIR `mod_rem` instruction (the `%` operator). Because `%` is
/// only allowed when `@mod` and `@rem` would agree, any operand that is
/// signed/negative and not comptime-proven nonnegative produces a compile
/// error directing the user to pick `@mod` or `@rem` explicitly
/// (`failWithModRemNegative`). Folds at comptime when possible; otherwise
/// emits a runtime `rem` AIR instruction with an optional divide-by-zero
/// safety check.
fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    // Attempt full comptime evaluation. When that is impossible, `runtime_src`
    // records which operand forced the operation to runtime.
    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        //
        // For either one: if the result would be different between @mod and @rem,
        // then emit a compile error saying you have to pick one.
        if (is_int) {
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    // 0 % anything == 0 (splatted for vectors).
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
                        .ty = resolved_type.toIntern(),
                        .storage = .{ .repeated_elem = scalar_zero.toIntern() },
                    } }))) else scalar_zero;
                    return Air.internedToRef(zero_val.toIntern());
                }
            } else if (lhs_scalar_ty.isSignedInt(mod)) {
                // Runtime-known signed LHS could be negative: ambiguous `%`.
                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                    return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                }
                if (maybe_lhs_val) |lhs_val| {
                    const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
                    // If this answer could possibly be different by doing `intMod`,
                    // we must emit a compile error. Otherwise, it's OK.
                    if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
                        !(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
                    {
                        return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                    }
                    return Air.internedToRef(rem_result.toIntern());
                }
                break :rs lhs_src;
            } else if (rhs_scalar_ty.isSignedInt(mod)) {
                // Runtime-known signed RHS could be negative: ambiguous `%`.
                return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
            } else {
                break :rs rhs_src;
            }
        }
        // float operands
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
            }
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                    return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                }
                return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
            } else {
                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
            }
        } else {
            return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = airTag(block, is_int, .rem, .rem_optimized);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Computes the comptime truncated remainder (`@rem`) of two integer values
/// of type `ty`. For vector types the scalar remainder is applied
/// element-wise and the results are interned as a new aggregate value.
/// Results are allocated in `sema.arena`.
fn intRem(
    sema: *Sema,
    ty: Type,
    lhs: Value,
    rhs: Value,
) CompileError!Value {
    const mod = sema.mod;
    // Scalar case: compute the remainder directly.
    if (ty.zigTypeTag(mod) != .Vector) return sema.intRemScalar(lhs, rhs, ty);

    // Vector case: element-wise scalar remainder.
    const scalar_ty = ty.scalarType(mod);
    const elems = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
    for (elems, 0..) |*elem, idx| {
        const lhs_elem = try lhs.elemValue(mod, idx);
        const rhs_elem = try rhs.elemValue(mod, idx);
        const rem_val = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty);
        elem.* = try rem_val.intern(scalar_ty, mod);
    }
    return Value.fromInterned((try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = elems },
    } })));
}
|
|
|
|
/// Computes the truncated remainder of two scalar integer values via
/// arbitrary-precision division (`divTrunc`), returning the remainder as a
/// new value of `scalar_ty`. All limb buffers live in `sema.arena`.
fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
    const mod = sema.mod;
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_big = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_big = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    // The quotient needs at most as many limbs as the dividend, and the
    // remainder at most as many as the divisor.
    // TODO: consider reworking Sema to re-use Values rather than
    // always producing new Value objects.
    const quotient_limbs = try sema.arena.alloc(math.big.Limb, lhs_big.limbs.len);
    const remainder_limbs = try sema.arena.alloc(math.big.Limb, rhs_big.limbs.len);
    const scratch = try sema.arena.alloc(
        math.big.Limb,
        math.big.int.calcDivLimbsBufferLen(lhs_big.limbs.len, rhs_big.limbs.len),
    );
    var quotient = math.big.int.Mutable{ .limbs = quotient_limbs, .positive = undefined, .len = undefined };
    var remainder = math.big.int.Mutable{ .limbs = remainder_limbs, .positive = undefined, .len = undefined };
    quotient.divTrunc(&remainder, lhs_big, rhs_big, scratch);
    // Only the remainder is of interest; the quotient is discarded.
    return mod.intValue_big(scalar_ty, remainder.toConst());
}
|
|
|
|
/// Analyzes the ZIR `mod` instruction (`@mod`): resolves both operands to a
/// common peer type, folds at comptime when both operands are
/// comptime-known, and otherwise emits a runtime `mod` AIR instruction with
/// an optional divide-by-zero safety check.
fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    // Attempt full comptime evaluation. When that is impossible, `runtime_src`
    // records which operand forced the operation to runtime.
    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs zero be handled better?
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        if (is_int) {
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (maybe_lhs_val) |lhs_val| {
                    return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
                break :rs lhs_src;
            } else {
                break :rs rhs_src;
            }
        }
        // float operands
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return mod.undefRef(resolved_type);
            }
            if (maybe_rhs_val) |rhs_val| {
                return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = airTag(block, is_int, .mod, .mod_optimized);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes the ZIR `rem` instruction (`@rem`): resolves both operands to a
/// common peer type, folds at comptime when both operands are
/// comptime-known, and otherwise emits a runtime `rem` AIR instruction with
/// an optional divide-by-zero safety check. Mirrors `zirMod` except it uses
/// truncated (`intRem`/`floatRem`) rather than floored remainder semantics.
fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    // Attempt full comptime evaluation. When that is impossible, `runtime_src`
    // records which operand forced the operation to runtime.
    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs zero be handled better?
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        if (is_int) {
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (maybe_lhs_val) |lhs_val| {
                    return Air.internedToRef((try sema.intRem(resolved_type, lhs_val, rhs_val)).toIntern());
                }
                break :rs lhs_src;
            } else {
                break :rs rhs_src;
            }
        }
        // float operands
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return mod.undefRef(resolved_type);
            }
            if (maybe_rhs_val) |rhs_val| {
                return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = airTag(block, is_int, .rem, .rem_optimized);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Semantic analysis for the overflow-arithmetic builtins
/// (`@addWithOverflow`, `@subWithOverflow`, `@mulWithOverflow`, `@shlWithOverflow`),
/// dispatched on `zir_tag`. Produces a two-element anonymous tuple
/// `{ wrapped_result, overflow_bit }` (see `overflowArithmeticTupleType`).
/// Comptime-known operands are folded here; otherwise a runtime
/// `*_with_overflow` AIR instruction is emitted.
fn zirOverflowArithmetic(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);

    const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

    const uncasted_lhs = try sema.resolveInst(extra.lhs);
    const uncasted_rhs = try sema.resolveInst(extra.rhs);

    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    // For shifts the result type is the lhs type; the rhs is coerced to the
    // log2 int type below. All other ops use the peer-resolved type.
    const instructions = &[_]Air.Inst.Ref{ uncasted_lhs, uncasted_rhs };
    const dest_ty = if (zir_tag == .shl_with_overflow)
        lhs_ty
    else
        try sema.resolvePeerTypes(block, src, instructions, .{
            .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
        });

    const rhs_dest_ty = if (zir_tag == .shl_with_overflow)
        try sema.log2IntType(block, lhs_ty, src)
    else
        dest_ty;

    const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
    const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);

    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
        return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
    }

    const maybe_lhs_val = try sema.resolveValue(lhs);
    const maybe_rhs_val = try sema.resolveValue(rhs);

    const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
    // Second tuple field: u1 (or vector of u1) overflow bit type.
    const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1]);

    // The comptime-fold result. Exactly one of `inst`/`wrapped` is meaningful:
    // `inst != .none` means "result is this (possibly runtime) operand";
    // otherwise `wrapped` holds the comptime wrapped result.
    var result: struct {
        inst: Air.Inst.Ref = .none,
        wrapped: Value = Value.@"unreachable",
        overflow_bit: Value,
    } = result: {
        const zero_bit = try mod.intValue(Type.u1, 0);
        switch (zir_tag) {
            .add_with_overflow => {
                // If either of the arguments is zero, `false` is returned and the other is stored
                // to the result, even if it is undefined.
                // Otherwise, if either of the argument is undefined, undefined is returned.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try sema.intAddWithOverflow(lhs_val, rhs_val, dest_ty);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .sub_with_overflow => {
                // If the rhs is zero, then the result is lhs and no overflow occurred.
                // Otherwise, if either result is undefined, both results are undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                    } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    } else if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try sema.intSubWithOverflow(lhs_val, rhs_val, dest_ty);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .mul_with_overflow => {
                // If either of the arguments is zero, the result is zero and no overflow occurred.
                // If either of the arguments is one, the result is the other and no overflow occurred.
                // Otherwise, if either of the arguments is undefined, both results are undefined.
                const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        }
                    }
                }

                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod)) {
                        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                        }
                    }
                }

                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .shl_with_overflow => {
                // If lhs is zero, the result is zero and no overflow occurred.
                // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                // Otherwise if either of the arguments is undefined, both results are undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            else => unreachable,
        }

        // No comptime fold possible: emit the runtime overflow instruction.
        const air_tag: Air.Inst.Tag = switch (zir_tag) {
            .add_with_overflow => .add_with_overflow,
            .mul_with_overflow => .mul_with_overflow,
            .sub_with_overflow => .sub_with_overflow,
            .shl_with_overflow => .shl_with_overflow,
            else => unreachable,
        };

        const runtime_src = if (maybe_lhs_val == null) lhs_src else rhs_src;
        try sema.requireRuntimeBlock(block, src, runtime_src);

        return block.addInst(.{
            .tag = air_tag,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(tuple_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = lhs,
                    .rhs = rhs,
                }),
            } },
        });
    };

    // If the "result is this operand" case hit a comptime-known operand,
    // normalize it into the `wrapped` value form.
    if (result.inst != .none) {
        if (try sema.resolveValue(result.inst)) |some| {
            result.wrapped = some;
            result.inst = .none;
        }
    }

    // Fully comptime-known result: intern the tuple directly.
    if (result.inst == .none) {
        return Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty.toIntern(),
            .storage = .{ .elems = &.{
                result.wrapped.toIntern(),
                result.overflow_bit.toIntern(),
            } },
        } })));
    }

    // Mixed case: runtime result element with comptime-known overflow bit.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
    element_refs[0] = result.inst;
    element_refs[1] = Air.internedToRef(result.overflow_bit.toIntern());
    return block.addAggregateInit(tuple_ty, element_refs);
}
|
|
|
|
/// Returns `val` unchanged for scalar `ty`; for a vector type, returns a
/// vector value with every element equal to `val` (a repeated-element
/// aggregate in the intern pool).
fn splat(sema: *Sema, ty: Type, val: Value) !Value {
    const mod = sema.mod;
    // Only vectors need broadcasting; scalars pass straight through.
    if (ty.zigTypeTag(mod) != .Vector) return val;
    return Value.fromInterned(try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .repeated_elem = val.toIntern() },
    } }));
}
|
|
|
|
/// Builds the anonymous tuple type `{ ty, ov }` returned by the
/// overflow-arithmetic builtins, where `ov` is `u1` for scalars, or a
/// vector of `u1` with the same length when `ty` is a vector.
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    // The overflow-bit component mirrors the operand's shape.
    const ov_ty = switch (ty.zigTypeTag(mod)) {
        .Vector => try mod.vectorType(.{
            .len = ty.vectorLen(mod),
            .child = .u1_type,
        }),
        else => Type.u1,
    };

    const field_types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
    // `.none` values: neither tuple field has a comptime-known value.
    const field_values = [2]InternPool.Index{ .none, .none };
    const tuple_ty = try ip.getAnonStructType(mod.gpa, .{
        .types = &field_types,
        .values = &field_values,
        .names = &.{},
    });
    return Type.fromInterned(tuple_ty);
}
|
|
|
|
/// Shared analysis for the binary arithmetic ZIR instructions
/// (`add`/`addwrap`/`add_sat`, `sub`/`subwrap`/`sub_sat`,
/// `mul`/`mulwrap`/`mul_sat`, plus `add_unsafe`). Handles:
/// - dispatch to pointer arithmetic for many-item/C pointers,
/// - peer type resolution and coercion of both operands,
/// - comptime folding with per-operator identity-element shortcuts
///   (x+0, x-0, x*0, x*1, ...) and undef propagation,
/// - emission of the runtime AIR op, with overflow safety checks when
///   `want_safety` and the block requests safety.
fn analyzeArithmetic(
    sema: *Sema,
    block: *Block,
    /// TODO performance investigation: make this comptime?
    zir_tag: Zir.Inst.Tag,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    src: LazySrcLoc,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
    want_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    // Arithmetic on many-item/C pointers is pointer arithmetic, handled separately.
    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
        .One, .Slice => {},
        .Many, .C => {
            const air_tag: Air.Inst.Tag = switch (zir_tag) {
                .add => .ptr_add,
                .sub => .ptr_sub,
                else => return sema.fail(block, src, "invalid pointer arithmetic operator", .{}),
            };
            return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
        },
    };

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_type = resolved_type.scalarType(mod);
    const scalar_tag = scalar_type.zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);
    // On fallthrough: which operand forced runtime evaluation, the AIR tag to
    // emit, and the safety-checked variant of that tag (equal when the op has
    // no checked form, e.g. wrapping/saturating ops).
    const runtime_src: LazySrcLoc, const air_tag: Air.Inst.Tag, const air_tag_safe: Air.Inst.Tag = rs: {
        switch (zir_tag) {
            .add, .add_unsafe => {
                // For integers:
                // If either of the operands are zero, then the other operand is
                // returned, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the addition would
                // overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, lhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx);
                            }
                            return Air.internedToRef(sum.toIntern());
                        } else {
                            return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ rhs_src, air_tag, .add_safe };
                } else break :rs .{ lhs_src, air_tag, .add_safe };
            },
            .addwrap => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        return Air.internedToRef((try sema.numberAddWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern());
                    } else break :rs .{ lhs_src, .add_wrap, .add_wrap };
                } else break :rs .{ rhs_src, .add_wrap, .add_wrap };
            },
            .add_sat => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, then the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            return mod.undefRef(resolved_type);
                        }

                        // comptime_int cannot saturate (unbounded), so plain add.
                        const val = if (scalar_tag == .ComptimeInt)
                            try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined)
                        else
                            try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{
                        lhs_src,
                        .add_sat,
                        .add_sat,
                    };
                } else break :rs .{
                    rhs_src,
                    .add_sat,
                    .add_sat,
                };
            },
            .sub => {
                // For integers:
                // If the rhs is zero, then the other operand is
                // returned, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the subtraction would
                // overflow, causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, lhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx);
                            }
                            return Air.internedToRef(diff.toIntern());
                        } else {
                            return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ rhs_src, air_tag, .sub_safe };
                } else break :rs .{ lhs_src, air_tag, .sub_safe };
            },
            .subwrap => {
                // Integers only; floats are checked above.
                // If the RHS is zero, then the LHS is returned, even if it is undefined.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        return Air.internedToRef((try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern());
                    } else break :rs .{ rhs_src, .sub_wrap, .sub_wrap };
                } else break :rs .{ lhs_src, .sub_wrap, .sub_wrap };
            },
            .sub_sat => {
                // Integers only; floats are checked above.
                // If the RHS is zero, then the LHS is returned, even if it is undefined.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        // comptime_int cannot saturate (unbounded), so plain sub.
                        const val = if (scalar_tag == .ComptimeInt)
                            try sema.intSub(lhs_val, rhs_val, resolved_type, undefined)
                        else
                            try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{ rhs_src, .sub_sat, .sub_sat };
                } else break :rs .{ lhs_src, .sub_sat, .sub_sat };
            },
            .mul => {
                // For integers:
                // If either of the operands are zero, the result is zero.
                // If either of the operands are one, the result is the other
                // operand, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the multiplication would
                // overflow (max_int), causing illegal behavior.
                //
                // For floats:
                // If either of the operands are undefined, the result is undefined.
                // If either of the operands are inf, and the other operand is zero,
                // the result is nan.
                // If either of the operands are nan, the result is nan.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (lhs_val.isNan(mod)) {
                            return Air.internedToRef(lhs_val.toIntern());
                        }
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
                            if (maybe_rhs_val) |rhs_val| {
                                if (rhs_val.isNan(mod)) {
                                    return Air.internedToRef(rhs_val.toIntern());
                                }
                                if (rhs_val.isInf(mod)) {
                                    // 0 * inf => nan
                                    return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                                }
                            } else if (resolved_type.isAnyFloat()) {
                                // Unknown rhs could be nan/inf; cannot fold to zero.
                                break :lz;
                            }
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (rhs_val.isNan(mod)) {
                        return Air.internedToRef(rhs_val.toIntern());
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isInf(mod)) {
                                // inf * 0 => nan
                                return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                            }
                        } else if (resolved_type.isAnyFloat()) {
                            // Unknown lhs could be nan/inf; cannot fold to zero.
                            break :rz;
                        }
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            } else {
                                return mod.undefRef(resolved_type);
                            }
                        }
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx);
                            }
                            return Air.internedToRef(product.toIntern());
                        } else {
                            return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ lhs_src, air_tag, .mul_safe };
                } else break :rs .{ rhs_src, air_tag, .mul_safe };
            },
            .mulwrap => {
                // Integers only; floats are handled above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            return mod.undefRef(resolved_type);
                        }
                        return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                    } else break :rs .{ lhs_src, .mul_wrap, .mul_wrap };
                } else break :rs .{ rhs_src, .mul_wrap, .mul_wrap };
            },
            .mul_sat => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            return mod.undefRef(resolved_type);
                        }

                        // comptime_int cannot saturate (unbounded), so plain mul.
                        const val = if (scalar_tag == .ComptimeInt)
                            try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod)
                        else
                            try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{ lhs_src, .mul_sat, .mul_sat };
                } else break :rs .{ rhs_src, .mul_sat, .mul_sat };
            },
            else => unreachable,
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety() and want_safety and scalar_tag == .Int) {
        if (mod.backendSupportsFeature(.safety_checked_instructions)) {
            // Backend lowers the checked tag itself; make sure the panic
            // handler is analyzed when a checked variant differs.
            if (air_tag != air_tag_safe) {
                _ = try sema.preparePanicId(block, .integer_overflow);
            }
            return block.addBinOp(air_tag_safe, casted_lhs, casted_rhs);
        } else {
            // Backend lacks checked instructions: expand to the overflow op
            // plus an explicit branch-to-panic on the overflow bit.
            const maybe_op_ov: ?Air.Inst.Tag = switch (air_tag) {
                .add => .add_with_overflow,
                .sub => .sub_with_overflow,
                .mul => .mul_with_overflow,
                else => null,
            };
            if (maybe_op_ov) |op_ov_tag| {
                const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(resolved_type);
                const op_ov = try block.addInst(.{
                    .tag = op_ov_tag,
                    .data = .{ .ty_pl = .{
                        .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()),
                        .payload = try sema.addExtra(Air.Bin{
                            .lhs = casted_lhs,
                            .rhs = casted_rhs,
                        }),
                    } },
                });
                const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
                // For vectors, OR-reduce the per-lane overflow bits to one bit.
                const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
                    try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = ov_bit,
                            .operation = .Or,
                        } },
                    })
                else
                    ov_bit;
                const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
                const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

                try sema.addSafetyCheck(block, src, no_ov, .integer_overflow);
                return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
            }
        }
    }
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Analyzes pointer arithmetic (`ptr + offset` / `ptr - offset`) on many-item
/// or C pointers. `air_tag` must be `.ptr_add` or `.ptr_sub`. Computes a
/// possibly-reduced alignment for the result pointer type, folds
/// comptime-known pointers, and otherwise emits the runtime AIR instruction.
fn analyzePtrArithmetic(
    sema: *Sema,
    block: *Block,
    op_src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    uncasted_offset: Air.Inst.Ref,
    air_tag: Air.Inst.Tag,
    ptr_src: LazySrcLoc,
    offset_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // TODO if the operand is comptime-known to be negative, or is a negative int,
    // coerce to isize instead of usize.
    const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
    const mod = sema.mod;
    const opt_ptr_val = try sema.resolveValue(ptr);
    const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
    const ptr_ty = sema.typeOf(ptr);
    const ptr_info = ptr_ty.ptrInfo(mod);
    assert(ptr_info.flags.size == .Many or ptr_info.flags.size == .C);

    const new_ptr_ty = t: {
        // Calculate the new pointer alignment.
        // This code is duplicated in `elemPtrType`.
        if (ptr_info.flags.alignment == .none) {
            // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
            break :t ptr_ty;
        }
        // If the addend is not a comptime-known value we can still count on
        // it being a multiple of the type size.
        const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
        const addend = if (opt_off_val) |off_val| a: {
            const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema));
            break :a elem_size * off_int;
        } else elem_size;

        // The resulting pointer is aligned to the largest power of two that
        // divides both the byte offset (an arbitrary number) and the original
        // alignment factor (always a power of two, non zero) — i.e. their gcd,
        // computed as min(ctz(addend), log2(alignment)).
        const new_align: Alignment = @enumFromInt(@min(
            @ctz(addend),
            @intFromEnum(ptr_info.flags.alignment),
        ));
        assert(new_align != .none);

        break :t try sema.ptrType(.{
            .child = ptr_info.child,
            .sentinel = ptr_info.sentinel,
            .flags = .{
                .size = ptr_info.flags.size,
                .alignment = new_align,
                .is_const = ptr_info.flags.is_const,
                .is_volatile = ptr_info.flags.is_volatile,
                .is_allowzero = ptr_info.flags.is_allowzero,
                .address_space = ptr_info.flags.address_space,
            },
        });
    };

    const runtime_src = rs: {
        if (opt_ptr_val) |ptr_val| {
            if (opt_off_val) |offset_val| {
                if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty);

                const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema));
                if (offset_int == 0) return ptr;
                // Pointer with a comptime-known integer address: fold to a new
                // integer-address pointer value.
                if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
                    const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
                    const new_addr = switch (air_tag) {
                        .ptr_add => addr + elem_size * offset_int,
                        .ptr_sub => addr - elem_size * offset_int,
                        else => unreachable,
                    };
                    const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr);
                    return Air.internedToRef(new_ptr_val.toIntern());
                }
                if (air_tag == .ptr_sub) {
                    return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
                }
                // Symbolic comptime pointer: fold addition via elemPtr.
                const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod);
                return Air.internedToRef(new_ptr_val.toIntern());
            } else break :rs offset_src;
        } else break :rs ptr_src;
    };

    try sema.requireRuntimeBlock(block, op_src, runtime_src);
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(new_ptr_ty.toIntern()),
            .payload = try sema.addExtra(Air.Bin{
                .lhs = ptr,
                .rhs = offset,
            }),
        } },
    });
}
|
|
|
|
/// Analyzes a ZIR `load` instruction: resolves the pointer operand and
/// delegates the dereference to `analyzeLoad`.
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const load_src = un_node.src();
    const operand_ptr = try sema.resolveInst(un_node.operand);
    // TODO: use a more precise source location for the pointer operand
    // instead of reusing the instruction's own location.
    return sema.analyzeLoad(block, load_src, operand_ptr, load_src);
}
|
|
|
|
/// Analyzes an `asm` expression, producing either a registration of
/// module-level (global) assembly or an `.assembly` AIR instruction with its
/// variable-length trailing payload.
fn zirAsm(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    /// When true, the assembly template is a comptime-known string expression
    /// rather than a string literal stored in the ZIR string table.
    tmpl_is_expr: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
    const src = LazySrcLoc.nodeOffset(extra.data.src_node);
    const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
    // `extended.small` packs the operand counts and flags:
    //   bits 0..4   outputs_len
    //   bits 5..9   inputs_len
    //   bits 10..14 clobbers_len
    //   bit  15     is_volatile
    const outputs_len: u5 = @truncate(extended.small);
    const inputs_len: u5 = @truncate(extended.small >> 5);
    const clobbers_len: u5 = @truncate(extended.small >> 10);
    const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0;
    // No enclosing function means this is module-level (global) assembly.
    const is_global_assembly = sema.func_index == .none;
    const zir_tags = sema.code.instructions.items(.tag);

    const asm_source: []const u8 = if (tmpl_is_expr) blk: {
        const tmpl: Zir.Inst.Ref = @enumFromInt(@intFromEnum(extra.data.asm_source));
        const s: []const u8 = try sema.resolveConstString(block, src, tmpl, .{
            .needed_comptime_reason = "assembly code must be comptime-known",
        });
        break :blk s;
    } else sema.code.nullTerminatedString(extra.data.asm_source);

    if (is_global_assembly) {
        // Module-level assembly accepts only the bare source text; any
        // operands or modifiers are compile errors.
        if (outputs_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support outputs", .{});
        }
        if (inputs_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support inputs", .{});
        }
        if (clobbers_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support clobbers", .{});
        }
        if (is_volatile) {
            return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
        }
        try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
        return .void_value;
    }

    // Inline assembly cannot be evaluated at comptime; in a comptime block
    // this call reports the appropriate error.
    if (block.is_comptime) {
        try sema.requireRuntimeBlock(block, src, null);
    }

    var extra_i = extra.end;
    var output_type_bits = extra.data.output_type_bits;
    // Count of u32 words needed in `air_extra` for the Air.Asm payload plus
    // the operand refs; grown below as variable-length string data is measured.
    var needed_capacity: usize = @typeInfo(Air.Asm).Struct.fields.len + outputs_len + inputs_len;

    const ConstraintName = struct { c: []const u8, n: []const u8 };
    const out_args = try sema.arena.alloc(Air.Inst.Ref, outputs_len);
    const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
    var expr_ty = Air.Inst.Ref.void_type;

    for (out_args, 0..) |*arg, out_i| {
        const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
        extra_i = output.end;

        // One bit per output: set means the operand is a result type
        // (`-> T` syntax) rather than a pointer to store through.
        const is_type = @as(u1, @truncate(output_type_bits)) != 0;
        output_type_bits >>= 1;

        if (is_type) {
            // Indicate the output is the asm instruction return value.
            arg.* = .none;
            const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
            try sema.queueFullTypeResolution(out_ty);
            expr_ty = Air.internedToRef(out_ty.toIntern());
        } else {
            arg.* = try sema.resolveInst(output.data.operand);
        }

        const constraint = sema.code.nullTerminatedString(output.data.constraint);
        const name = sema.code.nullTerminatedString(output.data.name);
        // constraint + NUL + name + NUL, rounded up to whole u32 words.
        needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;

        if (output.data.operand.toIndex()) |index| {
            // A `.ref` operand indicates the address of a const local, which
            // cannot legally be written by the assembly.
            if (zir_tags[@intFromEnum(index)] == .ref) {
                // TODO: better error location; it would be even nicer if there were notes that pointed at the output and the variable definition
                return sema.fail(block, src, "asm cannot output to const local '{s}'", .{name});
            }
        }

        outputs[out_i] = .{ .c = constraint, .n = name };
    }

    const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
    const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
    const mod = sema.mod;

    for (args, 0..) |*arg, arg_i| {
        const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
        extra_i = input.end;

        const uncasted_arg = try sema.resolveInst(input.data.operand);
        const uncasted_arg_ty = sema.typeOf(uncasted_arg);
        // comptime_int/comptime_float have no runtime representation, so
        // coerce them to concrete default types (usize / f64).
        switch (uncasted_arg_ty.zigTypeTag(mod)) {
            .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
            .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
            else => {
                arg.* = uncasted_arg;
                try sema.queueFullTypeResolution(uncasted_arg_ty);
            },
        }

        const constraint = sema.code.nullTerminatedString(input.data.constraint);
        const name = sema.code.nullTerminatedString(input.data.name);
        needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;
        inputs[arg_i] = .{ .c = constraint, .n = name };
    }

    const clobbers = try sema.arena.alloc([]const u8, clobbers_len);
    for (clobbers) |*name| {
        const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_i]);
        name.* = sema.code.nullTerminatedString(name_index);
        extra_i += 1;

        // clobber name + NUL, rounded up to whole u32 words.
        needed_capacity += name.*.len / 4 + 1;
    }

    needed_capacity += (asm_source.len + 3) / 4;

    const gpa = sema.gpa;
    // Reserve everything up front so the AssumeCapacity appends below cannot
    // fail or reallocate.
    try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
    const asm_air = try block.addInst(.{
        .tag = .assembly,
        .data = .{ .ty_pl = .{
            .ty = expr_ty,
            .payload = sema.addExtraAssumeCapacity(Air.Asm{
                .source_len = @intCast(asm_source.len),
                .outputs_len = outputs_len,
                .inputs_len = @intCast(args.len),
                // Bit 31 is the volatile flag; the low bits hold the clobber count.
                .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)),
            }),
        } },
    });
    // Trailing payload layout: output refs, input refs, then for each output
    // and input the NUL-terminated constraint and name strings, then the
    // NUL-terminated clobber names, and finally the assembly source text —
    // each string section padded up to a u32 boundary.
    sema.appendRefsAssumeCapacity(out_args);
    sema.appendRefsAssumeCapacity(args);
    for (outputs) |o| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..o.c.len], o.c);
        buffer[o.c.len] = 0;
        @memcpy(buffer[o.c.len + 1 ..][0..o.n.len], o.n);
        buffer[o.c.len + 1 + o.n.len] = 0;
        sema.air_extra.items.len += (o.c.len + o.n.len + (2 + 3)) / 4;
    }
    for (inputs) |input| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..input.c.len], input.c);
        buffer[input.c.len] = 0;
        @memcpy(buffer[input.c.len + 1 ..][0..input.n.len], input.n);
        buffer[input.c.len + 1 + input.n.len] = 0;
        sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4;
    }
    for (clobbers) |clobber| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..clobber.len], clobber);
        buffer[clobber.len] = 0;
        sema.air_extra.items.len += clobber.len / 4 + 1;
    }
    {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..asm_source.len], asm_source);
        sema.air_extra.items.len += (asm_source.len + 3) / 4;
    }
    return asm_air;
}
|
|
|
|
/// Only called for equality operators. See also `zirCmp`.
/// Handles the comparisons that are only legal for `==`/`!=` (null, unions
/// against enums/enum literals, error sets, and types) before delegating the
/// general case to `analyzeCmp`.
fn zirCmpEq(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);

    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
    if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
        // null == null, null != null
        return if (op == .eq) .bool_true else .bool_false;
    }

    // comparing null with optionals
    // (C pointers also permit comparison against null.)
    if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) {
        return sema.analyzeIsNull(block, src, rhs, op == .neq);
    }
    if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) {
        return sema.analyzeIsNull(block, src, lhs, op == .neq);
    }

    // Any remaining comparison against null (non-optional, non-C-pointer
    // other side) is an error.
    if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
        const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
        return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)});
    }

    // A tagged union can be compared against an enum or enum literal by
    // comparing the active tag.
    if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) {
        return sema.analyzeCmpUnionTag(block, src, lhs, lhs_src, rhs, rhs_src, op);
    }
    if (rhs_ty_tag == .Union and (lhs_ty_tag == .EnumLiteral or lhs_ty_tag == .Enum)) {
        return sema.analyzeCmpUnionTag(block, src, rhs, rhs_src, lhs, lhs_src, op);
    }

    if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
        // If both operands are comptime-known, compare the interned error
        // names directly; otherwise record which side forces a runtime
        // comparison, for the error message in `requireRuntimeBlock`.
        const runtime_src: LazySrcLoc = src: {
            if (try sema.resolveValue(lhs)) |lval| {
                if (try sema.resolveValue(rhs)) |rval| {
                    if (lval.isUndef(mod) or rval.isUndef(mod)) {
                        return mod.undefRef(Type.bool);
                    }
                    const lkey = mod.intern_pool.indexToKey(lval.toIntern());
                    const rkey = mod.intern_pool.indexToKey(rval.toIntern());
                    return if ((lkey.err.name == rkey.err.name) == (op == .eq))
                        .bool_true
                    else
                        .bool_false;
                } else {
                    break :src rhs_src;
                }
            } else {
                break :src lhs_src;
            }
        };
        try sema.requireRuntimeBlock(block, src, runtime_src);
        return block.addBinOp(air_tag, lhs, rhs);
    }
    // Types always compare at comptime.
    if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
        const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
        const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
        return if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) .bool_true else .bool_false;
    }
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
}
|
|
|
|
/// Implements `==`/`!=` between a tagged union and an enum or enum literal by
/// comparing the union's active tag. `un` is the union operand, `tag` the
/// enum/enum-literal operand; both are coerced to the union's tag type and
/// sent through the enum comparison codepath.
fn analyzeCmpUnionTag(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    un: Air.Inst.Ref,
    un_src: LazySrcLoc,
    tag: Air.Inst.Ref,
    tag_src: LazySrcLoc,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const union_ty = sema.typeOf(un);
    try sema.resolveTypeFields(union_ty);
    // Only tagged unions carry a tag that can be compared against an enum.
    const union_tag_ty = union_ty.unionTagType(mod) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
            errdefer msg.destroy(sema.gpa);
            try mod.errNoteNonLazy(union_ty.declSrcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    };
    // Coerce both the union and the tag to the union's tag type, and then execute the
    // enum comparison codepath.
    const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
    const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);

    if (try sema.resolveValue(coerced_tag)) |enum_val| {
        if (enum_val.isUndef(mod)) return mod.undefRef(Type.bool);
        const field_ty = union_ty.unionFieldType(enum_val, mod).?;
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            // A noreturn field can never be the active tag, so the result is
            // comptime-known: `==` is always false, `!=` is always true.
            // (Previously this returned false for both operators.)
            return if (op == .eq) .bool_false else .bool_true;
        }
    }

    return sema.cmpSelf(block, src, coerced_union, coerced_tag, op, un_src, tag_src);
}
|
|
|
|
/// Only called for non-equality operators. See also `zirCmpEq`.
/// Thin dispatcher: decodes the ZIR binary payload and forwards both sides to
/// `analyzeCmp` with `is_equality_cmp == false`.
fn zirCmp(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const bin = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    // Source locations for the whole expression and for each operand.
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

    const lhs = try sema.resolveInst(bin.lhs);
    const rhs = try sema.resolveInst(bin.rhs);
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false);
}
|
|
|
|
/// General comparison analysis shared by `zirCmp` and `zirCmpEq`.
/// Dispatches vectors, numeric operands, and error-union-vs-error-set
/// equality to special codepaths; everything else goes through peer type
/// resolution followed by a same-type comparison via `cmpSelf`.
fn analyzeCmp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
    /// True when called from `zirCmpEq` (`==`/`!=`), which permits more
    /// operand combinations than the ordering operators.
    is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
        try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    }

    if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
        return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
    }
    if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
        // This operation allows any combination of integer and float types, regardless of the
        // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
        // numeric types.
        return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
    }
    // `err_union == err_set`: compare the error-union's error code against
    // the error set value (equality only).
    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
        const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
        return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
    }
    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
        const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
        return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
    }
    // General case: find a common type for both operands, verify it supports
    // the requested comparison, then compare at that type.
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
    if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
        return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
            compareOperatorName(op), resolved_type.fmt(mod),
        });
    }
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
    return sema.cmpSelf(block, src, casted_lhs, casted_rhs, op, lhs_src, rhs_src);
}
|
|
|
|
/// Returns the Zig source spelling of a comparison operator, for use in
/// diagnostics such as "operator {s} not allowed for type '{}'".
fn compareOperatorName(comp: std.math.CompareOperator) []const u8 {
    return switch (comp) {
        .eq => "==",
        .neq => "!=",
        .lt => "<",
        .gt => ">",
        .lte => "<=",
        .gte => ">=",
    };
}
|
|
|
|
/// Compares two operands that have already been coerced to the same type.
/// Folds the result at comptime when both values are known, lowers bool
/// comparisons against a known operand via `runtimeBoolCmp`, and otherwise
/// emits a runtime comparison instruction.
fn cmpSelf(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    casted_lhs: Air.Inst.Ref,
    casted_rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const resolved_type = sema.typeOf(casted_lhs);
    // The break value records which operand forced a runtime comparison, for
    // the "unable to evaluate" error location if the block is comptime.
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveValue(casted_lhs)) |lhs_val| {
            // Any undef operand makes the whole comparison undef.
            if (lhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
            if (try sema.resolveValue(casted_rhs)) |rhs_val| {
                if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);

                // Both comptime-known: fold the result now. Vectors fold to
                // a vector-of-bool value rather than a single bool.
                if (resolved_type.zigTypeTag(mod) == .Vector) {
                    const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
                    return Air.internedToRef(cmp_val.toIntern());
                }

                return if (try sema.compareAll(lhs_val, op, rhs_val, resolved_type))
                    .bool_true
                else
                    .bool_false;
            } else {
                if (resolved_type.zigTypeTag(mod) == .Bool) {
                    // We can lower bool eq/neq more efficiently.
                    return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
                }
                break :src rhs_src;
            }
        } else {
            // For bools, we still check the other operand, because we can lower
            // bool eq/neq more efficiently.
            if (resolved_type.zigTypeTag(mod) == .Bool) {
                if (try sema.resolveValue(casted_rhs)) |rhs_val| {
                    if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
                    return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
                }
            }
            break :src lhs_src;
        }
    };
    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (resolved_type.zigTypeTag(mod) == .Vector) {
        return block.addCmpVector(casted_lhs, casted_rhs, op);
    }
    const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
    return block.addBinOp(tag, casted_lhs, casted_rhs);
}
|
|
|
|
/// Lowers a bool comparison where one operand (`rhs`) is comptime-known:
/// cmp_eq (x, false) => not(x)
/// cmp_eq (x, true ) => x
/// cmp_neq(x, false) => x
/// cmp_neq(x, true ) => not(x)
fn runtimeBoolCmp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    op: std.math.CompareOperator,
    lhs: Air.Inst.Ref,
    rhs: bool,
    runtime_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // A `not` is required exactly when (op, rhs) is (neq, true) or (eq, false).
    const needs_not = (op == .neq) == rhs;
    if (!needs_not) return lhs;
    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addTyOp(.not, Type.bool, lhs);
}
|
|
|
|
/// Implements `@sizeOf`. Resolves the operand type, rejects types with no
/// in-memory size, and returns the (possibly lazy) ABI size as a
/// comptime_int value.
fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, operand_src, inst_data.operand);
    switch (ty.zigTypeTag(mod)) {
        // These types have no size at all; asking for one is a compile error.
        .Fn,
        .NoReturn,
        .Undefined,
        .Null,
        .Opaque,
        => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}),

        // Comptime-only (and zero-bit) types have size 0.
        .Type,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Void,
        => return mod.intRef(Type.comptime_int, 0),

        // All remaining types have a well-defined ABI size.
        .Bool,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .Struct,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Union,
        .Vector,
        .Frame,
        .AnyFrame,
        => {},
    }
    // The size may be lazy (not yet computable); if so, queue full type
    // resolution so it becomes concrete before codegen.
    const val = try ty.lazyAbiSize(mod);
    if (val.isLazySize(mod)) {
        try sema.queueFullTypeResolution(ty);
    }
    return Air.internedToRef(val.toIntern());
}
|
|
|
|
/// Implements `@bitSizeOf`. Mirrors `zirSizeOf`, but returns the size in
/// bits rather than the ABI byte size.
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
    switch (operand_ty.zigTypeTag(mod)) {
        // These types have no size at all; asking for one is a compile error.
        .Fn,
        .NoReturn,
        .Undefined,
        .Null,
        .Opaque,
        => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}),

        // Comptime-only (and zero-bit) types have bit size 0.
        .Type,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Void,
        => return mod.intRef(Type.comptime_int, 0),

        // All remaining types have a well-defined bit size.
        .Bool,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .Struct,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Union,
        .Vector,
        .Frame,
        .AnyFrame,
        => {},
    }
    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
    return mod.intRef(Type.comptime_int, bit_size);
}
|
|
|
|
/// Implements `@This`: resolves to the declaration value of the type that
/// owns the enclosing namespace.
fn zirThis(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
    // The namespace's owning decl is the surrounding struct/enum/union/opaque.
    const this_decl_index = sema.mod.namespacePtr(block.namespace).decl_index;
    return sema.analyzeDeclVal(block, src, this_decl_index);
}
|
|
|
|
/// Records a captured value (or, for runtime values, only its type) into the
/// current capture scope so that nested types can later retrieve it via
/// `zirClosureGet`.
fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
    // Closures are not necessarily constant values. For example, the
    // code might do something like this:
    // fn foo(x: anytype) void { const S = struct {field: @TypeOf(x)}; }
    // ...in which case the closure_capture instruction has access to a runtime
    // value only. In such case only the type is saved into the scope.
    const operand = try sema.resolveInst(inst_data.operand);
    const ty = sema.typeOf(operand);
    // Captures are keyed by (ZIR instruction, capture scope) pair.
    const key: CaptureScope.Key = .{
        .zir_index = inst,
        .index = block.wip_capture_scope,
    };
    if (try sema.resolveValue(operand)) |val| {
        // Comptime-known: intern and store the full value.
        try mod.comptime_capture_scopes.put(gpa, key, try val.intern(ty, mod));
    } else {
        // Runtime-only: store just the type.
        try mod.runtime_capture_scopes.put(gpa, key, ty.toIntern());
    }
}
|
|
|
|
/// Retrieves a value previously captured by `zirClosureCapture`, walking the
/// chain of capture scopes outward from the current declaration. Comptime
/// captures resolve to their interned value; runtime captures are only legal
/// inside a `@TypeOf` block (where a dummy instruction of the right type
/// suffices) and otherwise produce an error.
fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    //const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].inst_node;
    var scope: CaptureScope.Index = mod.declPtr(block.src_decl).src_scope;
    assert(scope != .none);
    // Note: The target closure must be in this scope list.
    // If it's not here, the zir is invalid, or the list is broken.
    const capture_ty = while (true) {
        // Note: We don't need to add a dependency here, because
        // decls always depend on their lexical parents.
        const key: CaptureScope.Key = .{
            .zir_index = inst_data.inst,
            .index = scope,
        };
        // Comptime capture: done — return the interned value directly.
        if (mod.comptime_capture_scopes.get(key)) |val|
            return Air.internedToRef(val);
        // Runtime capture: remember the type and fall through to the
        // runtime handling below.
        if (mod.runtime_capture_scopes.get(key)) |ty|
            break ty;
        scope = scope.parent(mod);
        assert(scope != .none);
    };

    // The comptime case is handled already above. Runtime case below.

    // Runtime capture referenced from outside any function (e.g. a container
    // level expression): error, unless we are inside @TypeOf.
    if (!block.is_typeof and sema.func_index == .none) {
        const msg = msg: {
            // Best-effort recovery of the identifier's spelling from the
            // source tree, for a friendlier error message.
            const name = name: {
                const file = sema.owner_decl.getFileScope(mod);
                const tree = file.getTree(sema.gpa) catch |err| {
                    // In this case we emit a warning + a less precise source location.
                    log.warn("unable to load {s}: {s}", .{
                        file.sub_file_path, @errorName(err),
                    });
                    break :name null;
                };
                const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
                const token = tree.nodes.items(.main_token)[node];
                break :name tree.tokenSlice(token);
            };

            const msg = if (name) |some|
                try sema.errMsg(block, inst_data.src(), "'{s}' not accessible outside function scope", .{some})
            else
                try sema.errMsg(block, inst_data.src(), "variable not accessible outside function scope", .{});
            errdefer msg.destroy(sema.gpa);

            // TODO add "declared here" note
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Runtime capture referenced from a nested function at runtime: error,
    // unless we are inside @TypeOf or a comptime block.
    if (!block.is_typeof and !block.is_comptime and sema.func_index != .none) {
        const msg = msg: {
            // Same best-effort identifier recovery as above.
            const name = name: {
                const file = sema.owner_decl.getFileScope(mod);
                const tree = file.getTree(sema.gpa) catch |err| {
                    // In this case we emit a warning + a less precise source location.
                    log.warn("unable to load {s}: {s}", .{
                        file.sub_file_path, @errorName(err),
                    });
                    break :name null;
                };
                const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
                const token = tree.nodes.items(.main_token)[node];
                break :name tree.tokenSlice(token);
            };

            const msg = if (name) |some|
                try sema.errMsg(block, inst_data.src(), "'{s}' not accessible from inner function", .{some})
            else
                try sema.errMsg(block, inst_data.src(), "variable not accessible from inner function", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, LazySrcLoc.nodeOffset(0), msg, "crossed function definition here", .{});

            // TODO add "declared here" note
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    assert(block.is_typeof);
    // We need a dummy runtime instruction with the correct type.
    return block.addTy(.alloc, Type.fromInterned(capture_ty));
}
|
|
|
|
/// Implements `@returnAddress`: emits a `.ret_addr` instruction at runtime,
/// and yields 0 when evaluated at comptime.
fn zirRetAddr(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    _ = extended;
    if (!block.is_comptime) return block.addNoOp(.ret_addr);
    // TODO: we could give a meaningful lazy value here. #14938
    return sema.mod.intRef(Type.usize, 0);
}
|
|
|
|
/// Implements `@frameAddress`: requires a runtime block (errors at comptime)
/// and emits a `.frame_addr` instruction.
fn zirFrameAddress(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
    try sema.requireRuntimeBlock(block, src, null);
    return block.addNoOp(.frame_addr);
}
|
|
|
|
/// Implements `@src`: builds a comptime-known `std.builtin.SourceLocation`
/// aggregate containing the file path, enclosing function name, and the
/// 1-based line and column of the call site.
fn zirBuiltinSrc(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    _ = block;
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;

    // Build the `fn_name: [:0]const u8` slice value, backed by an anonymous
    // decl holding the NUL-terminated byte array.
    const func_name_val = v: {
        // This dupe prevents InternPool string pool memory from being reallocated
        // while a reference exists.
        const bytes = try sema.arena.dupe(u8, ip.stringToSlice(fn_owner_decl.name));
        const array_ty = try ip.get(gpa, .{ .array_type = .{
            .len = bytes.len,
            .sentinel = .zero_u8,
            .child = .u8_type,
        } });
        break :v try ip.get(gpa, .{ .slice = .{
            .ty = .slice_const_u8_sentinel_0_type,
            .ptr = try ip.get(gpa, .{ .ptr = .{
                .ty = .manyptr_const_u8_sentinel_0_type,
                .addr = .{ .anon_decl = .{
                    .orig_ty = .slice_const_u8_sentinel_0_type,
                    .val = try ip.get(gpa, .{ .aggregate = .{
                        .ty = array_ty,
                        .storage = .{ .bytes = bytes },
                    } }),
                } },
            } }),
            .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(),
        } });
    };

    // Build the `file: [:0]const u8` slice value the same way.
    const file_name_val = v: {
        // The compiler must not call realpath anywhere.
        const bytes = try fn_owner_decl.getFileScope(mod).fullPathZ(sema.arena);
        const array_ty = try ip.get(gpa, .{ .array_type = .{
            .len = bytes.len,
            .sentinel = .zero_u8,
            .child = .u8_type,
        } });
        break :v try ip.get(gpa, .{ .slice = .{
            .ty = .slice_const_u8_sentinel_0_type,
            .ptr = try ip.get(gpa, .{ .ptr = .{
                .ty = .manyptr_const_u8_sentinel_0_type,
                .addr = .{ .anon_decl = .{
                    .orig_ty = .slice_const_u8_sentinel_0_type,
                    .val = try ip.get(gpa, .{ .aggregate = .{
                        .ty = array_ty,
                        .storage = .{ .bytes = bytes },
                    } }),
                } },
            } }),
            .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(),
        } });
    };

    const src_loc_ty = try sema.getBuiltinType("SourceLocation");
    // Field order must match std.builtin.SourceLocation.
    const fields = .{
        // file: [:0]const u8,
        file_name_val,
        // fn_name: [:0]const u8,
        func_name_val,
        // line: u32,
        // ZIR stores 0-based line/column; SourceLocation is 1-based.
        (try mod.intValue(Type.u32, extra.line + 1)).toIntern(),
        // column: u32,
        (try mod.intValue(Type.u32, extra.column + 1)).toIntern(),
    };
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = src_loc_ty.toIntern(),
        .storage = .{ .elems = &fields },
    } })));
}
|
|
|
|
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
const ip = &mod.intern_pool;
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
|
|
const src = inst_data.src();
|
|
const ty = try sema.resolveType(block, src, inst_data.operand);
|
|
const type_info_ty = try sema.getBuiltinType("Type");
|
|
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
|
|
|
|
if (ty.typeDeclInst(mod)) |type_decl_inst| {
|
|
try sema.declareDependency(.{ .namespace = type_decl_inst });
|
|
}
|
|
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Type,
|
|
.Void,
|
|
.Bool,
|
|
.NoReturn,
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.Undefined,
|
|
.Null,
|
|
.EnumLiteral,
|
|
=> |type_info_tag| return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(),
|
|
.val = .void_value,
|
|
} }))),
|
|
.Fn => {
|
|
const fn_info_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Fn"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(fn_info_decl_index);
|
|
const fn_info_decl = mod.declPtr(fn_info_decl_index);
|
|
const fn_info_ty = fn_info_decl.val.toType();
|
|
|
|
const param_info_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
fn_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Param"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(param_info_decl_index);
|
|
const param_info_decl = mod.declPtr(param_info_decl_index);
|
|
const param_info_ty = param_info_decl.val.toType();
|
|
|
|
const func_ty_info = mod.typeToFunc(ty).?;
|
|
const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
|
|
for (param_vals, 0..) |*param_val, i| {
|
|
const param_ty = func_ty_info.param_types.get(ip)[i];
|
|
const is_generic = param_ty == .generic_poison_type;
|
|
const param_ty_val = try ip.get(gpa, .{ .opt = .{
|
|
.ty = try ip.get(gpa, .{ .opt_type = .type_type }),
|
|
.val = if (is_generic) .none else param_ty,
|
|
} });
|
|
|
|
const is_noalias = blk: {
|
|
const index = std.math.cast(u5, i) orelse break :blk false;
|
|
break :blk @as(u1, @truncate(func_ty_info.noalias_bits >> index)) != 0;
|
|
};
|
|
|
|
const param_fields = .{
|
|
// is_generic: bool,
|
|
Value.makeBool(is_generic).toIntern(),
|
|
// is_noalias: bool,
|
|
Value.makeBool(is_noalias).toIntern(),
|
|
// type: ?type,
|
|
param_ty_val,
|
|
};
|
|
param_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = param_info_ty.toIntern(),
|
|
.storage = .{ .elems = ¶m_fields },
|
|
} });
|
|
}
|
|
|
|
const args_val = v: {
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = param_vals.len,
|
|
.child = param_info_ty.toIntern(),
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .elems = param_vals },
|
|
} });
|
|
const ptr_ty = (try sema.ptrType(.{
|
|
.child = param_info_ty.toIntern(),
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.is_const = true,
|
|
},
|
|
})).toIntern();
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = ptr_ty,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
|
|
.addr = .{ .anon_decl = .{
|
|
.orig_ty = ptr_ty,
|
|
.val = new_decl_val,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const ret_ty_opt = try mod.intern(.{ .opt = .{
|
|
.ty = try ip.get(gpa, .{ .opt_type = .type_type }),
|
|
.val = if (func_ty_info.return_type == .generic_poison_type)
|
|
.none
|
|
else
|
|
func_ty_info.return_type,
|
|
} });
|
|
|
|
const callconv_ty = try sema.getBuiltinType("CallingConvention");
|
|
|
|
const field_values = .{
|
|
// calling_convention: CallingConvention,
|
|
(try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
|
|
// alignment: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, ty.abiAlignment(mod).toByteUnits(0))).toIntern(),
|
|
// is_generic: bool,
|
|
Value.makeBool(func_ty_info.is_generic).toIntern(),
|
|
// is_var_args: bool,
|
|
Value.makeBool(func_ty_info.is_var_args).toIntern(),
|
|
// return_type: ?type,
|
|
ret_ty_opt,
|
|
// args: []const Fn.Param,
|
|
args_val,
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = fn_info_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Int => {
|
|
const int_info_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Int"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(int_info_decl_index);
|
|
const int_info_decl = mod.declPtr(int_info_decl_index);
|
|
const int_info_ty = int_info_decl.val.toType();
|
|
|
|
const signedness_ty = try sema.getBuiltinType("Signedness");
|
|
const info = ty.intInfo(mod);
|
|
const field_values = .{
|
|
// signedness: Signedness,
|
|
try (try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).intern(signedness_ty, mod),
|
|
// bits: u16,
|
|
(try mod.intValue(Type.u16, info.bits)).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = int_info_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Float => {
|
|
const float_info_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Float"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(float_info_decl_index);
|
|
const float_info_decl = mod.declPtr(float_info_decl_index);
|
|
const float_info_ty = float_info_decl.val.toType();
|
|
|
|
const field_vals = .{
|
|
// bits: u16,
|
|
(try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = float_info_ty.toIntern(),
|
|
.storage = .{ .elems = &field_vals },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Pointer => {
|
|
const info = ty.ptrInfo(mod);
|
|
const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
|
|
try mod.intValue(Type.comptime_int, alignment)
|
|
else
|
|
try Type.fromInterned(info.child).lazyAbiAlignment(mod);
|
|
|
|
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
|
|
const pointer_ty = t: {
|
|
const decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
(try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Pointer"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(decl_index);
|
|
const decl = mod.declPtr(decl_index);
|
|
break :t decl.val.toType();
|
|
};
|
|
const ptr_size_ty = t: {
|
|
const decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
pointer_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Size"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(decl_index);
|
|
const decl = mod.declPtr(decl_index);
|
|
break :t decl.val.toType();
|
|
};
|
|
|
|
const field_values = .{
|
|
// size: Size,
|
|
try (try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).intern(ptr_size_ty, mod),
|
|
// is_const: bool,
|
|
Value.makeBool(info.flags.is_const).toIntern(),
|
|
// is_volatile: bool,
|
|
Value.makeBool(info.flags.is_volatile).toIntern(),
|
|
// alignment: comptime_int,
|
|
alignment.toIntern(),
|
|
// address_space: AddressSpace
|
|
try (try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).intern(addrspace_ty, mod),
|
|
// child: type,
|
|
info.child,
|
|
// is_allowzero: bool,
|
|
Value.makeBool(info.flags.is_allowzero).toIntern(),
|
|
// sentinel: ?*const anyopaque,
|
|
(try sema.optRefValue(switch (info.sentinel) {
|
|
.none => null,
|
|
else => Value.fromInterned(info.sentinel),
|
|
})).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = pointer_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Array => {
|
|
const array_field_ty = t: {
|
|
const array_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Array"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(array_field_ty_decl_index);
|
|
const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index);
|
|
break :t array_field_ty_decl.val.toType();
|
|
};
|
|
|
|
const info = ty.arrayInfo(mod);
|
|
const field_values = .{
|
|
// len: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, info.len)).toIntern(),
|
|
// child: type,
|
|
info.elem_type.toIntern(),
|
|
// sentinel: ?*const anyopaque,
|
|
(try sema.optRefValue(info.sentinel)).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = array_field_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Vector => {
|
|
const vector_field_ty = t: {
|
|
const vector_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Vector"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(vector_field_ty_decl_index);
|
|
const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index);
|
|
break :t vector_field_ty_decl.val.toType();
|
|
};
|
|
|
|
const info = ty.arrayInfo(mod);
|
|
const field_values = .{
|
|
// len: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, info.len)).toIntern(),
|
|
// child: type,
|
|
info.elem_type.toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = vector_field_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Optional => {
|
|
const optional_field_ty = t: {
|
|
const optional_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Optional"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(optional_field_ty_decl_index);
|
|
const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index);
|
|
break :t optional_field_ty_decl.val.toType();
|
|
};
|
|
|
|
const field_values = .{
|
|
// child: type,
|
|
ty.optionalChild(mod).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = optional_field_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.ErrorSet => {
|
|
// Get the Error type
|
|
const error_field_ty = t: {
|
|
const set_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Error"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
|
|
const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index);
|
|
break :t set_field_ty_decl.val.toType();
|
|
};
|
|
|
|
try sema.queueFullTypeResolution(error_field_ty);
|
|
|
|
// Build our list of Error values
|
|
// Optional value is only null if anyerror
|
|
// Value can be zero-length slice otherwise
|
|
const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
|
|
.anyerror_type => null,
|
|
else => |err_set_ty_index| blk: {
|
|
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
|
|
const vals = try sema.arena.alloc(InternPool.Index, names.len);
|
|
for (vals, 0..) |*field_val, i| {
|
|
// TODO: write something like getCoercedInts to avoid needing to dupe
|
|
const name = try sema.arena.dupeZ(u8, ip.stringToSlice(names.get(ip)[i]));
|
|
const name_val = v: {
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = name.len,
|
|
.sentinel = .zero_u8,
|
|
.child = .u8_type,
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .bytes = name },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = .slice_const_u8_sentinel_0_type,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = .manyptr_const_u8_sentinel_0_type,
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = .slice_const_u8_sentinel_0_type,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const error_field_fields = .{
|
|
// name: [:0]const u8,
|
|
name_val,
|
|
};
|
|
field_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = error_field_ty.toIntern(),
|
|
.storage = .{ .elems = &error_field_fields },
|
|
} });
|
|
}
|
|
|
|
break :blk vals;
|
|
},
|
|
};
|
|
|
|
// Build our ?[]const Error value
|
|
const slice_errors_ty = try sema.ptrType(.{
|
|
.child = error_field_ty.toIntern(),
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.is_const = true,
|
|
},
|
|
});
|
|
const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern());
|
|
const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: {
|
|
const array_errors_ty = try mod.arrayType(.{
|
|
.len = vals.len,
|
|
.child = error_field_ty.toIntern(),
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = array_errors_ty.toIntern(),
|
|
.storage = .{ .elems = vals },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = slice_errors_ty.toIntern(),
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = slice_errors_ty.slicePtrFieldType(mod).toIntern(),
|
|
.addr = .{ .anon_decl = .{
|
|
.orig_ty = slice_errors_ty.toIntern(),
|
|
.val = new_decl_val,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, vals.len)).toIntern(),
|
|
} });
|
|
} else .none;
|
|
const errors_val = try mod.intern(.{ .opt = .{
|
|
.ty = opt_slice_errors_ty.toIntern(),
|
|
.val = errors_payload_val,
|
|
} });
|
|
|
|
// Construct Type{ .ErrorSet = errors_val }
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(),
|
|
.val = errors_val,
|
|
} })));
|
|
},
|
|
.ErrorUnion => {
|
|
const error_union_field_ty = t: {
|
|
const error_union_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "ErrorUnion"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index);
|
|
const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index);
|
|
break :t error_union_field_ty_decl.val.toType();
|
|
};
|
|
|
|
const field_values = .{
|
|
// error_set: type,
|
|
ty.errorUnionSet(mod).toIntern(),
|
|
// payload: type,
|
|
ty.errorUnionPayload(mod).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = error_union_field_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Enum => {
|
|
const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive);
|
|
|
|
const enum_field_ty = t: {
|
|
const enum_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "EnumField"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
|
|
const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index);
|
|
break :t enum_field_ty_decl.val.toType();
|
|
};
|
|
|
|
const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len);
|
|
for (enum_field_vals, 0..) |*field_val, i| {
|
|
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
|
|
const value_val = if (enum_type.values.len > 0)
|
|
try mod.intern_pool.getCoercedInts(
|
|
mod.gpa,
|
|
mod.intern_pool.indexToKey(enum_type.values.get(ip)[i]).int,
|
|
.comptime_int_type,
|
|
)
|
|
else
|
|
(try mod.intValue(Type.comptime_int, i)).toIntern();
|
|
// TODO: write something like getCoercedInts to avoid needing to dupe
|
|
const name = try sema.arena.dupeZ(u8, ip.stringToSlice(enum_type.names.get(ip)[i]));
|
|
const name_val = v: {
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = name.len,
|
|
.sentinel = .zero_u8,
|
|
.child = .u8_type,
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .bytes = name },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = .slice_const_u8_sentinel_0_type,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = .manyptr_const_u8_sentinel_0_type,
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = .slice_const_u8_sentinel_0_type,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const enum_field_fields = .{
|
|
// name: [:0]const u8,
|
|
name_val,
|
|
// value: comptime_int,
|
|
value_val,
|
|
};
|
|
field_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = enum_field_ty.toIntern(),
|
|
.storage = .{ .elems = &enum_field_fields },
|
|
} });
|
|
}
|
|
|
|
const fields_val = v: {
|
|
const fields_array_ty = try mod.arrayType(.{
|
|
.len = enum_field_vals.len,
|
|
.child = enum_field_ty.toIntern(),
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = fields_array_ty.toIntern(),
|
|
.storage = .{ .elems = enum_field_vals },
|
|
} });
|
|
const ptr_ty = (try sema.ptrType(.{
|
|
.child = enum_field_ty.toIntern(),
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.is_const = true,
|
|
},
|
|
})).toIntern();
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = ptr_ty,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = ptr_ty,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace);
|
|
|
|
const type_enum_ty = t: {
|
|
const type_enum_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Enum"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(type_enum_ty_decl_index);
|
|
const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index);
|
|
break :t type_enum_ty_decl.val.toType();
|
|
};
|
|
|
|
const field_values = .{
|
|
// tag_type: type,
|
|
ip.indexToKey(ty.toIntern()).enum_type.tag_ty,
|
|
// fields: []const EnumField,
|
|
fields_val,
|
|
// decls: []const Declaration,
|
|
decls_val,
|
|
// is_exhaustive: bool,
|
|
is_exhaustive.toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = type_enum_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Union => {
|
|
const type_union_ty = t: {
|
|
const type_union_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Union"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(type_union_ty_decl_index);
|
|
const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index);
|
|
break :t type_union_ty_decl.val.toType();
|
|
};
|
|
|
|
const union_field_ty = t: {
|
|
const union_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "UnionField"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
|
|
const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index);
|
|
break :t union_field_ty_decl.val.toType();
|
|
};
|
|
|
|
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
|
|
const union_obj = mod.typeToUnion(ty).?;
|
|
const layout = union_obj.getLayout(ip);
|
|
|
|
const union_field_vals = try gpa.alloc(InternPool.Index, union_obj.field_names.len);
|
|
defer gpa.free(union_field_vals);
|
|
|
|
for (union_field_vals, 0..) |*field_val, i| {
|
|
// TODO: write something like getCoercedInts to avoid needing to dupe
|
|
const name = try sema.arena.dupeZ(u8, ip.stringToSlice(union_obj.field_names.get(ip)[i]));
|
|
const name_val = v: {
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = name.len,
|
|
.sentinel = .zero_u8,
|
|
.child = .u8_type,
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .bytes = name },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = .slice_const_u8_sentinel_0_type,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = .manyptr_const_u8_sentinel_0_type,
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = .slice_const_u8_sentinel_0_type,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const alignment = switch (layout) {
|
|
.Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)),
|
|
.Packed => .none,
|
|
};
|
|
|
|
const field_ty = union_obj.field_types.get(ip)[i];
|
|
const union_field_fields = .{
|
|
// name: [:0]const u8,
|
|
name_val,
|
|
// type: type,
|
|
field_ty,
|
|
// alignment: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
|
|
};
|
|
field_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = union_field_ty.toIntern(),
|
|
.storage = .{ .elems = &union_field_fields },
|
|
} });
|
|
}
|
|
|
|
const fields_val = v: {
|
|
const array_fields_ty = try mod.arrayType(.{
|
|
.len = union_field_vals.len,
|
|
.child = union_field_ty.toIntern(),
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = array_fields_ty.toIntern(),
|
|
.storage = .{ .elems = union_field_vals },
|
|
} });
|
|
const ptr_ty = (try sema.ptrType(.{
|
|
.child = union_field_ty.toIntern(),
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.is_const = true,
|
|
},
|
|
})).toIntern();
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = ptr_ty,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
|
|
.addr = .{ .anon_decl = .{
|
|
.orig_ty = ptr_ty,
|
|
.val = new_decl_val,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
|
|
|
|
const enum_tag_ty_val = try mod.intern(.{ .opt = .{
|
|
.ty = (try mod.optionalType(.type_type)).toIntern(),
|
|
.val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none,
|
|
} });
|
|
|
|
const container_layout_ty = t: {
|
|
const decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
(try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "ContainerLayout"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(decl_index);
|
|
const decl = mod.declPtr(decl_index);
|
|
break :t decl.val.toType();
|
|
};
|
|
|
|
const field_values = .{
|
|
// layout: ContainerLayout,
|
|
(try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
|
|
|
|
// tag_type: ?type,
|
|
enum_tag_ty_val,
|
|
// fields: []const UnionField,
|
|
fields_val,
|
|
// decls: []const Declaration,
|
|
decls_val,
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = type_union_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Struct => {
|
|
const type_struct_ty = t: {
|
|
const type_struct_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Struct"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(type_struct_ty_decl_index);
|
|
const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index);
|
|
break :t type_struct_ty_decl.val.toType();
|
|
};
|
|
|
|
const struct_field_ty = t: {
|
|
const struct_field_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "StructField"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
|
|
const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index);
|
|
break :t struct_field_ty_decl.val.toType();
|
|
};
|
|
|
|
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
|
|
|
|
var struct_field_vals: []InternPool.Index = &.{};
|
|
defer gpa.free(struct_field_vals);
|
|
fv: {
|
|
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
|
|
.anon_struct_type => |tuple| {
|
|
struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len);
|
|
for (struct_field_vals, 0..) |*struct_field_val, i| {
|
|
const anon_struct_type = ip.indexToKey(ty.toIntern()).anon_struct_type;
|
|
const field_ty = anon_struct_type.types.get(ip)[i];
|
|
const field_val = anon_struct_type.values.get(ip)[i];
|
|
const name_val = v: {
|
|
// TODO: write something like getCoercedInts to avoid needing to dupe
|
|
const bytes = if (tuple.names.len != 0)
|
|
// https://github.com/ziglang/zig/issues/15709
|
|
try sema.arena.dupeZ(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip)[i]))
|
|
else
|
|
try std.fmt.allocPrintZ(sema.arena, "{d}", .{i});
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = bytes.len,
|
|
.sentinel = .zero_u8,
|
|
.child = .u8_type,
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .bytes = bytes },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = .slice_const_u8_sentinel_0_type,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = .manyptr_const_u8_sentinel_0_type,
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = .slice_const_u8_sentinel_0_type,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, bytes.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
try sema.resolveTypeLayout(Type.fromInterned(field_ty));
|
|
|
|
const is_comptime = field_val != .none;
|
|
const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null;
|
|
const default_val_ptr = try sema.optRefValue(opt_default_val);
|
|
const struct_field_fields = .{
|
|
// name: [:0]const u8,
|
|
name_val,
|
|
// type: type,
|
|
field_ty,
|
|
// default_value: ?*const anyopaque,
|
|
default_val_ptr.toIntern(),
|
|
// is_comptime: bool,
|
|
Value.makeBool(is_comptime).toIntern(),
|
|
// alignment: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits(0))).toIntern(),
|
|
};
|
|
struct_field_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = struct_field_ty.toIntern(),
|
|
.storage = .{ .elems = &struct_field_fields },
|
|
} });
|
|
}
|
|
break :fv;
|
|
},
|
|
.struct_type => |s| s,
|
|
else => unreachable,
|
|
};
|
|
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
|
|
|
|
try sema.resolveStructFieldInits(ty);
|
|
|
|
for (struct_field_vals, 0..) |*field_val, i| {
|
|
// TODO: write something like getCoercedInts to avoid needing to dupe
|
|
const name = if (struct_type.fieldName(ip, i).unwrap()) |name_nts|
|
|
try sema.arena.dupeZ(u8, ip.stringToSlice(name_nts))
|
|
else
|
|
try std.fmt.allocPrintZ(sema.arena, "{d}", .{i});
|
|
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
|
|
const field_init = struct_type.fieldInit(ip, i);
|
|
const field_is_comptime = struct_type.fieldIsComptime(ip, i);
|
|
const name_val = v: {
|
|
const new_decl_ty = try mod.arrayType(.{
|
|
.len = name.len,
|
|
.sentinel = .zero_u8,
|
|
.child = .u8_type,
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = new_decl_ty.toIntern(),
|
|
.storage = .{ .bytes = name },
|
|
} });
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = .slice_const_u8_sentinel_0_type,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = .manyptr_const_u8_sentinel_0_type,
|
|
.addr = .{ .anon_decl = .{
|
|
.val = new_decl_val,
|
|
.orig_ty = .slice_const_u8_sentinel_0_type,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const opt_default_val = if (field_init == .none) null else Value.fromInterned(field_init);
|
|
const default_val_ptr = try sema.optRefValue(opt_default_val);
|
|
const alignment = switch (struct_type.layout) {
|
|
.Packed => .none,
|
|
else => try sema.structFieldAlignment(
|
|
struct_type.fieldAlign(ip, i),
|
|
field_ty,
|
|
struct_type.layout,
|
|
),
|
|
};
|
|
|
|
const struct_field_fields = .{
|
|
// name: [:0]const u8,
|
|
name_val,
|
|
// type: type,
|
|
field_ty.toIntern(),
|
|
// default_value: ?*const anyopaque,
|
|
default_val_ptr.toIntern(),
|
|
// is_comptime: bool,
|
|
Value.makeBool(field_is_comptime).toIntern(),
|
|
// alignment: comptime_int,
|
|
(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
|
|
};
|
|
field_val.* = try mod.intern(.{ .aggregate = .{
|
|
.ty = struct_field_ty.toIntern(),
|
|
.storage = .{ .elems = &struct_field_fields },
|
|
} });
|
|
}
|
|
}
|
|
|
|
const fields_val = v: {
|
|
const array_fields_ty = try mod.arrayType(.{
|
|
.len = struct_field_vals.len,
|
|
.child = struct_field_ty.toIntern(),
|
|
});
|
|
const new_decl_val = try mod.intern(.{ .aggregate = .{
|
|
.ty = array_fields_ty.toIntern(),
|
|
.storage = .{ .elems = struct_field_vals },
|
|
} });
|
|
const ptr_ty = (try sema.ptrType(.{
|
|
.child = struct_field_ty.toIntern(),
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.is_const = true,
|
|
},
|
|
})).toIntern();
|
|
break :v try mod.intern(.{ .slice = .{
|
|
.ty = ptr_ty,
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
|
|
.addr = .{ .anon_decl = .{
|
|
.orig_ty = ptr_ty,
|
|
.val = new_decl_val,
|
|
} },
|
|
} }),
|
|
.len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(),
|
|
} });
|
|
};
|
|
|
|
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
|
|
|
|
const backing_integer_val = try mod.intern(.{ .opt = .{
|
|
.ty = (try mod.optionalType(.type_type)).toIntern(),
|
|
.val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
|
|
assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod));
|
|
break :val packed_struct.backingIntType(ip).*;
|
|
} else .none,
|
|
} });
|
|
|
|
const container_layout_ty = t: {
|
|
const decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
(try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "ContainerLayout"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(decl_index);
|
|
const decl = mod.declPtr(decl_index);
|
|
break :t decl.val.toType();
|
|
};
|
|
|
|
const layout = ty.containerLayout(mod);
|
|
|
|
const field_values = [_]InternPool.Index{
|
|
// layout: ContainerLayout,
|
|
(try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
|
|
// backing_integer: ?type,
|
|
backing_integer_val,
|
|
// fields: []const StructField,
|
|
fields_val,
|
|
// decls: []const Declaration,
|
|
decls_val,
|
|
// is_tuple: bool,
|
|
Value.makeBool(ty.isTuple(mod)).toIntern(),
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = type_struct_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Opaque => {
|
|
const type_opaque_ty = t: {
|
|
const type_opaque_ty_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
src,
|
|
type_info_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try ip.getOrPutString(gpa, "Opaque"),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index);
|
|
const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index);
|
|
break :t type_opaque_ty_decl.val.toType();
|
|
};
|
|
|
|
try sema.resolveTypeFields(ty);
|
|
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
|
|
|
|
const field_values = .{
|
|
// decls: []const Declaration,
|
|
decls_val,
|
|
};
|
|
return Air.internedToRef((try mod.intern(.{ .un = .{
|
|
.ty = type_info_ty.toIntern(),
|
|
.tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(),
|
|
.val = try mod.intern(.{ .aggregate = .{
|
|
.ty = type_opaque_ty.toIntern(),
|
|
.storage = .{ .elems = &field_values },
|
|
} }),
|
|
} })));
|
|
},
|
|
.Frame => return sema.failWithUseOfAsync(block, src),
|
|
.AnyFrame => return sema.failWithUseOfAsync(block, src),
|
|
}
|
|
}
|
|
|
|
/// Builds the interned value for the `decls: []const Declaration` field of a
/// `std.builtin.Type` payload (used by the `@typeInfo` implementation above).
///
/// Looks up the `Declaration` type inside `type_info_ty`'s namespace, collects
/// one value per public named declaration reachable from `opt_namespace`
/// (recursively following `usingnamespace`, via `typeInfoNamespaceDecls`), and
/// returns an interned `[]const Declaration` slice value backed by an anonymous
/// decl. When `opt_namespace` is `.none`, the result is an empty slice.
fn typeInfoDecls(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    type_info_ty: Type,
    opt_namespace: InternPool.OptionalNamespaceIndex,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;

    // Resolve `std.builtin.Type.Declaration` by name from the namespace of
    // the (already resolved) `std.builtin.Type`.
    const declaration_ty = t: {
        const declaration_ty_decl_index = (try sema.namespaceLookup(
            block,
            src,
            type_info_ty.getNamespaceIndex(mod).unwrap().?,
            try mod.intern_pool.getOrPutString(gpa, "Declaration"),
        )).?;
        try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
        const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
        break :t declaration_ty_decl.val.toType();
    };
    try sema.queueFullTypeResolution(declaration_ty);

    // One interned `Declaration` value per collected decl.
    var decl_vals = std.ArrayList(InternPool.Index).init(gpa);
    defer decl_vals.deinit();

    // Guards against visiting the same namespace twice when `usingnamespace`
    // chains form a cycle (see typeInfoNamespaceDecls).
    var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa);
    defer seen_namespaces.deinit();

    if (opt_namespace.unwrap()) |namespace_index| {
        const namespace = mod.namespacePtr(namespace_index);
        try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces);
    }

    // Intern the backing array, then return a `[]const Declaration` slice
    // value pointing at it through an anonymous decl.
    const array_decl_ty = try mod.arrayType(.{
        .len = decl_vals.items.len,
        .child = declaration_ty.toIntern(),
    });
    const new_decl_val = try mod.intern(.{ .aggregate = .{
        .ty = array_decl_ty.toIntern(),
        .storage = .{ .elems = decl_vals.items },
    } });
    const ptr_ty = (try sema.ptrType(.{
        .child = declaration_ty.toIntern(),
        .flags = .{
            .size = .Slice,
            .is_const = true,
        },
    })).toIntern();
    return try mod.intern(.{ .slice = .{
        .ty = ptr_ty,
        .ptr = try mod.intern(.{ .ptr = .{
            .ty = Type.fromInterned(ptr_ty).slicePtrFieldType(mod).toIntern(),
            .addr = .{ .anon_decl = .{
                .orig_ty = ptr_ty,
                .val = new_decl_val,
            } },
        } }),
        .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(),
    } });
}
|
|
|
|
/// Recursive helper for `typeInfoDecls`: appends one interned `Declaration`
/// value to `decl_vals` for each public named decl in `namespace`, following
/// `usingnamespace` decls into their target namespaces.
///
/// `seen_namespaces` de-duplicates visits so that mutually-`usingnamespace`d
/// containers do not cause infinite recursion; a namespace already present in
/// the map is skipped entirely.
fn typeInfoNamespaceDecls(
    sema: *Sema,
    block: *Block,
    namespace: *Namespace,
    declaration_ty: Type,
    decl_vals: *std.ArrayList(InternPool.Index),
    seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gop = try seen_namespaces.getOrPut(namespace);
    if (gop.found_existing) return;
    const decls = namespace.decls.keys();
    for (decls) |decl_index| {
        const decl = mod.declPtr(decl_index);
        if (decl.kind == .@"usingnamespace") {
            // An in-progress decl means we are inside its own analysis;
            // recursing into it would not terminate, so skip it.
            if (decl.analysis == .in_progress) continue;
            try mod.ensureDeclAnalyzed(decl_index);
            // Recurse into the namespace of the type the usingnamespace
            // refers to; its public decls surface in this container too.
            const new_ns = decl.val.toType().getNamespace(mod).?;
            try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces);
            continue;
        }
        // Only public, named declarations are reported by @typeInfo.
        if (decl.kind != .named or !decl.is_pub) continue;
        // Intern the decl name as a `[:0]const u8` slice value backed by an
        // anonymous decl holding the sentinel-terminated byte array.
        const name_val = v: {
            // TODO: write something like getCoercedInts to avoid needing to dupe
            const name = try sema.arena.dupeZ(u8, ip.stringToSlice(decl.name));
            const new_decl_ty = try mod.arrayType(.{
                .len = name.len,
                .sentinel = .zero_u8,
                .child = .u8_type,
            });
            const new_decl_val = try mod.intern(.{ .aggregate = .{
                .ty = new_decl_ty.toIntern(),
                .storage = .{ .bytes = name },
            } });
            break :v try mod.intern(.{ .slice = .{
                .ty = .slice_const_u8_sentinel_0_type,
                .ptr = try mod.intern(.{ .ptr = .{
                    .ty = .manyptr_const_u8_sentinel_0_type,
                    .addr = .{ .anon_decl = .{
                        .orig_ty = .slice_const_u8_sentinel_0_type,
                        .val = new_decl_val,
                    } },
                } }),
                .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
            } });
        };

        const fields = .{
            //name: [:0]const u8,
            name_val,
        };
        try decl_vals.append(try mod.intern(.{ .aggregate = .{
            .ty = declaration_ty.toIntern(),
            .storage = .{ .elems = &fields },
        } }));
    }
}
|
|
|
|
/// Analyzes a `@TypeOf`-style ZIR instruction with a single operand:
/// resolves the operand and returns its type as an interned AIR ref.
fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_ref = try sema.resolveInst(un_node.operand);
    return Air.internedToRef(sema.typeOf(operand_ref).toIntern());
}
|
|
|
|
/// Analyzes the multi-instruction form of `@TypeOf`: the operand is a ZIR
/// block body that is evaluated in a throwaway child block (runtime-allowed,
/// safety checks disabled, marked `is_typeof`) purely to learn its type.
/// Returns the resulting type, or `error.GenericPoison` if the type is the
/// generic poison type.
fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const block_extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const typeof_body = sema.code.bodySlice(block_extra.end, block_extra.data.body_len);

    // Child block mirrors the parent except that it is never comptime, is
    // flagged as a typeof scope, and emits no safety checks: the generated
    // instructions are discarded — only the operand's type is observed.
    var typeof_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .inlining = block.inlining,
        .is_comptime = false,
        .is_typeof = true,
        .want_safety = false,
        .error_return_trace_index = block.error_return_trace_index,
    };
    defer typeof_block.instructions.deinit(sema.gpa);

    const result = try sema.resolveBody(&typeof_block, typeof_body, inst);
    const result_ty = sema.typeOf(result);
    if (result_ty.isGenericPoison()) return error.GenericPoison;
    return Air.internedToRef(result_ty.toIntern());
}
|
|
|
|
/// Implements the ZIR instruction that computes the shift-amount type for an
/// operand; the actual type computation is delegated to `log2IntType`.
fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_ref = try sema.resolveInst(un_node.operand);
    const log2_ty = try sema.log2IntType(block, sema.typeOf(operand_ref), un_node.src());
    return Air.internedToRef(log2_ty.toIntern());
}
|
|
|
|
/// Returns the type suitable for the right-hand side of a bit shift whose
/// left-hand side has type `operand`:
/// - `comptime_int` shifts by `comptime_int`;
/// - an N-bit integer shifts by the narrowest unsigned integer that can
///   represent N-1 (`u0` when N == 0);
/// - a vector applies the rule to its element type.
/// Any other type is reported as a compile error.
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
    const mod = sema.mod;
    switch (operand.zigTypeTag(mod)) {
        .ComptimeInt => return Type.comptime_int,
        .Int => {
            const bits = operand.bitSize(mod);
            // Count how many bits are needed to represent bits - 1.
            var count: u16 = 0;
            if (bits != 0) {
                var remaining = bits - 1;
                while (remaining != 0) : (remaining >>= 1) {
                    count += 1;
                }
            }
            return mod.intType(.unsigned, count);
        },
        .Vector => {
            // Recurse on the element type and rebuild a vector of same length.
            const scalar_log2_ty = try sema.log2IntType(block, operand.elemType2(mod), src);
            return mod.vectorType(.{
                .len = operand.vectorLen(mod),
                .child = scalar_log2_ty.toIntern(),
            });
        },
        else => {},
    }
    return sema.fail(
        block,
        src,
        "bit shifting operation expected integer type, found '{}'",
        .{operand.fmt(mod)},
    );
}
|
|
|
|
/// Implements `@TypeOf` with multiple arguments: resolves the peer type of
/// all arguments. The argument expressions are analyzed inside a throwaway
/// `is_typeof` child block so no runtime code is emitted for them.
fn zirTypeofPeer(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.TypeOfPeer, extended.operand);
    const src = LazySrcLoc.nodeOffset(extra.data.src_node);
    const body = sema.code.bodySlice(extra.data.body_index, extra.data.body_len);

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .inlining = block.inlining,
        .is_comptime = false,
        .is_typeof = true,
        .runtime_cond = block.runtime_cond,
        .runtime_loop = block.runtime_loop,
        .runtime_index = block.runtime_index,
    };
    defer child_block.instructions.deinit(sema.gpa);
    // Ignore the result, we only care about the instructions in `args`.
    _ = try sema.analyzeBodyBreak(&child_block, body);

    // The argument refs follow the TypeOfPeer payload; `extended.small`
    // holds the argument count.
    const args = sema.code.refSlice(extra.end, extended.small);

    const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
    defer sema.gpa.free(inst_list);

    for (args, 0..) |arg_ref, i| {
        inst_list[i] = try sema.resolveInst(arg_ref);
    }

    // The strategy annotation lets error messages point at the builtin call.
    const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node });
    return Air.internedToRef(result_type.toIntern());
}
|
|
|
|
/// Implements the boolean `!` ZIR instruction: coerces the operand to `bool`,
/// folds at comptime when the value is known (propagating undef), and emits
/// an AIR `not` otherwise.
fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
    const uncasted_operand = try sema.resolveInst(inst_data.operand);
    const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);

    // Comptime path: fold the negation, keeping undef as undef.
    if (try sema.resolveValue(operand)) |val| {
        if (val.isUndef(mod)) return mod.undefRef(Type.bool);
        return if (val.toBool()) .bool_false else .bool_true;
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.not, Type.bool, operand);
}
|
|
|
|
/// Implements short-circuiting `or` (`is_bool_or == true`) and `and`
/// (`is_bool_or == false`). The right-hand side is represented as a ZIR body
/// so it is only analyzed when the left-hand side does not already decide
/// the result.
fn zirBoolBr(
    sema: *Sema,
    parent_block: *Block,
    inst: Zir.Inst.Index,
    is_bool_or: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;

    const datas = sema.code.instructions.items(.data);
    const inst_data = datas[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index);

    const uncoerced_lhs = try sema.resolveInst(extra.data.lhs);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

    const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src);

    if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
        // Comptime-known LHS that short-circuits: `true or _` / `false and _`.
        if (is_bool_or and lhs_val.toBool()) {
            return .bool_true;
        } else if (!is_bool_or and !lhs_val.toBool()) {
            return .bool_false;
        }
        // comptime-known left-hand side. No need for a block here; the result
        // is simply the rhs expression. Here we rely on there only being 1
        // break instruction (`break_inline`).
        const rhs_result = try sema.resolveBody(parent_block, body, inst);
        if (sema.typeOf(rhs_result).isNoReturn(mod)) {
            return rhs_result;
        }
        return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src);
    }

    // Runtime LHS: pre-allocate the `block` instruction both cond_br arms
    // break into; its payload is back-patched by `finishCondBr`.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = .{ .ty_pl = .{
            .ty = .bool_type,
            .payload = undefined,
        } },
    });

    var child_block = parent_block.makeSubBlock();
    child_block.runtime_loop = null;
    child_block.runtime_cond = mod.declPtr(child_block.src_decl).toSrcLoc(lhs_src, mod);
    child_block.runtime_index.increment();
    defer child_block.instructions.deinit(gpa);

    var then_block = child_block.makeSubBlock();
    defer then_block.instructions.deinit(gpa);

    var else_block = child_block.makeSubBlock();
    defer else_block.instructions.deinit(gpa);

    // For `or`, a true LHS short-circuits through the then arm to `true`;
    // for `and`, a false LHS short-circuits through the else arm to `false`.
    // The opposite arm evaluates the RHS body.
    const lhs_block = if (is_bool_or) &then_block else &else_block;
    const rhs_block = if (is_bool_or) &else_block else &then_block;

    const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false;
    _ = try lhs_block.addBr(block_inst, lhs_result);

    const rhs_result = try sema.resolveBody(rhs_block, body, inst);
    const rhs_noret = sema.typeOf(rhs_result).isNoReturn(mod);
    const coerced_rhs_result = if (!rhs_noret) rhs: {
        const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src);
        _ = try rhs_block.addBr(block_inst, coerced_result);
        break :rhs coerced_result;
    } else rhs_result;

    const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
    // If the RHS turned out to be comptime-known after all, the whole
    // expression may still fold to a constant.
    if (!rhs_noret) {
        if (try sema.resolveDefinedValue(rhs_block, rhs_src, coerced_rhs_result)) |rhs_val| {
            if (is_bool_or and rhs_val.toBool()) {
                return .bool_true;
            } else if (!is_bool_or and !rhs_val.toBool()) {
                return .bool_false;
            }
        }
    }

    return result;
}
|
|
|
|
/// Finalizes a two-armed runtime branch built by a caller (e.g. `zirBoolBr`):
/// writes the `cond_br` payload with both arm bodies, appends the `cond_br`
/// to `child_block`, back-patches the payload of the pre-allocated
/// `block_inst` with the child block's body, and attaches `block_inst` to
/// `parent_block`. Returns the ref of the finished block instruction.
fn finishCondBr(
    sema: *Sema,
    parent_block: *Block,
    child_block: *Block,
    then_block: *Block,
    else_block: *Block,
    cond: Air.Inst.Ref,
    block_inst: Air.Inst.Index,
) !Air.Inst.Ref {
    const gpa = sema.gpa;

    // Reserve all extra capacity up front so the AssumeCapacity calls below
    // cannot fail partway through. The `+ 1` accounts for the `cond_br`
    // instruction that is added to `child_block.instructions` after its
    // length is read here.
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        then_block.instructions.items.len + else_block.instructions.items.len +
        @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);

    const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @intCast(then_block.instructions.items.len),
        .else_body_len = @intCast(else_block.instructions.items.len),
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items));
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items));

    _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
        .operand = cond,
        .payload = cond_br_payload,
    } } });

    // Back-patch the payload of the block instruction the caller allocated
    // with `payload = undefined`.
    sema.air_instructions.items(.data)[@intFromEnum(block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(
        Air.Block{ .body_len = @intCast(child_block.instructions.items.len) },
    );
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));

    try parent_block.instructions.append(gpa, block_inst);
    return block_inst.toRef();
}
|
|
|
|
/// Verifies that `ty` can be tested against null. Optional, `null`, and
/// `undefined` types are accepted, as are pointer-like optionals; anything
/// else produces an "expected optional type" compile error.
fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    const ok = switch (ty.zigTypeTag(mod)) {
        .Optional, .Null, .Undefined => true,
        .Pointer => ty.isPtrLikeOptional(mod),
        else => false,
    };
    if (!ok) return sema.failWithExpectedOptionalType(block, src, ty);
}
|
|
|
|
/// Implements the `is_non_null` ZIR instruction: checks the operand is a
/// nullable type, then answers whether it is non-null.
fn zirIsNonNull(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const operand_ref = try sema.resolveInst(un_node.operand);
    try sema.checkNullableType(block, src, sema.typeOf(operand_ref));
    return sema.analyzeIsNull(block, src, operand_ref, true);
}
|
|
|
|
/// Implements the `is_non_null_ptr` ZIR instruction: given a pointer to an
/// optional, answers whether the pointee is non-null.
fn zirIsNonNullPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const ptr = try sema.resolveInst(inst_data.operand);
    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
    // Pointer not comptime-known: emit the dedicated pointer-based check
    // instead of loading through the pointer.
    if ((try sema.resolveValue(ptr)) == null) {
        return block.addUnOp(.is_non_null_ptr, ptr);
    }
    // Comptime-known pointer: load the optional and test the value directly.
    const loaded = try sema.analyzeLoad(block, src, ptr, src);
    return sema.analyzeIsNull(block, src, loaded, true);
}
|
|
|
|
/// Verifies that `ty` can participate in an error check. Error sets, error
/// unions, and `undefined` are accepted; anything else is a compile error.
fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ErrorSet, .ErrorUnion, .Undefined => {},
        else => return sema.fail(block, src, "expected error union type, found '{}'", .{
            ty.fmt(mod),
        }),
    }
}
|
|
|
|
/// Implements the `is_non_err` ZIR instruction: checks the operand has an
/// error-capable type, then answers whether it is not an error.
fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const operand_ref = try sema.resolveInst(un_node.operand);
    try sema.checkErrorType(block, src, sema.typeOf(operand_ref));
    return sema.analyzeIsNonErr(block, src, operand_ref);
}
|
|
|
|
/// Implements the `is_non_err_ptr` ZIR instruction: loads the error union
/// through the operand pointer and answers whether it is not an error.
fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = un_node.src();
    const ptr_ref = try sema.resolveInst(un_node.operand);
    try sema.checkErrorType(block, src, sema.typeOf(ptr_ref).elemType2(mod));
    const pointee = try sema.analyzeLoad(block, src, ptr_ref, src);
    return sema.analyzeIsNonErr(block, src, pointee);
}
|
|
|
|
/// Implements the `ret_is_non_err` ZIR instruction: answers whether the
/// pending return operand is not an error. No type check is performed here.
fn zirRetIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ret_operand = try sema.resolveInst(un_node.operand);
    return sema.analyzeIsNonErr(block, un_node.src(), ret_operand);
}
|
|
|
|
/// Implements the `condbr` ZIR instruction. With a comptime-known condition
/// only the taken branch body is analyzed; otherwise an AIR `cond_br` is
/// emitted carrying both branch bodies. Always terminates the block
/// (returns `always_noreturn`).
fn zirCondbr(
    sema: *Sema,
    parent_block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);

    const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len);
    const else_body = sema.code.bodySlice(extra.end + then_body.len, extra.data.else_body_len);

    const uncasted_cond = try sema.resolveInst(extra.data.condition);
    const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);

    if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
        // Comptime-known condition: analyze only the taken branch.
        const body = if (cond_val.toBool()) then_body else else_body;

        try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src);
        // We use `analyzeBodyInner` since we want to propagate any possible
        // `error.ComptimeBreak` to the caller.
        return sema.analyzeBodyInner(parent_block, body);
    }

    const gpa = sema.gpa;

    // We'll re-use the sub block to save on memory bandwidth, and yank out the
    // instructions array in between using it for the then block and else block.
    var sub_block = parent_block.makeSubBlock();
    sub_block.runtime_loop = null;
    sub_block.runtime_cond = mod.declPtr(parent_block.src_decl).toSrcLoc(cond_src, mod);
    sub_block.runtime_index.increment();
    defer sub_block.instructions.deinit(gpa);

    try sema.analyzeBodyRuntimeBreak(&sub_block, then_body);
    const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
    defer gpa.free(true_instructions);

    // If the condition is an `is_non_err` check, pre-compute the unwrapped
    // error value so `maybeErrorUnwrap` can use it in the else branch.
    const err_cond = blk: {
        const index = extra.data.condition.toIndex() orelse break :blk null;
        if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) break :blk null;

        const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
        const err_operand = try sema.resolveInst(err_inst_data.operand);
        const operand_ty = sema.typeOf(err_operand);
        assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
        const result_ty = operand_ty.errorUnionSet(mod);
        break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
    };

    if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?, cond_src, false)) {
        // nothing to do
    } else {
        try sema.analyzeBodyRuntimeBreak(&sub_block, else_body);
    }
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        true_instructions.len + sub_block.instructions.items.len);
    _ = try parent_block.addInst(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = cond,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @intCast(true_instructions.len),
                .else_body_len = @intCast(sub_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
    return always_noreturn;
}
|
|
|
|
/// Implements the `try` ZIR instruction for a by-value error union.
/// When the operand is comptime-known non-error, the payload is extracted
/// directly; when comptime-known to be an error, the (noreturn) error body
/// is analyzed in the parent block. Otherwise an AIR `try` instruction is
/// emitted that carries the error-branch body.
fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);
    const err_union = try sema.resolveInst(extra.data.operand);
    const err_union_ty = sema.typeOf(err_union);
    const mod = sema.mod;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
            err_union_ty.fmt(mod),
        });
    }
    // `.none` means the error-ness of the operand is not comptime-known.
    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
    if (is_non_err != .none) {
        const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
        if (is_non_err_val.toBool()) {
            return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false);
        }
        // We can analyze the body directly in the parent block because we know there are
        // no breaks from the body possible, and that the body is noreturn.
        return sema.resolveBody(parent_block, body, inst);
    }

    var sub_block = parent_block.makeSubBlock();
    defer sub_block.instructions.deinit(sema.gpa);

    // This body is guaranteed to end with noreturn and has no breaks.
    _ = try sema.analyzeBodyInner(&sub_block, body);

    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).Struct.fields.len +
        sub_block.instructions.items.len);
    const try_inst = try parent_block.addInst(.{
        .tag = .@"try",
        .data = .{ .pl_op = .{
            .operand = err_union,
            .payload = sema.addExtraAssumeCapacity(Air.Try{
                .body_len = @intCast(sub_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
    return try_inst;
}
|
|
|
|
/// Implements the `try` ZIR instruction operating through a pointer to an
/// error union. Mirrors `zirTry`, but the result is a pointer to the error
/// union's payload, carrying over the operand pointer's const/volatile/
/// allowzero qualifiers and address space.
fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
    const body = sema.code.bodySlice(extra.end, extra.data.body_len);
    const operand = try sema.resolveInst(extra.data.operand);
    const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
    const err_union_ty = sema.typeOf(err_union);
    const mod = sema.mod;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
            err_union_ty.fmt(mod),
        });
    }
    // `.none` means the error-ness of the operand is not comptime-known.
    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
    if (is_non_err != .none) {
        const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
        if (is_non_err_val.toBool()) {
            return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false);
        }
        // We can analyze the body directly in the parent block because we know there are
        // no breaks from the body possible, and that the body is noreturn.
        return sema.resolveBody(parent_block, body, inst);
    }

    var sub_block = parent_block.makeSubBlock();
    defer sub_block.instructions.deinit(sema.gpa);

    // This body is guaranteed to end with noreturn and has no breaks.
    _ = try sema.analyzeBodyInner(&sub_block, body);

    // Result type: pointer to the payload, with the operand pointer's
    // qualifiers carried over.
    const operand_ty = sema.typeOf(operand);
    const ptr_info = operand_ty.ptrInfo(mod);
    const res_ty = try sema.ptrType(.{
        .child = err_union_ty.errorUnionPayload(mod).toIntern(),
        .flags = .{
            .is_const = ptr_info.flags.is_const,
            .is_volatile = ptr_info.flags.is_volatile,
            .is_allowzero = ptr_info.flags.is_allowzero,
            .address_space = ptr_info.flags.address_space,
        },
    });
    const res_ty_ref = Air.internedToRef(res_ty.toIntern());
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).Struct.fields.len +
        sub_block.instructions.items.len);
    const try_inst = try parent_block.addInst(.{
        .tag = .try_ptr,
        .data = .{ .ty_pl = .{
            .ty = res_ty_ref,
            .payload = sema.addExtraAssumeCapacity(Air.TryPtr{
                .ptr = operand,
                .body_len = @intCast(sub_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
    return try_inst;
}
|
|
|
|
/// Returns (creating on first use) the `LabeledBlock` for a ZIR block that is
/// being turned into a runtime block after the fact ("post-hoc") because a
/// break targeting it was discovered inside runtime control flow. Reserves an
/// AIR `block` instruction whose data is filled in later and maps the ZIR
/// block to it in `inst_map`.
fn ensurePostHoc(sema: *Sema, block: *Block, dest_block: Zir.Inst.Index) !*LabeledBlock {
    const gop = sema.inst_map.getOrPutAssumeCapacity(dest_block);
    if (gop.found_existing) existing: {
        // This may be a *result* from an earlier iteration of an inline loop.
        // In this case, there will not be a post-hoc block entry, and we can
        // continue with the logic below.
        const new_block_inst = gop.value_ptr.*.toIndex() orelse break :existing;
        return sema.post_hoc_blocks.get(new_block_inst) orelse break :existing;
    }

    try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1);

    // Reserve the AIR block instruction; its data is patched when the block
    // is eventually finished.
    const new_block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    gop.value_ptr.* = new_block_inst.toRef();
    try sema.air_instructions.append(sema.gpa, .{
        .tag = .block,
        .data = undefined,
    });
    const labeled_block = try sema.gpa.create(LabeledBlock);
    labeled_block.* = .{
        .label = .{
            .zir_block = dest_block,
            .merges = .{
                .src_locs = .{},
                .results = .{},
                .br_list = .{},
                .block_inst = new_block_inst,
            },
        },
        .block = .{
            .parent = block,
            .sema = sema,
            .src_decl = block.src_decl,
            .namespace = block.namespace,
            .wip_capture_scope = block.wip_capture_scope,
            .instructions = .{},
            .label = &labeled_block.label,
            .inlining = block.inlining,
            .is_comptime = block.is_comptime,
        },
    };
    sema.post_hoc_blocks.putAssumeCapacityNoClobber(new_block_inst, labeled_block);
    return labeled_block;
}
|
|
|
|
/// A `break` statement is inside a runtime condition, but trying to
/// break from an inline loop. In such case we must convert it to
/// a runtime break: emit an AIR `br` to the post-hoc block for the break
/// target and record the result for the eventual merge.
fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !void {
    const labeled_block = try sema.ensurePostHoc(child_block, break_data.block_inst);

    const operand = try sema.resolveInst(break_data.operand);
    const br_ref = try child_block.addBr(labeled_block.label.merges.block_inst, operand);

    // Record this break's result, br instruction, and (absent) source
    // location in the target block's merge lists.
    try labeled_block.label.merges.results.append(sema.gpa, operand);
    try labeled_block.label.merges.br_list.append(sema.gpa, br_ref.toIndex().?);
    try labeled_block.label.merges.src_locs.append(sema.gpa, null);

    labeled_block.block.runtime_index.increment();
    // Propagate the source location of the runtime condition/loop that made
    // this break runtime, but only if the target block has none yet.
    if (labeled_block.block.runtime_cond == null and labeled_block.block.runtime_loop == null) {
        labeled_block.block.runtime_cond = child_block.runtime_cond orelse child_block.runtime_loop;
        labeled_block.block.runtime_loop = child_block.runtime_loop;
    }
}
|
|
|
|
/// Implements the `unreachable` ZIR instruction. Reaching it at comptime is a
/// compile error; at runtime it lowers to an AIR unreachable with safety
/// enabled. Always terminates the block (returns `always_noreturn`).
fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"unreachable";
    const src = inst_data.src();

    if (block.is_comptime) {
        return sema.fail(block, src, "reached unreachable code", .{});
    }
    // TODO Add compile error for @optimizeFor occurring too late in a scope.
    block.addUnreachable(src, true) catch |err| switch (err) {
        error.AnalysisFail => {
            // If the failure was the naked-function safety-check error,
            // attach an explanatory note before re-raising. The message text
            // must match the error emitted elsewhere exactly.
            const msg = sema.err orelse return err;
            if (!mem.eql(u8, msg.msg, "runtime safety check not allowed in naked function")) return err;
            try sema.errNote(block, src, msg, "the end of a naked function is implicitly unreachable", .{});
            return err;
        },
        else => |e| return e,
    };
    return always_noreturn;
}
|
|
|
|
/// Implements the ZIR instruction for `return error.Foo`: interns the error
/// name, registers it globally, builds the error value typed as a
/// single-error set, and performs the return.
fn zirRetErrValue(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    // Register the error globally; the returned value itself is not needed.
    _ = try mod.getErrorValue(err_name);
    const src = inst_data.src();
    // Return the error code from the function.
    const error_set_type = try mod.singleErrorSetType(err_name);
    const result_inst = Air.internedToRef((try mod.intern(.{ .err = .{
        .ty = error_set_type.toIntern(),
        .name = err_name,
    } })));
    return sema.analyzeRet(block, result_inst, src, src);
}
|
|
|
|
/// Implements the implicit return at the closing brace of a function body.
/// Verifies that falling off the end is legal for the declared return type,
/// producing errors for noreturn and non-void functions, and handles the
/// naked-function case specially.
fn zirRetImplicit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
    const r_brace_src = inst_data.src();
    if (block.inlining == null and sema.func_is_naked) {
        assert(!block.is_comptime);
        if (block.wantSafety()) {
            // Calling a safety function from a naked function would not be legal.
            _ = try block.addNoOp(.trap);
        } else {
            try block.addUnreachable(r_brace_src, false);
        }
        return always_noreturn;
    }

    const operand = try sema.resolveInst(inst_data.operand);
    const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
    const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod);
    if (base_tag == .NoReturn) {
        // A function declared noreturn must never reach its closing brace.
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{
                sema.fn_ret_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    } else if (base_tag != .Void) {
        // A non-void function requires an explicit return value.
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
                sema.fn_ret_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    return sema.analyzeRet(block, operand, r_brace_src, r_brace_src);
}
|
|
|
|
/// Implements the `ret_node` ZIR instruction: return the resolved operand
/// from the function, with source location info for the return operand.
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ret_operand = try sema.resolveInst(un_node.operand);
    return sema.analyzeRet(block, ret_operand, un_node.src(), .{
        .node_offset_return_operand = un_node.src_node,
    });
}
|
|
|
|
/// Implements the `ret_load` ZIR instruction: return by loading from the
/// result pointer. In comptime, inline-call, or naked contexts the value is
/// loaded and routed through `analyzeRet`; otherwise an AIR `ret_load` is
/// emitted, with error-return-trace handling when enabled.
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const ret_ptr = try sema.resolveInst(inst_data.operand);

    if (block.is_comptime or block.inlining != null or sema.func_is_naked) {
        const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
        return sema.analyzeRet(block, operand, src, .{ .node_offset_return_operand = inst_data.src_node });
    }

    if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
        // Check error-ness through the pointer so the trace is only appended
        // to when an error is actually being returned.
        const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
        return sema.retWithErrTracing(block, src, is_non_err, .ret_load, ret_ptr);
    }

    _ = try block.addUnOp(.ret_load, ret_ptr);
    return always_noreturn;
}
|
|
|
|
/// Emits a return (`ret_tag` applied to `operand`) that records the error in
/// the error return trace when an error is being returned. `is_non_err` is
/// the condition "the result is not an error":
/// - comptime `true`: plain return, no trace update;
/// - comptime `false`: call `returnError` unconditionally, then return;
/// - runtime: emit a cond_br selecting between those two paths.
fn retWithErrTracing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    is_non_err: Air.Inst.Ref,
    ret_tag: Air.Inst.Tag,
    operand: Air.Inst.Ref,
) CompileError!Zir.Inst.Index {
    const mod = sema.mod;
    const need_check = switch (is_non_err) {
        .bool_true => {
            // Known non-error: no trace bookkeeping needed at all.
            _ = try block.addUnOp(ret_tag, operand);
            return always_noreturn;
        },
        .bool_false => false,
        else => true,
    };
    const gpa = sema.gpa;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
    const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
    const return_err_fn = try sema.getBuiltin("returnError");
    const args: [1]Air.Inst.Ref = .{err_return_trace};

    if (!need_check) {
        // Known to be an error: unconditionally record it, then return.
        try sema.callBuiltin(block, src, return_err_fn, .never_inline, &args, .@"error return");
        _ = try block.addUnOp(ret_tag, operand);
        return always_noreturn;
    }

    // Runtime condition: the then arm returns directly; the else arm records
    // the error first.
    var then_block = block.makeSubBlock();
    defer then_block.instructions.deinit(gpa);
    _ = try then_block.addUnOp(ret_tag, operand);

    var else_block = block.makeSubBlock();
    defer else_block.instructions.deinit(gpa);
    try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &args, .@"error return");
    _ = try else_block.addUnOp(ret_tag, operand);

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        then_block.instructions.items.len + else_block.instructions.items.len +
        @typeInfo(Air.Block).Struct.fields.len + 1);

    const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @intCast(then_block.instructions.items.len),
        .else_body_len = @intCast(else_block.instructions.items.len),
    });
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items));
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items));

    _ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
        .operand = is_non_err,
        .payload = cond_br_payload,
    } } });

    return always_noreturn;
}
|
|
|
|
/// Whether returns of `fn_ret_ty` should participate in error return
/// tracing: the type must be able to hold an error and error tracing must be
/// enabled in the compilation config.
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
    const mod = sema.mod;
    if (!fn_ret_ty.isError(mod)) return false;
    return mod.comp.config.any_error_tracing;
}
|
|
|
|
/// Implements the `save_err_ret_index` ZIR instruction: records the current
/// error-return-trace index on the block so it can be restored later.
/// Saves only when the owning module has error tracing enabled, the block is
/// a runtime block, and the operand (when present) has an error type.
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].save_err_ret_index;

    if (!block.ownerModule().error_tracing) return;

    // This is only relevant at runtime.
    if (block.is_comptime or block.is_typeof) return;

    const save_index = if (inst_data.operand == .none)
        true
    else b: {
        const operand = try sema.resolveInst(inst_data.operand);
        break :b sema.typeOf(operand).isError(mod);
    };

    if (save_index)
        block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block);
}
|
|
|
|
/// Thin wrapper for the extended `restore_err_ret_index` instruction:
/// decodes the payload and forwards everything to `restoreErrRetIndex`.
fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const data = sema.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data;
    return sema.restoreErrRetIndex(start_block, data.src(), data.block, data.operand);
}
|
|
|
|
/// If `operand` is non-error (or is `none`), restores the error return trace to
/// its state at the point `block` was reached (or, if `block` is `none`, the
/// point this function began execution).
fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_block: Zir.Inst.Ref, operand_zir: Zir.Inst.Ref) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;

    // Locate the trace index saved when `target_block` was entered by walking
    // up the block chain; return early when nothing needs restoring.
    const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: {
        var block = start_block;
        while (true) {
            if (block.label) |label| {
                if (label.zir_block == zir_block) {
                    const target_trace_index = if (block.parent) |parent_block| tgt: {
                        break :tgt parent_block.error_return_trace_index;
                    } else sema.error_return_trace_index_on_fn_entry;

                    if (start_block.error_return_trace_index != target_trace_index)
                        break :b target_trace_index;

                    return; // No need to restore
                }
            }
            block = block.parent.?;
        }
    } else b: {
        // `none` target: restore to the state at function entry.
        if (start_block.error_return_trace_index != sema.error_return_trace_index_on_fn_entry)
            break :b sema.error_return_trace_index_on_fn_entry;

        return; // No need to restore
    };

    const operand = try sema.resolveInstAllowNone(operand_zir);

    if (start_block.is_comptime or start_block.is_typeof) {
        // Comptime: truncate the in-memory comptime error trace directly
        // instead of emitting runtime instructions.
        const is_non_error = if (operand != .none) blk: {
            const is_non_error_inst = try sema.analyzeIsNonErr(start_block, src, operand);
            const cond_val = try sema.resolveDefinedValue(start_block, src, is_non_error_inst);
            break :blk cond_val.?.toBool();
        } else true; // no operand means pop unconditionally

        if (is_non_error) return;

        const saved_index_val = try sema.resolveDefinedValue(start_block, src, saved_index);
        const saved_index_int = saved_index_val.?.toUnsignedInt(mod);
        assert(saved_index_int <= sema.comptime_err_ret_trace.items.len);
        sema.comptime_err_ret_trace.items.len = @intCast(saved_index_int);
        return;
    }

    // Runtime restore only matters when this function can actually grow the
    // trace and the owning module has error tracing enabled.
    if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
    if (!start_block.ownerModule().error_tracing) return;

    assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere

    return sema.popErrorReturnTrace(start_block, src, operand, saved_index);
}
|
|
|
|
/// Adds the error set of `uncasted_operand`'s type to the inferred error set
/// of the function currently being analyzed. The function's return type must
/// be an error union with an inferred error set.
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
    const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern();
    switch (err_set_ty) {
        // An ad-hoc IES is not yet tied to a function instance.
        .adhoc_inferred_error_set_type => {
            const ies = sema.fn_ret_ty_ies.?;
            assert(ies.func == .none);
            try sema.addToInferredErrorSetPtr(ies, sema.typeOf(uncasted_operand));
        },
        // Otherwise, only handle IES types belonging to this function.
        else => if (ip.isInferredErrorSetType(err_set_ty)) {
            const ies = sema.fn_ret_ty_ies.?;
            assert(ies.func == sema.func_index);
            try sema.addToInferredErrorSetPtr(ies, sema.typeOf(uncasted_operand));
        },
    }
}
|
|
|
|
/// Folds the error set carried by `op_ty` into `ies`. Error sets are added
/// directly; error unions contribute their error-set component; all other
/// types contribute nothing.
fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const set_to_add: ?Type = switch (op_ty.zigTypeTag(mod)) {
        .ErrorSet => op_ty,
        .ErrorUnion => op_ty.errorUnionSet(mod),
        else => null,
    };
    if (set_to_add) |set_ty| {
        try ies.addErrorSet(set_ty, ip, sema.arena);
    }
}
|
|
|
|
/// Analyzes a `ret`-style return of `uncasted_operand` from the function under
/// analysis. Handles coercion to the return type, inline/comptime calls
/// (rewriting the return as a break or a `ComptimeReturn`), rejects returns
/// from naked functions, and emits error-return-trace bookkeeping when needed.
fn analyzeRet(
    sema: *Sema,
    block: *Block,
    uncasted_operand: Air.Inst.Ref,
    src: LazySrcLoc,
    operand_src: LazySrcLoc,
) CompileError!Zir.Inst.Index {
    // Special case for returning an error to an inferred error set; we need to
    // add the error tag to the inferred error set of the in-scope function, so
    // that the coercion below works correctly.
    const mod = sema.mod;
    if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
        try sema.addToInferredErrorSet(uncasted_operand);
    }
    const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };

    if (block.inlining) |inlining| {
        if (block.is_comptime) {
            // A comptime call's result must itself be comptime-known; stash it
            // on the inlining state and unwind via `ComptimeReturn`.
            const ret_val = try sema.resolveConstValue(block, operand_src, operand, .{
                .needed_comptime_reason = "value being returned at comptime must be comptime-known",
            });
            inlining.comptime_result = operand;

            // Returning an error at comptime appends a frame to the
            // comptime error return trace.
            if (sema.fn_ret_ty.isError(mod) and ret_val.getErrorName(mod) != .none) {
                const src_decl = mod.declPtr(block.src_decl);
                const src_loc = src_decl.toSrcLoc(src, mod);
                try sema.comptime_err_ret_trace.append(src_loc);
            }
            return error.ComptimeReturn;
        }
        // We are inlining a function call; rewrite the `ret` as a `break`.
        const br_inst = try block.addBr(inlining.merges.block_inst, operand);
        try inlining.merges.results.append(sema.gpa, operand);
        try inlining.merges.br_list.append(sema.gpa, br_inst.toIndex().?);
        try inlining.merges.src_locs.append(sema.gpa, operand_src);
        return always_noreturn;
    } else if (block.is_comptime) {
        return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{});
    } else if (sema.func_is_naked) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "cannot return from naked function", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, src, msg, "can only return using assembly", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    try sema.resolveTypeLayout(sema.fn_ret_ty);

    const air_tag: Air.Inst.Tag = if (block.wantSafety()) .ret_safe else .ret;
    if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
        // Avoid adding a frame to the error return trace in case the value is comptime-known
        // to be not an error.
        const is_non_err = try sema.analyzeIsNonErr(block, operand_src, operand);
        return sema.retWithErrTracing(block, src, is_non_err, air_tag, operand);
    }

    _ = try block.addUnOp(air_tag, operand);

    return always_noreturn;
}
|
|
|
|
/// Reports whether the ZIR arithmetic operation `tag` is defined for
/// floating-point operands.
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
    // Extend this switch as additional operators are implemented.
    switch (tag) {
        .add,
        .sub,
        .mul,
        .div,
        .div_exact,
        .div_trunc,
        .div_floor,
        .mod,
        .rem,
        .mod_rem,
        => return true,
        else => return false,
    }
}
|
|
|
|
/// Analyzes the `ptr_type` ZIR instruction, producing a pointer type from the
/// element type plus optional sentinel, alignment, address space, and
/// bit-range (bit offset + host size) operands. Trailing operands are decoded
/// in a fixed order from `sema.code.extra` according to `inst_data.flags`.
fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type;
    const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
    const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node };
    const sentinel_src: LazySrcLoc = .{ .node_offset_ptr_sentinel = extra.data.src_node };
    const align_src: LazySrcLoc = .{ .node_offset_ptr_align = extra.data.src_node };
    const addrspace_src: LazySrcLoc = .{ .node_offset_ptr_addrspace = extra.data.src_node };
    const bitoffset_src: LazySrcLoc = .{ .node_offset_ptr_bitoffset = extra.data.src_node };
    const hostsize_src: LazySrcLoc = .{ .node_offset_ptr_hostsize = extra.data.src_node };

    const elem_ty = blk: {
        const air_inst = try sema.resolveInst(extra.data.elem_type);
        const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| {
            // Common mistake: writing `ptr` where `ptr.*` was meant; attach a
            // hint to the in-flight error when the operand is a single pointer.
            if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) {
                try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{});
            }
            return err;
        };
        if (ty.isGenericPoison()) return error.GenericPoison;
        break :blk ty;
    };

    if (elem_ty.zigTypeTag(mod) == .NoReturn)
        return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});

    const target = mod.getTarget();

    // Trailing operands are consumed in flag order: sentinel, align,
    // addrspace, then bit range.
    var extra_i = extra.end;

    const sentinel = if (inst_data.flags.has_sentinel) blk: {
        const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
        extra_i += 1;
        const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src);
        const val = try sema.resolveConstDefinedValue(block, sentinel_src, coerced, .{
            .needed_comptime_reason = "pointer sentinel value must be comptime-known",
        });
        break :blk val.toIntern();
    } else .none;

    const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
        const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
        extra_i += 1;
        const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
        const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{
            .needed_comptime_reason = "pointer alignment must be comptime-known",
        });
        // Check if this happens to be the lazy alignment of our element type, in
        // which case we can make this 0 without resolving it.
        switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .int => |int| switch (int.storage) {
                .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none,
                else => {},
            },
            else => {},
        }
        const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?;
        break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes);
    } else .none;

    const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
        const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer);
    } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;

    const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
        const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
        extra_i += 1;
        const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, .{
            .needed_comptime_reason = "pointer bit-offset must be comptime-known",
        });
        break :blk @intCast(bit_offset);
    } else 0;

    const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
        const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
        extra_i += 1;
        const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, .{
            .needed_comptime_reason = "pointer host size must be comptime-known",
        });
        break :blk @intCast(host_size);
    } else 0;

    if (host_size != 0 and bit_offset >= host_size * 8) {
        return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
    }

    // Validate combinations of pointer size and element type.
    if (elem_ty.zigTypeTag(mod) == .Fn) {
        if (inst_data.size != .One) {
            return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
        }
        const fn_align = mod.typeToFunc(elem_ty).?.alignment;
        if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and
            abi_align != fn_align)
        {
            return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{});
        }
    } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
    } else if (inst_data.size == .C) {
        if (!try sema.validateExternType(elem_ty, .other)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(elem_ty_src, mod), elem_ty, .other);

                try sema.addDeclaredHereNote(msg, elem_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        if (elem_ty.zigTypeTag(mod) == .Opaque) {
            return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
        }
    }

    const ty = try sema.ptrType(.{
        .child = elem_ty.toIntern(),
        .sentinel = sentinel,
        .flags = .{
            .alignment = abi_align,
            .address_space = address_space,
            .is_const = !inst_data.flags.is_mutable,
            .is_allowzero = inst_data.flags.is_allowzero,
            .is_volatile = inst_data.flags.is_volatile,
            .size = inst_data.size,
        },
        .packed_offset = .{
            .bit_offset = bit_offset,
            .host_size = host_size,
        },
    });
    return Air.internedToRef(ty.toIntern());
}
|
|
|
|
/// Analyzes the `struct_init_empty` ZIR instruction: an empty initializer
/// `T{}` whose result type is given directly by the operand.
fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const obj_ty = try sema.resolveType(block, src, inst_data.operand);

    // Dispatch on the kind of type being initialized.
    return switch (obj_ty.zigTypeTag(mod)) {
        .Struct => sema.structInitEmpty(block, obj_ty, src, src),
        .Array, .Vector => sema.arrayInitEmpty(block, src, obj_ty),
        .Void => Air.internedToRef(Value.void.toIntern()),
        .Union => sema.fail(block, src, "union initializer must initialize one field", .{}),
        else => sema.failWithArrayInitNotSupported(block, src, obj_ty),
    };
}
|
|
|
|
/// Analyzes an empty initializer whose type comes from the result location
/// (`.{}` / `&.{}`). When `is_byref`, the operand is a pointer type and the
/// result is a reference to an anonymous decl holding the empty value.
fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_byref: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
        // Generic poison means this is an untyped anonymous empty struct init
        error.GenericPoison => return .empty_struct,
        else => |e| return e,
    };
    // For byref inits, the operand is the pointer type; derive the type of the
    // value to construct from its child (or a zero-length array for slices).
    const init_ty = if (is_byref) ty: {
        const ptr_ty = ty_operand.optEuBaseType(mod);
        assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
        if (!ptr_ty.isSlice(mod)) {
            break :ty ptr_ty.childType(mod);
        }
        // To make `&.{}` a `[:s]T`, the init should be a `[0:s]T`.
        break :ty try mod.arrayType(.{
            .len = 0,
            .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
            .child = ptr_ty.childType(mod).toIntern(),
        });
    } else ty_operand;
    // Strip optional/error-union wrappers to find the aggregate being made.
    const obj_ty = init_ty.optEuBaseType(mod);

    const empty_ref = switch (obj_ty.zigTypeTag(mod)) {
        .Struct => try sema.structInitEmpty(block, obj_ty, src, src),
        .Array, .Vector => try sema.arrayInitEmpty(block, src, obj_ty),
        .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
        else => return sema.failWithArrayInitNotSupported(block, src, obj_ty),
    };
    // Coerce back through any optional/error-union wrappers.
    const init_ref = try sema.coerce(block, init_ty, empty_ref, src);

    if (is_byref) {
        const init_val = (try sema.resolveValue(init_ref)).?;
        return anonDeclRef(sema, init_val.toIntern());
    } else {
        return init_ref;
    }
}
|
|
|
|
/// Builds an empty initializer for struct type `struct_ty`. Every field starts
/// out uninitialized; `finishStructInit` fills in defaults and reports any
/// fields that remain missing.
fn structInitEmpty(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    dest_src: LazySrcLoc,
    init_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    // This logic must be synchronized with that in `zirStructInit`.
    try sema.resolveTypeFields(struct_ty);

    // The init values to use for the struct instance; all `.none` here.
    const field_count = struct_ty.structFieldCount(mod);
    const field_inits = try gpa.alloc(Air.Inst.Ref, field_count);
    defer gpa.free(field_inits);
    @memset(field_inits, .none);

    return sema.finishStructInit(block, init_src, dest_src, field_inits, struct_ty, struct_ty, false);
}
|
|
|
|
/// Builds the value for an empty array/vector initializer. Fails unless the
/// target type has length zero; otherwise returns the interned empty
/// aggregate.
fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const arr_len = obj_ty.arrayLen(mod);
    if (arr_len == 0) {
        const empty = try mod.intern(.{ .aggregate = .{
            .ty = obj_ty.toIntern(),
            .storage = .{ .elems = &.{} },
        } });
        return Air.internedToRef(empty);
    }
    if (obj_ty.zigTypeTag(mod) == .Array) {
        return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
    }
    return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
}
|
|
|
|
/// Analyzes the `union_init` ZIR instruction (the `@unionInit` builtin):
/// resolves the union type, the comptime-known field name, and the init
/// value, then delegates to `unionInit`.
fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const field_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data;
    const union_ty = try sema.resolveType(block, ty_src, extra.union_type);
    if (union_ty.zigTypeTag(sema.mod) != .Union) {
        return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(sema.mod)});
    }
    const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{
        .needed_comptime_reason = "name of field being initialized must be comptime-known",
    });
    const init = try sema.resolveInst(extra.init);
    return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src);
}
|
|
|
|
/// Initializes a union of type `union_ty` with field `field_name` set to
/// `uncasted_init` (coerced to the field type). Produces an interned constant
/// when the init value is comptime-known, otherwise a runtime `union_init`
/// AIR instruction.
fn unionInit(
    sema: *Sema,
    block: *Block,
    uncasted_init: Air.Inst.Ref,
    init_src: LazySrcLoc,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
    const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
    const init = try sema.coerce(block, field_ty, uncasted_init, init_src);

    // Comptime-known init: intern the union value directly, tagged with the
    // field's (possibly hypothetical, for untagged unions) enum tag.
    if (try sema.resolveValue(init)) |init_val| {
        const tag_ty = union_ty.unionTagTypeHypothetical(mod);
        const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
        return Air.internedToRef((try mod.intern(.{ .un = .{
            .ty = union_ty.toIntern(),
            .tag = try tag_val.intern(tag_ty, mod),
            .val = try init_val.intern(field_ty, mod),
        } })));
    }

    try sema.requireRuntimeBlock(block, init_src, null);
    _ = union_ty_src;
    try sema.queueFullTypeResolution(union_ty);
    return block.addUnionInit(union_ty, field_index, init);
}
|
|
|
|
/// Analyzes the `struct_init` / `struct_init_ref` ZIR instructions: a typed
/// aggregate initializer with explicit field values. Handles both struct and
/// union result types; falls back to an anonymous init when the result type
/// is generic poison. When `is_ref`, returns a const pointer to the result.
fn zirStructInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
    const src = inst_data.src();

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // The result type is recovered from the first item's `field_type`
    // instruction, which references the container type.
    const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
    const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node;
    const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
    const result_ty = sema.resolveType(block, src, first_field_type_extra.container_type) catch |err| switch (err) {
        error.GenericPoison => {
            // The type wasn't actually known, so treat this as an anon struct init.
            return sema.structInitAnon(block, src, .typed_init, extra.data, extra.end, is_ref);
        },
        else => |e| return e,
    };
    const resolved_ty = result_ty.optEuBaseType(mod);
    try sema.resolveTypeLayout(resolved_ty);

    if (resolved_ty.zigTypeTag(mod) == .Struct) {
        // This logic must be synchronized with that in `zirStructInitEmpty`.

        // Maps field index to field_type index of where it was already initialized.
        // For making sure all fields are accounted for and no fields are duplicated.
        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod));
        defer gpa.free(found_fields);

        // The init values to use for the struct instance.
        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod));
        defer gpa.free(field_inits);
        @memset(field_inits, .none);

        var field_i: u32 = 0;
        var extra_index = extra.end;

        const is_packed = resolved_ty.containerLayout(mod) == .Packed;
        while (field_i < extra.data.fields_len) : (field_i += 1) {
            const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
            extra_index = item.end;

            const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node;
            const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
            const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
            const field_name = try ip.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start));
            const field_index = if (resolved_ty.isTuple(mod))
                try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src)
            else
                try sema.structFieldIndex(block, resolved_ty, field_name, field_src);
            assert(field_inits[field_index] == .none);
            found_fields[field_index] = item.data.field_type;
            const uncoerced_init = try sema.resolveInst(item.data.init);
            const field_ty = resolved_ty.structFieldType(field_index, mod);
            field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
            if (!is_packed) {
                // A comptime field may only be "initialized" with its fixed
                // default value; anything else is a compile error.
                try sema.resolveStructFieldInits(resolved_ty);
                if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
                    const init_val = (try sema.resolveValue(field_inits[field_index])) orelse {
                        return sema.failWithNeededComptime(block, field_src, .{
                            .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                        });
                    };

                    if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) {
                        return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
                    }
                }
            }
        }

        return sema.finishStructInit(block, src, src, field_inits, resolved_ty, result_ty, is_ref);
    } else if (resolved_ty.zigTypeTag(mod) == .Union) {
        if (extra.data.fields_len != 1) {
            return sema.fail(block, src, "union initialization expects exactly one field", .{});
        }

        const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end);

        const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node;
        const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
        const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
        const field_name = try ip.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start));
        const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
        const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
        const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
        const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);

        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            return sema.failWithOwnedErrorMsg(block, msg: {
                const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addFieldErrNote(resolved_ty, field_index, msg, "field '{}' declared here", .{
                    field_name.fmt(ip),
                });
                try sema.addDeclaredHereNote(msg, resolved_ty);
                break :msg msg;
            });
        }

        const uncoerced_init_inst = try sema.resolveInst(item.data.init);
        const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src);

        // Comptime-known union init: intern directly, then coerce through any
        // optional/error-union wrapper in the result type.
        if (try sema.resolveValue(init_inst)) |val| {
            const struct_val = Value.fromInterned((try mod.intern(.{ .un = .{
                .ty = resolved_ty.toIntern(),
                .tag = try tag_val.intern(tag_ty, mod),
                .val = try val.intern(field_ty, mod),
            } })));
            const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src);
            const final_val = (try sema.resolveValue(final_val_inst)).?;
            return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
        }

        if (try sema.typeRequiresComptime(resolved_ty)) {
            return sema.failWithNeededComptime(block, field_src, .{
                .needed_comptime_reason = "initializer of comptime only union must be comptime-known",
            });
        }

        if (is_ref) {
            // By-ref runtime init: alloc, store the field through a field
            // pointer, then set the union tag.
            const target = mod.getTarget();
            const alloc_ty = try sema.ptrType(.{
                .child = result_ty.toIntern(),
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            const alloc = try block.addTy(.alloc, alloc_ty);
            const base_ptr = try sema.optEuBasePtrInit(block, alloc, src);
            const field_ptr = try sema.unionFieldPtr(block, field_src, base_ptr, field_name, field_src, resolved_ty, true);
            try sema.storePtr(block, src, field_ptr, init_inst);
            const new_tag = Air.internedToRef(tag_val.toIntern());
            _ = try block.addBinOp(.set_union_tag, base_ptr, new_tag);
            return sema.makePtrConst(block, alloc);
        }

        try sema.requireRuntimeBlock(block, src, null);
        try sema.queueFullTypeResolution(resolved_ty);
        const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst);
        return sema.coerce(block, result_ty, union_val, src);
    }
    unreachable;
}
|
|
|
|
/// Completes a struct initialization given per-field init refs (`.none` for
/// uninitialized fields). Coerces explicit inits to their field types, fills
/// missing fields from defaults, and reports all fields that have neither an
/// init nor a default in a single accumulated error. Produces an interned
/// constant when all inits are comptime-known, otherwise a runtime
/// `aggregate_init` (or, for `is_ref`, a const pointer built via per-field
/// stores).
fn finishStructInit(
    sema: *Sema,
    block: *Block,
    init_src: LazySrcLoc,
    dest_src: LazySrcLoc,
    field_inits: []Air.Inst.Ref,
    struct_ty: Type,
    result_ty: Type,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    // Missing-field errors are accumulated here so all of them are reported
    // at once rather than one per compile.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    switch (ip.indexToKey(struct_ty.toIntern())) {
        .anon_struct_type => |anon_struct| {
            // We can't get the slices, as the coercion may invalidate them.
            for (0..anon_struct.types.len) |i| {
                if (field_inits[i] != .none) {
                    // Coerce the init value to the field type.
                    const field_ty = Type.fromInterned(anon_struct.types.get(ip)[i]);
                    field_inits[i] = sema.coerce(block, field_ty, field_inits[i], .unneeded) catch |err| switch (err) {
                        error.NeededSourceLocation => {
                            // Replay the failing coercion with a real source
                            // location for the error message.
                            const decl = mod.declPtr(block.src_decl);
                            const field_src = mod.initSrc(init_src.node_offset.x, decl, i);
                            _ = try sema.coerce(block, field_ty, field_inits[i], field_src);
                            unreachable;
                        },
                        else => |e| return e,
                    };
                    continue;
                }

                const default_val = anon_struct.values.get(ip)[i];

                if (default_val == .none) {
                    // No init and no default: record a missing-field error.
                    if (anon_struct.names.len == 0) {
                        const template = "missing tuple field with index {d}";
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, .{i});
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, .{i});
                        }
                    } else {
                        const field_name = anon_struct.names.get(ip)[i];
                        const template = "missing struct field: {}";
                        const args = .{field_name.fmt(ip)};
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, args);
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, args);
                        }
                    }
                } else {
                    field_inits[i] = Air.internedToRef(default_val);
                }
            }
        },
        .struct_type => |struct_type| {
            for (0..struct_type.field_types.len) |i| {
                if (field_inits[i] != .none) {
                    // Coerce the init value to the field type.
                    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                    field_inits[i] = sema.coerce(block, field_ty, field_inits[i], init_src) catch |err| switch (err) {
                        error.NeededSourceLocation => {
                            const decl = mod.declPtr(block.src_decl);
                            const field_src = mod.initSrc(init_src.node_offset.x, decl, i);
                            _ = try sema.coerce(block, field_ty, field_inits[i], field_src);
                            unreachable;
                        },
                        else => |e| return e,
                    };
                    continue;
                }

                try sema.resolveStructFieldInits(struct_ty);

                const field_init = struct_type.fieldInit(ip, i);
                if (field_init == .none) {
                    // No init and no default: record a missing-field error.
                    if (!struct_type.isTuple(ip)) {
                        const field_name = struct_type.field_names.get(ip)[i];
                        const template = "missing struct field: {}";
                        const args = .{field_name.fmt(ip)};
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, args);
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, args);
                        }
                    } else {
                        const template = "missing tuple field with index {d}";
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, .{i});
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, .{i});
                        }
                    }
                } else {
                    field_inits[i] = Air.internedToRef(field_init);
                }
            }
        },
        else => unreachable,
    }

    if (root_msg) |msg| {
        // Point at the struct declaration when one exists, then fail with the
        // accumulated missing-field errors.
        if (mod.typeToStruct(struct_ty)) |struct_type| {
            const decl = mod.declPtr(struct_type.decl.unwrap().?);
            const fqn = try decl.fullyQualifiedName(mod);
            try mod.errNoteNonLazy(
                decl.srcLoc(mod),
                msg,
                "struct '{}' declared here",
                .{fqn.fmt(ip)},
            );
        }
        root_msg = null;
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Find which field forces the expression to be runtime, if any.
    const opt_runtime_index = for (field_inits, 0..) |field_init, i| {
        if (!(try sema.isComptimeKnown(field_init))) {
            break i;
        }
    } else null;

    const runtime_index = opt_runtime_index orelse {
        // Every field is comptime-known: intern the whole aggregate and
        // coerce through any optional/error-union wrapper in `result_ty`.
        const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
        for (elems, field_inits) |*elem, field_init| {
            elem.* = (sema.resolveValue(field_init) catch unreachable).?.toIntern();
        }
        const struct_val = try mod.intern(.{ .aggregate = .{
            .ty = struct_ty.toIntern(),
            .storage = .{ .elems = elems },
        } });
        const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val), init_src);
        const final_val = (try sema.resolveValue(final_val_inst)).?;
        return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
    };

    if (try sema.typeRequiresComptime(struct_ty)) {
        const decl = mod.declPtr(block.src_decl);
        const field_src = mod.initSrc(init_src.node_offset.x, decl, runtime_index);
        return sema.failWithNeededComptime(block, field_src, .{
            .needed_comptime_reason = "initializer of comptime only struct must be comptime-known",
        });
    }

    if (is_ref) {
        // By-ref runtime init: alloc the result and store each field through
        // a field pointer, then return a const pointer to the alloc.
        try sema.resolveStructLayout(struct_ty);
        const target = sema.mod.getTarget();
        const alloc_ty = try sema.ptrType(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        const base_ptr = try sema.optEuBasePtrInit(block, alloc, init_src);
        for (field_inits, 0..) |field_init, i_usize| {
            const i: u32 = @intCast(i_usize);
            const field_src = dest_src;
            const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, base_ptr, i, field_src, struct_ty, true);
            try sema.storePtr(block, dest_src, field_ptr, field_init);
        }

        return sema.makePtrConst(block, alloc);
    }

    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            // Replay with the source location of the runtime-forcing field.
            const decl = mod.declPtr(block.src_decl);
            const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, dest_src, field_src);
            unreachable;
        },
        else => |e| return e,
    };
    try sema.resolveStructFieldInits(struct_ty);
    try sema.queueFullTypeResolution(struct_ty);
    const struct_val = try block.addAggregateInit(struct_ty, field_inits);
    return sema.coerce(block, result_ty, struct_val, init_src);
}
|
|
|
|
/// Analyzes the `struct_init_anon` ZIR instruction: an anonymous struct
/// initializer with no result type, delegating to `structInitAnon`.
fn zirStructInitAnon(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
    return sema.structInitAnon(block, inst_data.src(), .anon_init, extra.data, extra.end, false);
}
|
|
|
|
/// Shared implementation for anonymous struct initializers. Collects each
/// field's name, type (inferred from the initializer), and value; builds the
/// corresponding anonymous tuple type; folds the whole expression to an
/// interned constant when every field is comptime-known; and otherwise
/// lowers to runtime field stores (when `is_ref`) or an `aggregate_init`
/// AIR instruction.
fn structInitAnon(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    /// It is possible for a typed struct_init to be downgraded to an anonymous init due to a
    /// generic poison type. In this case, we need to know to interpret the extra data differently.
    comptime kind: enum { anon_init, typed_init },
    extra_data: switch (kind) {
        .anon_init => Zir.Inst.StructInitAnon,
        .typed_init => Zir.Inst.StructInit,
    },
    extra_end: usize,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const zir_datas = sema.code.instructions.items(.data);

    // Per-field arrays feeding `getAnonStructType`. A `.none` entry in
    // `values` marks a field whose initializer is runtime-known.
    const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len);
    const values = try sema.arena.alloc(InternPool.Index, types.len);
    const names = try sema.arena.alloc(InternPool.NullTerminatedString, types.len);

    // Find which field forces the expression to be runtime, if any.
    const opt_runtime_index = rs: {
        var runtime_index: ?usize = null;
        var extra_index = extra_end;
        for (types, values, names, 0..) |*field_ty, *field_val, *field_name, i_usize| {
            // The two ZIR encodings store items differently, so decode per `kind`.
            const item = switch (kind) {
                .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
                .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
            };
            extra_index = item.end;

            const name = switch (kind) {
                .anon_init => sema.code.nullTerminatedString(item.data.field_name),
                .typed_init => name: {
                    // `item.data.field_type` references a `field_type` instruction
                    const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node;
                    const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index);
                    break :name sema.code.nullTerminatedString(field_type_extra.data.name_start);
                },
            };

            const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
            field_name.* = name_ip;

            // The field's type is inferred from its initializer expression.
            const init = try sema.resolveInst(item.data.init);
            field_ty.* = sema.typeOf(init).toIntern();
            if (Type.fromInterned(field_ty.*).zigTypeTag(mod) == .Opaque) {
                const msg = msg: {
                    const decl = mod.declPtr(block.src_decl);
                    const field_src = mod.initSrc(src.node_offset.x, decl, @intCast(i_usize));
                    const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                    errdefer msg.destroy(sema.gpa);

                    try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*));
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            }
            if (try sema.resolveValue(init)) |init_val| {
                field_val.* = try init_val.intern(Type.fromInterned(field_ty.*), mod);
            } else {
                // Runtime-known field; remember its index for diagnostics.
                field_val.* = .none;
                runtime_index = @intCast(i_usize);
            }
        }
        break :rs runtime_index;
    };

    const tuple_ty = try ip.getAnonStructType(gpa, .{
        .names = names,
        .types = types,
        .values = values,
    });

    // Fully comptime-known: intern the aggregate and we're done.
    const runtime_index = opt_runtime_index orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return sema.addConstantMaybeRef(tuple_val, is_ref);
    };

    // Source locations are computed lazily: only build the precise field
    // source location if requireRuntimeBlock actually needs it to error.
    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const decl = mod.declPtr(block.src_decl);
            const field_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, src, field_src);
            unreachable;
        },
        else => |e| return e,
    };

    if (is_ref) {
        // By-reference lowering: allocate the tuple and store each
        // runtime-known field through a field pointer. Comptime-known fields
        // are already part of the tuple type's field values.
        const target = mod.getTarget();
        const alloc_ty = try sema.ptrType(.{
            .child = tuple_ty,
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        var extra_index = extra_end;
        for (types, 0..) |field_ty, i_usize| {
            const i: u32 = @intCast(i_usize);
            const item = switch (kind) {
                .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
                .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
            };
            extra_index = item.end;

            const field_ptr_ty = try sema.ptrType(.{
                .child = field_ty,
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            if (values[i] == .none) {
                const init = try sema.resolveInst(item.data.init);
                const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
                _ = try block.addBinOp(.store, field_ptr, init);
            }
        }

        return sema.makePtrConst(block, alloc);
    }

    // By-value lowering: re-walk the items and emit a single aggregate_init.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
    var extra_index = extra_end;
    for (types, 0..) |_, i| {
        const item = switch (kind) {
            .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
            .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
        };
        extra_index = item.end;
        element_refs[i] = try sema.resolveInst(item.data.init);
    }

    return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
|
|
|
|
/// Analyzes a typed array (or tuple) initializer ZIR instruction.
/// `args[0]` is the destination type; the remaining args are element
/// initializers. Coerces each element, applies tuple field defaults and the
/// array sentinel, folds to an interned constant when all elements are
/// comptime-known, and otherwise lowers to per-element runtime stores
/// (when `is_ref`) or an `aggregate_init` AIR instruction.
fn zirArrayInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.operands_len);
    assert(args.len >= 2); // array_ty + at least one element

    const result_ty = sema.resolveType(block, src, args[0]) catch |err| switch (err) {
        error.GenericPoison => {
            // The type wasn't actually known, so treat this as an anon array init.
            return sema.arrayInitAnon(block, src, args[1..], is_ref);
        },
        else => |e| return e,
    };
    // Drill through optional/error-union wrappers to the array/tuple itself.
    const array_ty = result_ty.optEuBaseType(mod);
    const is_tuple = array_ty.zigTypeTag(mod) == .Struct;
    const sentinel_val = array_ty.sentinel(mod);

    // Accumulates "missing tuple field" errors so all of them are reported.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    const final_len = try sema.usizeCast(block, src, array_ty.arrayLenIncludingSentinel(mod));
    const resolved_args = try gpa.alloc(Air.Inst.Ref, final_len);
    defer gpa.free(resolved_args);
    for (resolved_args, 0..) |*dest, i| {
        // Less inits than needed.
        if (i + 2 > args.len) if (is_tuple) {
            // Missing tuple element: fall back to the field's default value,
            // or record an error if there is none.
            const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
            if (default_val == .unreachable_value) {
                const template = "missing tuple field with index {d}";
                if (root_msg) |msg| {
                    try sema.errNote(block, src, msg, template, .{i});
                } else {
                    root_msg = try sema.errMsg(block, src, template, .{i});
                }
            } else {
                dest.* = Air.internedToRef(default_val);
            }
            continue;
        } else {
            // Past the provided args on an array: the only remaining slot is
            // the sentinel (final_len includes it).
            dest.* = Air.internedToRef(sentinel_val.?.toIntern());
            break;
        };

        const arg = args[i + 1];
        const resolved_arg = try sema.resolveInst(arg);
        const elem_ty = if (is_tuple)
            array_ty.structFieldType(i, mod)
        else
            array_ty.elemType2(mod);
        // Lazy source location: only compute the per-element location if the
        // coercion actually fails and needs it for the error message.
        dest.* = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) {
            error.NeededSourceLocation => {
                const decl = mod.declPtr(block.src_decl);
                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
                unreachable;
            },
            else => return err,
        };
        if (is_tuple) {
            // Comptime tuple fields must be initialized with exactly the
            // declared comptime value.
            if (array_ty.structFieldIsComptime(i, mod))
                try sema.resolveStructFieldInits(array_ty);
            if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
                const init_val = try sema.resolveValue(dest.*) orelse {
                    const decl = mod.declPtr(block.src_decl);
                    const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                    return sema.failWithNeededComptime(block, elem_src, .{
                        .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                    });
                };
                if (!field_val.eql(init_val, elem_ty, mod)) {
                    const decl = mod.declPtr(block.src_decl);
                    const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                    return sema.failWithInvalidComptimeFieldStore(block, elem_src, array_ty, i);
                }
            }
        }
    }

    if (root_msg) |msg| {
        try sema.addDeclaredHereNote(msg, array_ty);
        root_msg = null;
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // First runtime-known element, if any; null means fully comptime-known.
    const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
        const comptime_known = try sema.isComptimeKnown(arg);
        if (!comptime_known) break @intCast(i);
    } else null;

    const runtime_index = opt_runtime_index orelse {
        // Comptime path: intern the aggregate, then coerce it back into the
        // (possibly optional/error-union wrapped) result type.
        const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len);
        for (elem_vals, resolved_args, 0..) |*val, arg, i| {
            const elem_ty = if (is_tuple)
                array_ty.structFieldType(i, mod)
            else
                array_ty.elemType2(mod);
            // We checked that all args are comptime above.
            val.* = try ((sema.resolveValue(arg) catch unreachable).?).intern(elem_ty, mod);
        }
        const arr_val = try mod.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .elems = elem_vals },
        } });
        const result_ref = try sema.coerce(block, result_ty, Air.internedToRef(arr_val), src);
        const result_val = (try sema.resolveValue(result_ref)).?;
        return sema.addConstantMaybeRef(result_val.toIntern(), is_ref);
    };

    // Lazy source location, same pattern as the coercion above.
    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const decl = mod.declPtr(block.src_decl);
            const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, src, elem_src);
            unreachable;
        },
        else => return err,
    };
    try sema.queueFullTypeResolution(array_ty);

    if (is_ref) {
        // By-reference lowering: allocate the result and store each element
        // through an element pointer.
        const target = mod.getTarget();
        const alloc_ty = try sema.ptrType(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        const base_ptr = try sema.optEuBasePtrInit(block, alloc, src);

        if (is_tuple) {
            // Tuple fields may each have a distinct type, so a fresh element
            // pointer type is needed per field.
            for (resolved_args, 0..) |arg, i| {
                const elem_ptr_ty = try sema.ptrType(.{
                    .child = array_ty.structFieldType(i, mod).toIntern(),
                    .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
                });
                const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());

                const index = try mod.intRef(Type.usize, i);
                const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
                _ = try block.addBinOp(.store, elem_ptr, arg);
            }
            return sema.makePtrConst(block, alloc);
        }

        // Arrays share a single element pointer type.
        const elem_ptr_ty = try sema.ptrType(.{
            .child = array_ty.elemType2(mod).toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());

        for (resolved_args, 0..) |arg, i| {
            const index = try mod.intRef(Type.usize, i);
            const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
            _ = try block.addBinOp(.store, elem_ptr, arg);
        }
        return sema.makePtrConst(block, alloc);
    }

    const arr_ref = try block.addAggregateInit(array_ty, resolved_args);
    return sema.coerce(block, result_ty, arr_ref, src);
}
|
|
|
|
/// Analyzes the `array_init_anon` ZIR instruction: an anonymous array/tuple
/// initializer (`.{ a, b, c }`) with no known result type. Thin wrapper that
/// decodes the operand list and delegates to `arrayInitAnon` (by-value
/// variant: `is_ref = false`).
fn zirArrayInitAnon(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const multi_op = sema.code.extraData(Zir.Inst.MultiOp, pl_node.payload_index);
    const elems = sema.code.refSlice(multi_op.end, multi_op.data.operands_len);
    return arrayInitAnon(sema, block, pl_node.src(), elems, false);
}
|
|
|
|
/// Shared implementation for anonymous array initializers. Each operand's
/// type becomes an element of an unnamed anonymous tuple type. Folds to an
/// interned constant when every operand is comptime-known; otherwise lowers
/// to runtime field stores (when `is_ref`) or an `aggregate_init` AIR
/// instruction.
fn arrayInitAnon(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operands: []const Zir.Inst.Ref,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // Per-element type/value arrays feeding `getAnonStructType`. A `.none`
    // entry in `values` marks a runtime-known element.
    const types = try sema.arena.alloc(InternPool.Index, operands.len);
    const values = try sema.arena.alloc(InternPool.Index, operands.len);

    // Find the first element that forces the expression to be runtime, if any.
    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        for (operands, 0..) |operand, i| {
            const operand_src = src; // TODO better source location
            const elem = try sema.resolveInst(operand);
            types[i] = sema.typeOf(elem).toIntern();
            if (Type.fromInterned(types[i]).zigTypeTag(mod) == .Opaque) {
                const msg = msg: {
                    const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                    errdefer msg.destroy(gpa);

                    try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i]));
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(block, msg);
            }
            if (try sema.resolveValue(elem)) |val| {
                values[i] = val.toIntern();
            } else {
                values[i] = .none;
                runtime_src = operand_src;
            }
        }
        break :rs runtime_src;
    };

    // Empty `names` makes this a tuple rather than a named anonymous struct.
    const tuple_ty = try ip.getAnonStructType(gpa, .{
        .types = types,
        .values = values,
        .names = &.{},
    });

    // Fully comptime-known: intern the aggregate and we're done.
    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return sema.addConstantMaybeRef(tuple_val, is_ref);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (is_ref) {
        // By-reference lowering: allocate the tuple and store each
        // runtime-known element through a field pointer. Comptime-known
        // elements are already part of the tuple type's field values.
        const target = sema.mod.getTarget();
        const alloc_ty = try sema.ptrType(.{
            .child = tuple_ty,
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        for (operands, 0..) |operand, i_usize| {
            const i: u32 = @intCast(i_usize);
            const field_ptr_ty = try sema.ptrType(.{
                .child = types[i],
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            if (values[i] == .none) {
                const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
                _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand));
            }
        }

        return sema.makePtrConst(block, alloc);
    }

    // By-value lowering: emit a single aggregate_init from all operands.
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
    for (operands, 0..) |operand, i| {
        element_refs[i] = try sema.resolveInst(operand);
    }

    return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
|
|
|
|
/// Materializes an interned constant as an AIR ref. When `is_ref` is set,
/// the value is placed in an anonymous decl and a pointer to it is returned;
/// otherwise the interned value itself is referenced directly.
fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref {
    if (is_ref) return anonDeclRef(sema, val);
    return Air.internedToRef(val);
}
|
|
|
|
/// Analyzes the `field_type_ref` ZIR instruction: resolves a container type
/// (first builtin argument) and a comptime-known field name (second builtin
/// argument), then returns the type of that field via `fieldType`.
fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const payload = sema.code.extraData(Zir.Inst.FieldTypeRef, pl_node.payload_index).data;
    const container_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = pl_node.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = pl_node.src_node };
    const container_ty = try sema.resolveType(block, container_src, payload.container_type);
    const name = try sema.resolveConstStringIntern(block, name_src, payload.field_name, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
    return fieldType(sema, block, container_ty, name, name_src, container_src);
}
|
|
|
|
/// Analyzes the `struct_init_field_type` ZIR instruction: determines the
/// type of a named field of a struct-initializer's result type, unwrapping
/// optional/error-union wrappers first. Generic poison in the container type
/// propagates as the generic poison type rather than a compile error.
fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const payload = sema.code.extraData(Zir.Inst.FieldType, pl_node.payload_index).data;
    const container_src = pl_node.src();
    const name_src: LazySrcLoc = .{ .node_offset_field_name_init = pl_node.src_node };
    const wrapped_ty = sema.resolveType(block, container_src, payload.container_type) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    const container_ty = wrapped_ty.optEuBaseType(mod);
    const name_bytes = sema.code.nullTerminatedString(payload.name_start);
    const name = try ip.getOrPutString(sema.gpa, name_bytes);
    return fieldType(sema, block, container_ty, name, name_src, container_src);
}
|
|
|
|
/// Returns (as a type value) the type of the field named `field_name` in
/// `aggregate_ty`. Repeatedly unwraps optionals and error unions so that
/// e.g. initializing `?S` resolves fields of `S`. Fails if the base type is
/// not a struct/union, or if it lacks the named field.
fn fieldType(
    sema: *Sema,
    block: *Block,
    aggregate_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
    ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    var cur_ty = aggregate_ty;
    while (true) {
        try sema.resolveTypeFields(cur_ty);
        switch (cur_ty.zigTypeTag(mod)) {
            .Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
                .anon_struct_type => |anon_struct| {
                    // Tuples have no field names; resolve by position instead.
                    const field_index = if (anon_struct.names.len == 0)
                        try sema.tupleFieldIndex(block, cur_ty, field_name, field_src)
                    else
                        try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
                    return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
                },
                .struct_type => |struct_type| {
                    const field_index = struct_type.nameIndex(ip, field_name) orelse
                        return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
                    const field_ty = struct_type.field_types.get(ip)[field_index];
                    return Air.internedToRef(field_ty);
                },
                else => unreachable,
            },
            .Union => {
                const union_obj = mod.typeToUnion(cur_ty).?;
                const field_index = union_obj.nameIndex(ip, field_name) orelse
                    return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
                const field_ty = union_obj.field_types.get(ip)[field_index];
                return Air.internedToRef(field_ty);
            },
            .Optional => {
                // Struct/array init through optional requires the child type to not be a pointer.
                // If the child of .optional is a pointer it'll error on the next loop.
                cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
                continue;
            },
            .ErrorUnion => {
                // Unwrap to the payload and try again.
                cur_ty = cur_ty.errorUnionPayload(mod);
                continue;
            },
            else => {},
        }
        return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{
            cur_ty.fmt(sema.mod),
        });
    }
}
|
|
|
|
/// Analyzes the `error_return_trace` ZIR instruction by delegating to the
/// shared `getErrorReturnTrace` helper.
fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    return getErrorReturnTrace(sema, block);
}
|
|
|
|
/// Produces a `?*StackTrace` value referencing the current error return
/// trace. Emits an `err_return_trace` AIR instruction only when the owner
/// function calls or awaits errorable functions AND the owning module has
/// error tracing enabled; otherwise returns a comptime-known `null` of type
/// `?*StackTrace`.
fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
    const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());

    if (sema.owner_func_index != .none and
        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
        block.ownerModule().error_tracing)
    {
        return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
    }
    // No trace available in this context: fold to a comptime-known null.
    return Air.internedToRef((try mod.intern(.{ .opt = .{
        .ty = opt_ptr_stack_trace_ty.toIntern(),
        .val = .none,
    } })));
}
|
|
|
|
/// Analyzes the `frame` extended ZIR instruction (`@frame`). Async is not
/// currently supported, so this always reports a "use of async" error at the
/// instruction's source node.
fn zirFrame(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const node_src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
    return sema.failWithUseOfAsync(block, node_src);
}
|
|
|
|
/// Analyzes `@alignOf`. Resolves the operand type and returns its ABI
/// alignment as a (possibly lazy) comptime value; `noreturn` types have no
/// alignment and produce a compile error.
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const arg_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.src_node };
    const operand_ty = try sema.resolveType(block, arg_src, un_node.operand);
    if (operand_ty.isNoReturn(mod)) {
        return sema.fail(block, arg_src, "no align available for type '{}'", .{operand_ty.fmt(sema.mod)});
    }
    const align_val = try operand_ty.lazyAbiAlignment(mod);
    // A lazy alignment needs the type fully resolved later so the value can
    // eventually be computed.
    if (align_val.isLazyAlign(mod)) {
        try sema.queueFullTypeResolution(operand_ty);
    }
    return Air.internedToRef(align_val.toIntern());
}
|
|
|
|
/// Analyzes `@intFromBool`: converts `bool` (or a vector of `bool`) to `u1`
/// (or a vector of `u1`). Comptime-known operands fold to constants (undef
/// maps to undef, element-wise for vectors); runtime operands lower to the
/// `int_from_bool` AIR instruction.
fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
    const operand_scalar_ty = operand_ty.scalarType(mod);
    if (operand_scalar_ty.toIntern() != .bool_type) {
        // NOTE(review): this formats the type *tag* (zigTypeTag result), not
        // the type itself — the message likely intends the found type name;
        // confirm whether `operand_scalar_ty.fmt(mod)` was meant.
        return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(mod)});
    }
    // Comptime path: fold to an interned constant.
    if (try sema.resolveValue(operand)) |val| {
        if (!is_vector) {
            if (val.isUndef(mod)) return mod.undefRef(Type.u1);
            if (val.toBool()) return Air.internedToRef((try mod.intValue(Type.u1, 1)).toIntern());
            return Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
        }
        const len = operand_ty.vectorLen(mod);
        const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len });
        if (val.isUndef(mod)) return mod.undefRef(dest_ty);
        // Convert element-wise; undef elements stay undef.
        const new_elems = try sema.arena.alloc(InternPool.Index, len);
        for (new_elems, 0..) |*new_elem, i| {
            const old_elem = try val.elemValue(mod, i);
            const new_val = if (old_elem.isUndef(mod))
                try mod.undefValue(Type.u1)
            else if (old_elem.toBool())
                try mod.intValue(Type.u1, 1)
            else
                try mod.intValue(Type.u1, 0);
            new_elem.* = new_val.toIntern();
        }
        return Air.internedToRef(try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = new_elems },
        } }));
    }
    // Runtime scalar.
    if (!is_vector) {
        return block.addUnOp(.int_from_bool, operand);
    }
    // Runtime vector: extract, convert, and reassemble each element.
    const len = operand_ty.vectorLen(mod);
    const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len });
    const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
    for (new_elems, 0..) |*new_elem, i| {
        const idx_ref = try mod.intRef(Type.usize, i);
        const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
        new_elem.* = try block.addUnOp(.int_from_bool, old_elem);
    }
    return block.addAggregateInit(dest_ty, new_elems);
}
|
|
|
|
/// Analyzes `@errorName`. Coerces the operand to `anyerror`; if the value is
/// comptime-known, its name is returned as a string literal. Otherwise an
/// `error_name` AIR instruction is emitted.
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const uncoerced_operand = try sema.resolveInst(inst_data.operand);
    // Coerce to `anyerror` so the operand is a plain error value.
    const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src);

    if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
        const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name;
        return sema.addStrLit(sema.mod.intern_pool.stringToSlice(err_name));
    }

    // Similar to zirTagName, we have special AIR instruction for the error name in case an optimization pass
    // might be able to resolve the result at compile time.
    return block.addUnOp(.error_name, operand);
}
|
|
|
|
/// Analyzes `@abs`. The result type matches the operand type, except that a
/// signed integer (or vector thereof) maps to its unsigned counterpart.
/// Comptime-known operands fold via `Value.abs`; otherwise an `abs` AIR
/// instruction is emitted.
fn zirAbs(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = operand_ty.scalarType(mod);

    const result_ty = switch (scalar_ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float, .ComptimeInt => operand_ty,
        // @abs of an unsigned integer is the identity; return the operand as-is.
        .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(mod) else return operand,
        else => return sema.fail(
            block,
            operand_src,
            "expected integer, float, or vector of either integers or floats, found '{}'",
            .{operand_ty.fmt(mod)},
        ),
    };

    return (try sema.maybeConstantUnaryMath(operand, result_ty, Value.abs)) orelse {
        try sema.requireRuntimeBlock(block, operand_src, null);
        return block.addTyOp(.abs, result_ty, operand);
    };
}
|
|
|
|
/// Attempts comptime evaluation of a unary math operation.
/// Returns the folded constant when the operand is comptime-known (applying
/// `eval` element-wise for vectors), or `null` when the operation must be
/// performed at runtime. Undefined operands fold to undef of the result type.
fn maybeConstantUnaryMath(
    sema: *Sema,
    operand: Air.Inst.Ref,
    result_ty: Type,
    comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!?Air.Inst.Ref {
    const mod = sema.mod;
    switch (result_ty.zigTypeTag(mod)) {
        .Vector => if (try sema.resolveValue(operand)) |val| {
            const scalar_ty = result_ty.scalarType(mod);
            const vec_len = result_ty.vectorLen(mod);
            if (val.isUndef(mod))
                return try mod.undefRef(result_ty);

            // Apply `eval` to each element and intern the resulting aggregate.
            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
            for (elems, 0..) |*elem, i| {
                const elem_val = try val.elemValue(sema.mod, i);
                elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod);
            }
            return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                .ty = result_ty.toIntern(),
                .storage = .{ .elems = elems },
            } })));
        },
        else => if (try sema.resolveValue(operand)) |operand_val| {
            if (operand_val.isUndef(mod))
                return try mod.undefRef(result_ty);
            const result_val = try eval(operand_val, result_ty, sema.arena, sema.mod);
            return Air.internedToRef(result_val.toIntern());
        },
    }
    // Operand is runtime-known; caller must emit a runtime instruction.
    return null;
}
|
|
|
|
/// Analyzes unary float math builtins (e.g. `@sqrt`, `@sin`): validates that
/// the operand is a float or vector of floats, folds comptime-known operands
/// via `eval`, and otherwise emits the given `air_tag` AIR instruction.
fn zirUnaryMath(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
    comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = operand_ty.scalarType(mod);

    // Only float (or comptime_float) scalars/vectors are accepted.
    switch (scalar_ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float => {},
        else => return sema.fail(
            block,
            operand_src,
            "expected vector of floats or float type, found '{}'",
            .{operand_ty.fmt(sema.mod)},
        ),
    }

    return (try sema.maybeConstantUnaryMath(operand, operand_ty, eval)) orelse {
        try sema.requireRuntimeBlock(block, operand_src, null);
        return block.addUnOp(air_tag, operand);
    };
}
|
|
|
|
/// Analyzes `@tagName`. For enum literals the name is the value itself; for
/// enums and tagged unions the operand is coerced to the enum type and, when
/// comptime-known, the corresponding field name is returned as a string
/// literal. Runtime operands lower to a `tag_name` AIR instruction, with an
/// optional safety check that the value is a named enum value.
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    try sema.resolveTypeLayout(operand_ty);
    const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
        .EnumLiteral => {
            // An enum literal *is* its name; return it directly.
            const val = try sema.resolveConstDefinedValue(block, .unneeded, operand, undefined);
            const tag_name = ip.indexToKey(val.toIntern()).enum_literal;
            return sema.addStrLit(ip.stringToSlice(tag_name));
        },
        .Enum => operand_ty,
        .Union => operand_ty.unionTagType(mod) orelse
            return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(sema.mod)}),
        else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{
            operand_ty.fmt(mod),
        }),
    };
    if (enum_ty.enumFieldCount(mod) == 0) {
        // TODO I don't think this is the correct way to handle this but
        // it prevents a crash.
        // https://github.com/ziglang/zig/issues/15909
        return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{
            enum_ty.fmt(mod),
        });
    }
    const enum_decl_index = enum_ty.getOwnerDecl(mod);
    const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src);
    if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| {
        // Comptime path: map the tag value back to its declared field.
        const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse {
            // Value does not correspond to any named field (non-exhaustive
            // enum or invalid value).
            const enum_decl = mod.declPtr(enum_decl_index);
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{
                    val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip),
                });
                errdefer msg.destroy(sema.gpa);
                try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        };
        // TODO: write something like getCoercedInts to avoid needing to dupe
        const field_name = enum_ty.enumFieldName(field_index, mod);
        return sema.addStrLit(ip.stringToSlice(field_name));
    }
    try sema.requireRuntimeBlock(block, src, operand_src);
    if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) {
        const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
        try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
    }
    // In case the value is runtime-known, we have an AIR instruction for this instead
    // of trying to lower it in Sema because an optimization pass may result in the operand
    // being comptime-known, which would let us elide the `tag_name` AIR instruction.
    return block.addUnOp(.tag_name, casted_operand);
}
|
|
|
|
/// Implements the `@Type` builtin: turns a comptime-known `std.builtin.Type`
/// value (the `Type` union from the `std` builtin namespace) back into an
/// actual Zig type, returning it as an AIR ref.
///
/// Simple tags (`.Type`, `.Void`, `.Bool`, ...) map directly to well-known
/// interned types. Aggregate tags (`.Struct`, `.Enum`, `.Union`, `.Opaque`)
/// materialize a new anonymous decl plus InternPool type. Async-related tags
/// (`.AnyFrame`, `.Frame`) are rejected since async is unsupported here.
fn zirReify(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    // `extended.small` carries how the resulting type should be named
    // (anon/parent/func-based naming).
    const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const type_info_ty = try sema.getBuiltinType("Type");
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    // The operand must coerce to `std.builtin.Type` and be comptime-known.
    const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
    const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{
        .needed_comptime_reason = "operand to @Type must be comptime-known",
    });
    const union_val = ip.indexToKey(val.toIntern()).un;
    const target = mod.getTarget();
    // Any `undefined` anywhere in the payload makes the whole @Type invalid.
    if (try Value.fromInterned(union_val.val).anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
    const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), mod).?;
    // NOTE: this relies on the field order of `std.builtin.Type` matching
    // `std.builtin.TypeId` — the tag index is reinterpreted as a TypeId.
    switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
        .Type => return .type_type,
        .Void => return .void_type,
        .Bool => return .bool_type,
        .NoReturn => return .noreturn_type,
        .ComptimeFloat => return .comptime_float_type,
        .ComptimeInt => return .comptime_int_type,
        .Undefined => return .undefined_type,
        .Null => return .null_type,
        .AnyFrame => return sema.failWithUseOfAsync(block, src),
        .EnumLiteral => return .enum_literal_type,
        .Int => {
            // Payload fields are looked up by name in the `Type.Int` struct.
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
                mod,
                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
            );
            const bits_val = try Value.fromInterned(union_val.val).fieldValue(
                mod,
                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits")).?,
            );

            const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
            const ty = try mod.intType(signedness, bits);
            return Air.internedToRef(ty.toIntern());
        },
        .Vector => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "len"),
            ).?);
            const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "child"),
            ).?);

            const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema));
            const child_ty = child_val.toType();

            // Rejects element types that are not allowed in vectors.
            try sema.checkVectorElemType(block, src, child_ty);

            const ty = try mod.vectorType(.{
                .len = len,
                .child = child_ty.toIntern(),
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Float => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "bits"),
            ).?);

            // Only the standard IEEE-ish widths are representable.
            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
            const ty = switch (bits) {
                16 => Type.f16,
                32 => Type.f32,
                64 => Type.f64,
                80 => Type.f80,
                128 => Type.f128,
                else => return sema.fail(block, src, "{}-bit float unsupported", .{bits}),
            };
            return Air.internedToRef(ty.toIntern());
        },
        .Pointer => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "size"),
            ).?);
            const is_const_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_const"),
            ).?);
            const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_volatile"),
            ).?);
            const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "alignment"),
            ).?);
            const address_space_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "address_space"),
            ).?);
            const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "child"),
            ).?);
            const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_allowzero"),
            ).?);
            const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "sentinel"),
            ).?);

            // Alignment validation: must fit in u32 and be zero or a power of two.
            if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                return sema.fail(block, src, "alignment must fit in 'u32'", .{});
            }

            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
            if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
                return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
            }
            const abi_align = Alignment.fromByteUnits(alignment_val_int);

            const elem_ty = child_val.toType();
            // An explicit alignment requires the element's layout to be known.
            if (abi_align != .none) {
                try sema.resolveTypeLayout(elem_ty);
            }

            const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);

            // The sentinel arrives as `?*const child`; deref it at comptime to
            // get the interned sentinel value. Only many-pointers/slices may
            // carry one.
            const actual_sentinel: InternPool.Index = s: {
                if (!sentinel_val.isNull(mod)) {
                    if (ptr_size == .One or ptr_size == .C) {
                        return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
                    }
                    const sentinel_ptr_val = sentinel_val.optionalValue(mod).?;
                    const ptr_ty = try mod.singleMutPtrType(elem_ty);
                    const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
                    break :s sent_val.toIntern();
                }
                break :s .none;
            };

            // Element-type / pointer-size compatibility checks.
            if (elem_ty.zigTypeTag(mod) == .NoReturn) {
                return sema.fail(block, src, "pointer to noreturn not allowed", .{});
            } else if (elem_ty.zigTypeTag(mod) == .Fn) {
                if (ptr_size != .One) {
                    return sema.fail(block, src, "function pointers must be single pointers", .{});
                }
                const fn_align = mod.typeToFunc(elem_ty).?.alignment;
                if (abi_align != .none and fn_align != .none and
                    abi_align != fn_align)
                {
                    return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{});
                }
            } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
                return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
            } else if (ptr_size == .C) {
                // C pointers require a C-ABI-compatible, non-opaque pointee.
                if (!try sema.validateExternType(elem_ty, .other)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), elem_ty, .other);

                        try sema.addDeclaredHereNote(msg, elem_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
                if (elem_ty.zigTypeTag(mod) == .Opaque) {
                    return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
                }
            }

            const ty = try sema.ptrType(.{
                .child = elem_ty.toIntern(),
                .sentinel = actual_sentinel,
                .flags = .{
                    .size = ptr_size,
                    .is_const = is_const_val.toBool(),
                    .is_volatile = is_volatile_val.toBool(),
                    .alignment = abi_align,
                    .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
                    .is_allowzero = is_allowzero_val.toBool(),
                },
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Array => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "len"),
            ).?);
            const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "child"),
            ).?);
            const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "sentinel"),
            ).?);

            const len = try len_val.toUnsignedIntAdvanced(sema);
            const child_ty = child_val.toType();
            // As with pointers, the sentinel is a `?*const child` that must be
            // dereferenced at comptime.
            const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
                const ptr_ty = try mod.singleMutPtrType(child_ty);
                break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?;
            } else null;

            const ty = try mod.arrayType(.{
                .len = len,
                .sentinel = if (sentinel) |s| s.toIntern() else .none,
                .child = child_ty.toIntern(),
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Optional => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "child"),
            ).?);

            const child_ty = child_val.toType();

            const ty = try mod.optionalType(child_ty.toIntern());
            return Air.internedToRef(ty.toIntern());
        },
        .ErrorUnion => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "error_set"),
            ).?);
            const payload_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "payload"),
            ).?);

            const error_set_ty = error_set_val.toType();
            const payload_ty = payload_val.toType();

            if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
                return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
            }

            const ty = try mod.errorUnionType(error_set_ty, payload_ty);
            return Air.internedToRef(ty.toIntern());
        },
        .ErrorSet => {
            // A null payload means the inferred "any error" set.
            const payload_val = Value.fromInterned(union_val.val).optionalValue(mod) orelse
                return Air.internedToRef(Type.anyerror.toIntern());

            const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
            var names: InferredErrorSet.NameMap = .{};
            try names.ensureUnusedCapacity(sema.arena, len);
            for (0..len) |i| {
                const elem_val = try payload_val.elemValue(mod, i);
                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "name"),
                ).?);

                const name = try name_val.toIpString(Type.slice_const_u8, mod);
                // Register the error name globally; the map detects duplicates
                // within this set.
                _ = try mod.getErrorValue(name);
                const gop = names.getOrPutAssumeCapacity(name);
                if (gop.found_existing) {
                    return sema.fail(block, src, "duplicate error '{}'", .{
                        name.fmt(ip),
                    });
                }
            }

            const ty = try mod.errorSetFromUnsortedNames(names.keys());
            return Air.internedToRef(ty.toIntern());
        },
        .Struct => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "layout"),
            ).?);
            const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "backing_integer"),
            ).?);
            const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "decls"),
            ).?);
            const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_tuple"),
            ).?);

            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

            // Decls
            // Reified types can never carry declarations.
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified structs must have no decls", .{});
            }

            if (layout != .Packed and !backing_integer_val.isNull(mod)) {
                return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
            }

            // Struct creation is complex enough to live in its own function.
            return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
        },
        .Enum => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "tag_type"),
            ).?);
            const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "decls"),
            ).?);
            const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_exhaustive"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified enums must have no decls", .{});
            }

            const int_tag_ty = tag_type_val.toType();
            if (int_tag_ty.zigTypeTag(mod) != .Int) {
                return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
            }

            // Because these things each reference each other, `undefined`
            // placeholders are used before being set after the enum type gains
            // an InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "enum", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            // Define our empty enum decl
            const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
            const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
                .decl = new_decl_index,
                .namespace = .none,
                .fields_len = fields_len,
                .has_values = true,
                .tag_mode = if (!is_exhaustive_val.toBool())
                    .nonexhaustive
                else
                    .explicit,
                .tag_ty = int_tag_ty.toIntern(),
                .zir_index = .none,
            });
            // TODO: figure out InternPool removals for incremental compilation
            //errdefer ip.remove(incomplete_enum.index);

            new_decl.ty = Type.type;
            new_decl.val = Value.fromInterned(incomplete_enum.index);

            // Populate the fields one at a time, validating name uniqueness,
            // value uniqueness, and that each value fits the tag type.
            for (0..fields_len) |field_i| {
                const elem_val = try fields_val.elemValue(mod, field_i);
                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "name"),
                ).?);
                const value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "value"),
                ).?);

                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

                if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
                    // TODO: better source location
                    return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
                        field_name.fmt(ip),
                        value_val.fmtValue(Type.comptime_int, mod),
                        int_tag_ty.fmt(mod),
                    });
                }

                // `addFieldName` returns the clashing index on duplicate.
                if (incomplete_enum.addFieldName(ip, field_name)) |other_index| {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
                            field_name.fmt(ip),
                        });
                        errdefer msg.destroy(gpa);
                        _ = other_index; // TODO: this note is incorrect
                        try sema.errNote(block, src, msg, "other field here", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }

                if (incomplete_enum.addFieldValue(ip, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                        errdefer msg.destroy(gpa);
                        _ = other; // TODO: this note is incorrect
                        try sema.errNote(block, src, msg, "other enum tag value here", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            }

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
        .Opaque => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "decls"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified opaque must have no decls", .{});
            }

            // Because these three things each reference each other,
            // `undefined` placeholders are used in two places before being set
            // after the opaque type gains an InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "opaque", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            const new_namespace_index = try mod.createNamespace(.{
                .parent = block.namespace.toOptional(),
                .decl_index = new_decl_index,
                .file_scope = block.getFileScope(mod),
            });
            errdefer mod.destroyNamespace(new_namespace_index);

            const opaque_ty = try mod.intern(.{ .opaque_type = .{
                .decl = new_decl_index,
                .namespace = new_namespace_index,
                .zir_index = .none,
            } });
            // TODO: figure out InternPool removals for incremental compilation
            //errdefer ip.remove(opaque_ty);

            new_decl.ty = Type.type;
            new_decl.val = Value.fromInterned(opaque_ty);

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
        .Union => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "layout"),
            ).?);
            const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "tag_type"),
            ).?);
            const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "decls"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified unions must have no decls", .{});
            }
            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
            const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));

            // Tag type
            // With an explicit tag enum: track which enum fields the union
            // covers (`explicit_tags_seen`). Without one: collect the field
            // names so a tag enum can be generated afterwards.
            var explicit_tags_seen: []bool = &.{};
            var enum_field_names: []InternPool.NullTerminatedString = &.{};
            var enum_tag_ty: InternPool.Index = .none;
            if (tag_type_val.optionalValue(mod)) |payload_val| {
                enum_tag_ty = payload_val.toType().toIntern();

                const enum_type = switch (ip.indexToKey(enum_tag_ty)) {
                    .enum_type => |x| x,
                    else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
                };

                explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
                @memset(explicit_tags_seen, false);
            } else {
                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
            }

            // Fields
            var any_aligned_fields: bool = false;
            var union_fields: std.MultiArrayList(struct {
                type: InternPool.Index,
                alignment: InternPool.Alignment,
            }) = .{};
            var field_name_table: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
            try field_name_table.ensureTotalCapacity(sema.arena, fields_len);

            for (0..fields_len) |i| {
                const elem_val = try fields_val.elemValue(mod, i);
                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "name"),
                ).?);
                const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "type"),
                ).?);
                const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "alignment"),
                ).?);

                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

                if (enum_field_names.len != 0) {
                    enum_field_names[i] = field_name;
                }

                if (enum_tag_ty != .none) {
                    // Every union field must correspond to a field of the
                    // explicit tag enum.
                    const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
                    const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                        return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
                            field_name.fmt(ip), Type.fromInterned(enum_tag_ty).fmt(mod),
                        });
                    };
                    assert(explicit_tags_seen.len == tag_info.names.len);
                    // No check for duplicate because the check already happened in order
                    // to create the enum type in the first place.
                    assert(!explicit_tags_seen[enum_index]);
                    explicit_tags_seen[enum_index] = true;
                }

                const gop = field_name_table.getOrPutAssumeCapacity(field_name);
                if (gop.found_existing) {
                    // TODO: better source location
                    return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
                }

                const field_ty = type_val.toType();
                const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
                if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
                    // TODO: better source location
                    return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{
                        alignment_val_int,
                    });
                }
                const field_align = Alignment.fromByteUnits(alignment_val_int);
                any_aligned_fields = any_aligned_fields or field_align != .none;

                try union_fields.append(sema.arena, .{
                    .type = field_ty.toIntern(),
                    .alignment = field_align,
                });

                // Field-type restrictions mirroring source-level unions.
                if (field_ty.zigTypeTag(mod) == .Opaque) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
                        errdefer msg.destroy(gpa);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
                if (layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), field_ty, .union_field);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotPacked(msg, src_decl.toSrcLoc(src, mod), field_ty);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            }

            if (enum_tag_ty != .none) {
                // The explicit tag enum may not have fields the union lacks.
                const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
                if (tag_info.names.len > fields_len) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
                        errdefer msg.destroy(gpa);

                        assert(explicit_tags_seen.len == tag_info.names.len);
                        for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                            if (explicit_tags_seen[field_index]) continue;
                            try sema.addFieldErrNote(Type.fromInterned(enum_tag_ty), field_index, msg, "field '{}' missing, declared here", .{
                                field_name.fmt(ip),
                            });
                        }
                        try sema.addDeclaredHereNote(msg, Type.fromInterned(enum_tag_ty));
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            } else {
                enum_tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, .none);
            }

            // Because these three things each reference each other, `undefined`
            // placeholders are used before being set after the union type gains an
            // InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "union", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            const new_namespace_index = try mod.createNamespace(.{
                .parent = block.namespace.toOptional(),
                .decl_index = new_decl_index,
                .file_scope = block.getFileScope(mod),
            });
            errdefer mod.destroyNamespace(new_namespace_index);

            const union_ty = try ip.getUnionType(gpa, .{
                .decl = new_decl_index,
                .namespace = new_namespace_index,
                .enum_tag_ty = enum_tag_ty,
                .fields_len = fields_len,
                .zir_index = .none,
                .flags = .{
                    .layout = layout,
                    .status = .have_field_types,
                    // Untagged auto-layout unions still get a safety tag when
                    // the block wants safety checks.
                    .runtime_tag = if (!tag_type_val.isNull(mod))
                        .tagged
                    else if (layout != .Auto)
                        .none
                    else switch (block.wantSafety()) {
                        true => .safety,
                        false => .none,
                    },
                    .any_aligned_fields = any_aligned_fields,
                    .requires_comptime = .unknown,
                    .assumed_runtime_bits = false,
                    .assumed_pointer_aligned = false,
                    .alignment = .none,
                },
                .field_types = union_fields.items(.type),
                .field_aligns = if (any_aligned_fields) union_fields.items(.alignment) else &.{},
            });

            new_decl.ty = Type.type;
            new_decl.val = Value.fromInterned(union_ty);

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
        .Fn => {
            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
            const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "calling_convention"),
            ).?);
            const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "alignment"),
            ).?);
            const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_generic"),
            ).?);
            const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "is_var_args"),
            ).?);
            const return_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "return_type"),
            ).?);
            const params_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
                ip,
                try ip.getOrPutString(gpa, "params"),
            ).?);

            // Generic function types cannot be reified.
            const is_generic = is_generic_val.toBool();
            if (is_generic) {
                return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
            }

            const is_var_args = is_var_args_val.toBool();
            const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
            if (is_var_args) {
                try sema.checkCallConvSupportsVarArgs(block, src, cc);
            }

            // An alignment equal to the target default is canonicalized to
            // `.none` so equivalent function types intern identically.
            const alignment = alignment: {
                const alignment = try sema.validateAlignAllowZero(block, src, try alignment_val.toUnsignedIntAdvanced(sema));
                const default = target_util.defaultFunctionAlignment(target);
                break :alignment if (alignment == default) .none else alignment;
            };
            const return_type = return_type_val.optionalValue(mod) orelse
                return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});

            const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
            const param_types = try sema.arena.alloc(InternPool.Index, args_len);

            // `noalias_bits` is a bitmask over the first 32 parameters.
            var noalias_bits: u32 = 0;
            for (param_types, 0..) |*param_type, i| {
                const elem_val = try params_val.elemValue(mod, i);
                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
                const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "is_generic"),
                ).?);
                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "is_noalias"),
                ).?);
                const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
                    ip,
                    try ip.getOrPutString(gpa, "type"),
                ).?);

                if (param_is_generic_val.toBool()) {
                    return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
                }

                const param_type_val = opt_param_type_val.optionalValue(mod) orelse
                    return sema.fail(block, src, "Type.Fn.Param.type must be non-null for @Type", .{});
                param_type.* = param_type_val.toIntern();

                if (param_is_noalias_val.toBool()) {
                    if (!Type.fromInterned(param_type.*).isPtrAtRuntime(mod)) {
                        return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
                    }
                    noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
                        return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
                }
            }

            const ty = try mod.funcType(.{
                .param_types = param_types,
                .noalias_bits = noalias_bits,
                .return_type = return_type.toIntern(),
                .alignment = alignment,
                .cc = cc,
                .is_var_args = is_var_args,
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Frame => return sema.failWithUseOfAsync(block, src),
    }
}
|
|
|
|
/// Implements `@Type` for struct types: builds a new struct type from the
/// comptime-known `fields_val` array of reified field descriptions (each
/// providing `name`, `type`, `default_value`, `is_comptime`, `alignment`).
/// The new type is owned by a freshly created anonymous `Decl`.
/// Returns a reference to the interned struct type, or a compile error for
/// invalid layouts, alignments, field types, or duplicate/ill-formed names.
fn reifyStruct(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    src: LazySrcLoc,
    layout: std.builtin.Type.ContainerLayout,
    backing_int_val: Value,
    fields_val: Value,
    name_strategy: Zir.Inst.NameStrategy,
    is_tuple: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // Tuples only support the automatic layout.
    if (is_tuple) switch (layout) {
        .Extern => return sema.fail(block, src, "extern tuples are not supported", .{}),
        .Packed => return sema.fail(block, src, "packed tuples are not supported", .{}),
        .Auto => {},
    };

    const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the struct type gains an
    // InternPool index.

    // The decl initially carries placeholder type/value; they are patched to
    // the real struct type once it has been interned below.
    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, name_strategy, "struct", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer {
        new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
        mod.abortAnonDecl(new_decl_index);
    }

    const ty = try ip.getStructType(gpa, .{
        .decl = new_decl_index,
        .namespace = .none,
        .zir_index = .none,
        .layout = layout,
        .known_non_opv = false,
        .fields_len = fields_len,
        .requires_comptime = .unknown,
        .is_tuple = is_tuple,
        // So that we don't have to scan ahead, we allocate space in the struct
        // type for alignments, comptime fields, and default inits. This might
        // result in wasted space, however, this is a permitted encoding of
        // struct types.
        .any_comptime_fields = true,
        .any_default_inits = true,
        .inits_resolved = true,
        .any_aligned_fields = true,
    });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer ip.remove(ty);
    const struct_type = ip.indexToKey(ty).struct_type;

    // Now that the type exists, point the anonymous decl at it.
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(ty);

    // Fields
    for (0..fields_len) |i| {
        const elem_val = try fields_val.elemValue(mod, i);
        const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
        // Pull each component of the field description out by field name.
        const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
            ip,
            try ip.getOrPutString(gpa, "name"),
        ).?);
        const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
            ip,
            try ip.getOrPutString(gpa, "type"),
        ).?);
        const default_value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
            ip,
            try ip.getOrPutString(gpa, "default_value"),
        ).?);
        const is_comptime_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
            ip,
            try ip.getOrPutString(gpa, "is_comptime"),
        ).?);
        const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
            ip,
            try ip.getOrPutString(gpa, "alignment"),
        ).?);

        if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
            return sema.fail(block, src, "alignment must fit in 'u32'", .{});
        }
        const abi_align = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;

        // Layout-specific alignment / comptime-field restrictions.
        if (layout == .Packed) {
            if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
            if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
        } else {
            if (abi_align > 0 and !math.isPowerOfTwo(abi_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{abi_align});
            struct_type.field_aligns.get(ip)[i] = Alignment.fromByteUnits(abi_align);
        }
        if (layout == .Extern and is_comptime_val.toBool()) {
            return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
        }

        const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

        if (is_tuple) {
            // Tuple field names must be the decimal index of the field.
            const field_index = field_name.toUnsigned(ip) orelse return sema.fail(
                block,
                src,
                "tuple cannot have non-numeric field '{}'",
                .{field_name.fmt(ip)},
            );

            if (field_index >= fields_len) {
                return sema.fail(
                    block,
                    src,
                    "tuple field {} exceeds tuple field count",
                    .{field_index},
                );
            }
        } else if (struct_type.addFieldName(ip, field_name)) |prev_index| {
            _ = prev_index; // TODO: better source location
            return sema.fail(block, src, "duplicate struct field name {}", .{field_name.fmt(ip)});
        }

        const field_ty = type_val.toType();
        // `default_value` is an optional pointer; dereference it at comptime
        // to obtain the actual default value for the field.
        const default_val = if (default_value_val.optionalValue(mod)) |opt_val|
            (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse
                return sema.failWithNeededComptime(block, src, .{
                .needed_comptime_reason = "struct field default value must be comptime-known",
            })).toIntern()
        else
            .none;
        if (is_comptime_val.toBool() and default_val == .none) {
            return sema.fail(block, src, "comptime field without default initialization value", .{});
        }

        struct_type.field_types.get(ip)[i] = field_ty.toIntern();
        struct_type.field_inits.get(ip)[i] = default_val;
        if (is_comptime_val.toBool())
            struct_type.setFieldComptime(ip, i);

        // Field type validity checks, matching the errors produced for
        // directly declared structs.
        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                errdefer msg.destroy(gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
                errdefer msg.destroy(gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), field_ty, .struct_field);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotPacked(msg, src_decl.toSrcLoc(src, mod), field_ty);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    }

    if (layout == .Packed) {
        // Packed structs need every field layout resolved so that the total
        // bit size (and therefore the backing integer) can be computed now.
        for (0..struct_type.field_types.len) |index| {
            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
            sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
                error.AnalysisFail => {
                    const msg = sema.err orelse return err;
                    try sema.addFieldErrNote(Type.fromInterned(ty), index, msg, "while checking this field", .{});
                    return err;
                },
                else => return err,
            };
        }

        var fields_bit_sum: u64 = 0;
        for (0..struct_type.field_types.len) |i| {
            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
            fields_bit_sum += field_ty.bitSize(mod);
        }

        // Use the explicitly provided backing integer if present, otherwise
        // synthesize an unsigned integer exactly wide enough for all fields.
        if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
            const backing_int_ty = backing_int_ty_val.toType();
            try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
        } else {
            const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
        }
    }

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}
|
|
|
|
/// Resolves `zir_ref` and coerces the result to a mutable pointer to the
/// builtin `VaList` type, reporting any coercion error at `src`.
fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
    const va_list_ty = try sema.getBuiltinType("VaList");
    const ptr_to_va_list_ty = try sema.mod.singleMutPtrType(va_list_ty);
    const uncoerced = try sema.resolveInst(zir_ref);
    return sema.coerce(block, ptr_to_va_list_ty, uncoerced, src);
}
|
|
|
|
/// Analyzes `@cVaArg`: reads the next variadic argument of type `arg_ty`
/// from a `VaList`. The requested type must be extern-compatible; otherwise a
/// detailed error explaining why it is not is emitted.
fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const bin_node = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(bin_node.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = bin_node.node };
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = bin_node.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, bin_node.lhs);
    const arg_ty = try sema.resolveType(block, ty_src, bin_node.rhs);

    // Only types valid in an extern context may cross the C varargs boundary.
    if (!try sema.validateExternType(arg_ty, .param_ty)) {
        const err_msg = blk: {
            const err_msg = try sema.errMsg(block, ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
            errdefer err_msg.destroy(sema.gpa);

            const src_decl = sema.mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(err_msg, src_decl.toSrcLoc(ty_src, mod), arg_ty, .param_ty);

            try sema.addDeclaredHereNote(err_msg, arg_ty);
            break :blk err_msg;
        };
        return sema.failWithOwnedErrorMsg(block, err_msg);
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.c_va_arg, arg_ty, va_list_ref);
}
|
|
|
|
/// Analyzes `@cVaCopy`: produces a runtime copy of an existing `VaList`.
fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(un_node.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, un_node.operand);
    const va_list_ty = try sema.getBuiltinType("VaList");

    // C va_list manipulation is inherently a runtime operation.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
}
|
|
|
|
/// Analyzes `@cVaEnd`: finalizes a `VaList`, emitting the corresponding AIR
/// instruction in a runtime block.
fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(un_node.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, un_node.operand);

    // C va_list manipulation is inherently a runtime operation.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addUnOp(.c_va_end, va_list_ref);
}
|
|
|
|
/// Analyzes `@cVaStart`: begins variadic argument access, yielding a new
/// `VaList` value via the `c_va_start` AIR instruction.
fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
    const va_list_ty = try sema.getBuiltinType("VaList");

    // C va_list manipulation is inherently a runtime operation.
    try sema.requireRuntimeBlock(block, src, null);
    const air_inst: Air.Inst = .{
        .tag = .c_va_start,
        .data = .{ .ty = va_list_ty },
    };
    return block.addInst(air_inst);
}
|
|
|
|
/// Analyzes `@typeName`: renders the operand type's name into the Sema arena
/// and returns it as a string literal.
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.src_node };
    const operand_ty = try sema.resolveType(block, ty_src, un_node.operand);

    // Arena-backed buffer; freed wholesale with the Sema arena.
    var name_buf = std.ArrayList(u8).init(sema.arena);
    try operand_ty.print(name_buf.writer(), mod);
    return addStrLitNoAlias(sema, name_buf.items);
}
|
|
|
|
/// Analyzes `@Frame`: async frames are not supported, so this always reports
/// a use-of-async error at the builtin's source location.
fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    return sema.failWithUseOfAsync(block, un_node.src());
}
|
|
|
|
/// Analyzes `@frameSize`: async frames are not supported, so this always
/// reports a use-of-async error at the builtin's source location.
fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    return sema.failWithUseOfAsync(block, un_node.src());
}
|
|
|
|
/// Analyzes `@intFromFloat`: converts a float (or vector of floats) to the
/// requested integer (or vector of integers) result type. Comptime operands
/// are folded; runtime conversions additionally emit safety checks (when the
/// block wants safety) that the value's integer part fits the destination.
fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intFromFloat");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);

    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;

    const dest_scalar_ty = dest_ty.scalarType(mod);
    const operand_scalar_ty = operand_ty.scalarType(mod);

    // Destination must be an integer type; operand must be a float type.
    _ = try sema.checkIntType(block, src, dest_scalar_ty);
    try sema.checkFloatType(block, operand_src, operand_scalar_ty);

    // Comptime path: fold the conversion (truncating toward zero).
    if (try sema.resolveValue(operand)) |operand_val| {
        const result_val = try sema.intFromFloat(block, operand_src, operand_val, operand_ty, dest_ty, .truncate);
        return Air.internedToRef(result_val.toIntern());
    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.failWithNeededComptime(block, operand_src, .{
            .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
        });
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    // Zero-bit destination integers: the only representable value is 0, so
    // the result is a constant; with safety on, assert the operand is 0.0.
    if (dest_scalar_ty.intInfo(mod).bits == 0) {
        if (!is_vector) {
            if (block.wantSafety()) {
                const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
                try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
            }
            return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern());
        }
        // Vector: check each element, then return a splat of zero.
        if (block.wantSafety()) {
            const len = dest_ty.vectorLen(mod);
            for (0..len) |i| {
                const idx_ref = try mod.intRef(Type.usize, i);
                const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
                const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
                try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
            }
        }
        return Air.internedToRef(try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .repeated_elem = (try mod.intValue(dest_scalar_ty, 0)).toIntern() },
        } }));
    }
    if (!is_vector) {
        const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
        if (block.wantSafety()) {
            // Round-trip check: converting back and subtracting must leave a
            // fractional remainder strictly within (-1, 1), otherwise the
            // integer part was out of range for the destination type.
            const back = try block.addTyOp(.float_from_int, operand_ty, result);
            const diff = try block.addBinOp(.sub, operand, back);
            const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
            const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
            const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
            try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
        }
        return result;
    }
    // Vector path: convert element-wise (with the same round-trip safety
    // check per element), then aggregate into the destination vector.
    const len = dest_ty.vectorLen(mod);
    const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
    for (new_elems, 0..) |*new_elem, i| {
        const idx_ref = try mod.intRef(Type.usize, i);
        const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
        const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
        if (block.wantSafety()) {
            const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result);
            const diff = try block.addBinOp(.sub, old_elem, back);
            const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
            const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
            const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
            try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
        }
        new_elem.* = result;
    }
    return block.addAggregateInit(dest_ty, new_elems);
}
|
|
|
|
/// Analyzes `@floatFromInt`: converts an integer (or vector of integers) to
/// the requested float (or vector of floats) result type. Comptime operands
/// are folded; runtime conversions lower to `float_from_int` AIR ops.
fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const bin = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, bin.lhs, .remove_eu_opt, "@floatFromInt");
    const operand = try sema.resolveInst(bin.rhs);
    const operand_ty = sema.typeOf(operand);

    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);

    const dest_scalar_ty = dest_ty.scalarType(mod);
    const operand_scalar_ty = operand_ty.scalarType(mod);

    // Destination must be a float type; operand must be an integer type.
    try sema.checkFloatType(block, src, dest_scalar_ty);
    _ = try sema.checkIntType(block, operand_src, operand_scalar_ty);

    // Comptime path: fold the conversion into a constant.
    if (try sema.resolveValue(operand)) |operand_val| {
        const folded = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema);
        return Air.internedToRef(folded.toIntern());
    }
    if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) {
        return sema.failWithNeededComptime(block, operand_src, .{
            .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
        });
    }

    try sema.requireRuntimeBlock(block, src, operand_src);

    // Scalar case lowers to a single conversion instruction.
    if (dest_ty.zigTypeTag(mod) != .Vector) {
        return block.addTyOp(.float_from_int, dest_ty, operand);
    }

    // Vector case: convert element-wise, then re-aggregate.
    const elems = try sema.arena.alloc(Air.Inst.Ref, operand_ty.vectorLen(mod));
    for (elems, 0..) |*elem, i| {
        const index_ref = try mod.intRef(Type.usize, i);
        const src_elem = try block.addBinOp(.array_elem_val, operand, index_ref);
        elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, src_elem);
    }
    return block.addAggregateInit(dest_ty, elems);
}
|
|
|
|
/// Analyzes `@ptrFromInt`: converts a `usize` (or vector of `usize`) into the
/// requested pointer (or vector of pointers) type. Comptime-known operands
/// are validated and folded via `ptrFromIntVal`; runtime operands lower to a
/// bitcast, with optional safety checks for null and misaligned addresses.
fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_res = try sema.resolveInst(extra.rhs);

    const uncoerced_operand_ty = sema.typeOf(operand_res);
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrFromInt");
    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src);

    // For a vector destination, coerce the operand to a matching vector of
    // usize; otherwise coerce to plain usize.
    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
    const operand_ty = if (is_vector) operand_ty: {
        const len = dest_ty.vectorLen(mod);
        break :operand_ty try mod.vectorType(.{ .child = .usize_type, .len = len });
    } else Type.usize;

    const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src);

    const ptr_ty = dest_ty.scalarType(mod);
    try sema.checkPtrType(block, src, ptr_ty, true);

    const elem_ty = ptr_ty.elemType2(mod);
    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);

    // Slices need a length, which an address alone cannot supply.
    if (ptr_ty.isSlice(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "slice length cannot be inferred from address", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Comptime path: validate and fold each address into a pointer value.
    if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
        if (!is_vector) {
            const ptr_val = try sema.ptrFromIntVal(block, operand_src, val, ptr_ty, ptr_align);
            return Air.internedToRef(ptr_val.toIntern());
        }
        const len = dest_ty.vectorLen(mod);
        const new_elems = try sema.arena.alloc(InternPool.Index, len);
        for (new_elems, 0..) |*new_elem, i| {
            const elem = try val.elemValue(mod, i);
            const ptr_val = try sema.ptrFromIntVal(block, operand_src, elem, ptr_ty, ptr_align);
            new_elem.* = ptr_val.toIntern();
        }
        return Air.internedToRef(try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = new_elems },
        } }));
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    if (!is_vector) {
        // Safety checks only matter when the pointee has runtime bits (or is
        // a function): non-allowzero pointers must be nonzero, and addresses
        // must satisfy the pointer's alignment.
        if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
            if (!ptr_ty.isAllowzeroPtr(mod)) {
                const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
                try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
            }
            if (ptr_align.compare(.gt, .@"1")) {
                // Alignment is a power of two, so test `addr & (align - 1) == 0`.
                const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
                const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
                const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
                try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
            }
        }
        return block.addBitCast(dest_ty, operand_coerced);
    }

    // Vector path: per-element safety checks mirroring the scalar case.
    const len = dest_ty.vectorLen(mod);
    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
        for (0..len) |i| {
            const idx_ref = try mod.intRef(Type.usize, i);
            const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
            if (!ptr_ty.isAllowzeroPtr(mod)) {
                const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
                try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
            }
            if (ptr_align.compare(.gt, .@"1")) {
                const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
                const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
                const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
                try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
            }
        }
    }

    // Bitcast each element to the pointer type and aggregate the result.
    const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
    for (new_elems, 0..) |*new_elem, i| {
        const idx_ref = try mod.intRef(Type.usize, i);
        const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
        new_elem.* = try block.addBitCast(ptr_ty, old_elem);
    }
    return block.addAggregateInit(dest_ty, new_elems);
}
|
|
|
|
/// Comptime evaluation of `@ptrFromInt`: turns the integer `operand_val` into
/// a value of pointer (or optional pointer) type `ptr_ty`, failing if the
/// address violates allowzero or alignment requirements.
fn ptrFromIntVal(
    sema: *Sema,
    block: *Block,
    operand_src: LazySrcLoc,
    operand_val: Value,
    ptr_ty: Type,
    ptr_align: Alignment,
) !Value {
    const mod = sema.mod;
    const addr = try operand_val.toUnsignedIntAdvanced(sema);

    // Address zero is only valid for allowzero pointers.
    if (addr == 0 and !ptr_ty.isAllowzeroPtr(mod)) {
        return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
    }
    // Nonzero addresses must satisfy the pointer's alignment, when specified.
    if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) {
        return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
    }

    switch (ptr_ty.zigTypeTag(mod)) {
        .Optional => {
            // Optional pointer: address zero maps to `null`.
            const payload: InternPool.Index = if (addr == 0)
                .none
            else
                (try mod.ptrIntValue(ptr_ty.childType(mod), addr)).toIntern();
            return Value.fromInterned((try mod.intern(.{ .opt = .{
                .ty = ptr_ty.toIntern(),
                .val = payload,
            } })));
        },
        .Pointer => return try mod.ptrIntValue(ptr_ty, addr),
        else => unreachable,
    }
}
|
|
|
|
/// Analyzes `@errorCast`: casts between error set types, or between error
/// union types whose error sets differ. Comptime-known operands are verified
/// to be members of the destination set and folded; runtime casts lower to a
/// bitcast, optionally preceded by an `invalid_error_code` safety check.
fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const base_dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_opt, "@errorCast");
    const operand = try sema.resolveInst(extra.rhs);
    const base_operand_ty = sema.typeOf(operand);
    const dest_tag = base_dest_ty.zigTypeTag(mod);
    const operand_tag = base_operand_ty.zigTypeTag(mod);
    // Source and destination must both be error sets, or both error unions.
    if (dest_tag != operand_tag) {
        return sema.fail(block, src, "expected source and destination types to match, found '{s}' and '{s}'", .{
            @tagName(operand_tag), @tagName(dest_tag),
        });
    } else if (dest_tag != .ErrorSet and dest_tag != .ErrorUnion) {
        return sema.fail(block, src, "expected error set or error union type, found '{s}'", .{@tagName(dest_tag)});
    }
    // From here on, work with the error-set component of each type.
    const dest_ty, const operand_ty = if (dest_tag == .ErrorUnion) .{
        base_dest_ty.errorUnionSet(mod),
        base_operand_ty.errorUnionSet(mod),
    } else .{
        base_dest_ty,
        base_operand_ty,
    };

    // operand must be defined since it can be an invalid error value
    const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand);

    // Determine whether the two error sets share no members at all.
    const disjoint = disjoint: {
        // Try avoiding resolving inferred error sets if we can
        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetIsEmpty(mod)) break :disjoint true;
        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetIsEmpty(mod)) break :disjoint true;
        if (dest_ty.isAnyError(mod)) break :disjoint false;
        if (operand_ty.isAnyError(mod)) break :disjoint false;
        const dest_err_names = dest_ty.errorSetNames(mod);
        for (0..dest_err_names.len) |dest_err_index| {
            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_names.get(ip)[dest_err_index]))
                break :disjoint false;
        }

        // No overlap so far; without inferred error sets involved, that is
        // conclusive.
        if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and
            !ip.isInferredErrorSetType(operand_ty.toIntern()))
        {
            break :disjoint true;
        }

        // Resolve inferred error sets and re-run the membership scan.
        _ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern());
        _ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern());
        for (0..dest_err_names.len) |dest_err_index| {
            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_names.get(ip)[dest_err_index]))
                break :disjoint false;
        }

        break :disjoint true;
    };
    // Disjoint error sets make the cast impossible — except for error unions,
    // where the operand may hold a payload rather than an error.
    if (disjoint and dest_tag != .ErrorUnion) {
        return sema.fail(block, src, "error sets '{}' and '{}' have no common errors", .{
            operand_ty.fmt(sema.mod), dest_ty.fmt(sema.mod),
        });
    }

    // Comptime path: verify set membership of the actual error, then coerce.
    if (maybe_operand_val) |val| {
        if (!dest_ty.isAnyError(mod)) check: {
            const operand_val = mod.intern_pool.indexToKey(val.toIntern());
            var error_name: InternPool.NullTerminatedString = undefined;
            if (dest_tag == .ErrorUnion) {
                // A payload (non-error) value always casts successfully.
                if (operand_val.error_union.val != .err_name) break :check;
                error_name = operand_val.error_union.val.err_name;
            } else {
                error_name = operand_val.err.name;
            }
            if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) {
                return sema.fail(block, src, "'error.{}' not a member of error set '{}'", .{
                    error_name.fmt(ip), dest_ty.fmt(sema.mod),
                });
            }
        }

        return Air.internedToRef((try mod.getCoerced(val, base_dest_ty)).toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    const err_int_ty = try mod.errorIntType();
    // Runtime safety: verify the error value is representable in the
    // destination set, when the backend can perform that check.
    if (block.wantSafety() and !dest_ty.isAnyError(mod) and
        dest_ty.toIntern() != .adhoc_inferred_error_set_type and
        sema.mod.backendSupportsFeature(.error_set_has_value))
    {
        if (dest_tag == .ErrorUnion) {
            const err_code = try sema.analyzeErrUnionCode(block, operand_src, operand);
            const err_int = try block.addBitCast(err_int_ty, err_code);
            const zero_err = try mod.intRef(try mod.errorIntType(), 0);

            const is_zero = try block.addBinOp(.cmp_eq, err_int, zero_err);
            if (disjoint) {
                // Error must be zero.
                try sema.addSafetyCheck(block, src, is_zero, .invalid_error_code);
            } else {
                // Error must be in destination set or zero.
                const has_value = try block.addTyOp(.error_set_has_value, dest_ty, err_code);
                const ok = try block.addBinOp(.bool_or, has_value, is_zero);
                try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
            }
        } else {
            const err_int_inst = try block.addBitCast(err_int_ty, operand);
            const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
            try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
        }
    }
    return block.addBitCast(base_dest_ty, operand);
}
|
|
|
|
/// Analyzes the extended pointer-cast instruction that can combine multiple
/// cast flavors (flags packed into `extended.small`), delegating the actual
/// checking and lowering to `ptrCastFull`.
fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    // The cast flags are bit-packed into the low bits of `small`.
    const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
    const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
    const bin_node = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(bin_node.node);
    const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = bin_node.node };
    const operand = try sema.resolveInst(bin_node.rhs);
    const dest_ty = try sema.resolveDestType(block, src, bin_node.lhs, .remove_eu, flags.needResultTypeBuiltinName());
    return sema.ptrCastFull(block, flags, src, operand, operand_src, dest_ty);
}
|
|
|
|
/// Analyzes a plain `@ptrCast`, delegating to `ptrCastFull` with only the
/// `ptr_cast` flag set.
fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const bin = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveDestType(block, src, bin.lhs, .remove_eu, "@ptrCast");
    const operand = try sema.resolveInst(bin.rhs);
    return sema.ptrCastFull(block, .{ .ptr_cast = true }, src, operand, operand_src, dest_ty);
}
|
|
|
|
fn ptrCastFull(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
flags: Zir.Inst.FullPtrCastFlags,
|
|
src: LazySrcLoc,
|
|
operand: Air.Inst.Ref,
|
|
operand_src: LazySrcLoc,
|
|
dest_ty: Type,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const operand_ty = sema.typeOf(operand);
|
|
|
|
try sema.checkPtrType(block, src, dest_ty, true);
|
|
try sema.checkPtrOperand(block, operand_src, operand_ty);
|
|
|
|
const src_info = operand_ty.ptrInfo(mod);
|
|
const dest_info = dest_ty.ptrInfo(mod);
|
|
|
|
try sema.resolveTypeLayout(Type.fromInterned(src_info.child));
|
|
try sema.resolveTypeLayout(Type.fromInterned(dest_info.child));
|
|
|
|
const src_slice_like = src_info.flags.size == .Slice or
|
|
(src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
|
|
|
|
const dest_slice_like = dest_info.flags.size == .Slice or
|
|
(dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Array);
|
|
|
|
if (dest_info.flags.size == .Slice and !src_slice_like) {
|
|
return sema.fail(block, src, "illegal pointer cast to slice", .{});
|
|
}
|
|
|
|
if (dest_info.flags.size == .Slice) {
|
|
const src_elem_size = switch (src_info.flags.size) {
|
|
.Slice => Type.fromInterned(src_info.child).abiSize(mod),
|
|
// pointer to array
|
|
.One => Type.fromInterned(src_info.child).childType(mod).abiSize(mod),
|
|
else => unreachable,
|
|
};
|
|
const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(mod);
|
|
if (src_elem_size != dest_elem_size) {
|
|
return sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{});
|
|
}
|
|
}
|
|
|
|
// The checking logic in this function must stay in sync with Sema.coerceInMemoryAllowedPtrs
|
|
|
|
if (!flags.ptr_cast) {
|
|
check_size: {
|
|
if (src_info.flags.size == dest_info.flags.size) break :check_size;
|
|
if (src_slice_like and dest_slice_like) break :check_size;
|
|
if (src_info.flags.size == .C) break :check_size;
|
|
if (dest_info.flags.size == .C) break :check_size;
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "cannot implicitly convert {s} pointer to {s} pointer", .{
|
|
pointerSizeString(src_info.flags.size),
|
|
pointerSizeString(dest_info.flags.size),
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
if (dest_info.flags.size == .Many and
|
|
(src_info.flags.size == .Slice or
|
|
(src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array)))
|
|
{
|
|
try sema.errNote(block, src, msg, "use 'ptr' field to convert slice to many pointer", .{});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "use @ptrCast to change pointer size", .{});
|
|
}
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
check_child: {
|
|
const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: {
|
|
// *[n]T -> []T
|
|
break :blk Type.fromInterned(src_info.child).childType(mod);
|
|
} else Type.fromInterned(src_info.child);
|
|
|
|
const dest_child = Type.fromInterned(dest_info.child);
|
|
|
|
const imc_res = try sema.coerceInMemoryAllowed(
|
|
block,
|
|
dest_child,
|
|
src_child,
|
|
!dest_info.flags.is_const,
|
|
mod.getTarget(),
|
|
src,
|
|
operand_src,
|
|
);
|
|
if (imc_res == .ok) break :check_child;
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "pointer element type '{}' cannot coerce into element type '{}'", .{
|
|
src_child.fmt(mod),
|
|
dest_child.fmt(mod),
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try imc_res.report(sema, block, src, msg);
|
|
try sema.errNote(block, src, msg, "use @ptrCast to cast pointer element type", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
check_sent: {
|
|
if (dest_info.sentinel == .none) break :check_sent;
|
|
if (src_info.flags.size == .C) break :check_sent;
|
|
if (src_info.sentinel != .none) {
|
|
const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child);
|
|
if (dest_info.sentinel == coerced_sent) break :check_sent;
|
|
}
|
|
if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) {
|
|
// [*]nT -> []T
|
|
const arr_ty = Type.fromInterned(src_info.child);
|
|
if (arr_ty.sentinel(mod)) |src_sentinel| {
|
|
const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child);
|
|
if (dest_info.sentinel == coerced_sent) break :check_sent;
|
|
}
|
|
}
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = if (src_info.sentinel == .none) blk: {
|
|
break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{
|
|
Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
|
|
});
|
|
} else blk: {
|
|
break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
|
|
Value.fromInterned(src_info.sentinel).fmtValue(Type.fromInterned(src_info.child), mod),
|
|
Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
|
|
});
|
|
};
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @ptrCast to cast pointer sentinel", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{
|
|
src_info.packed_offset.host_size,
|
|
dest_info.packed_offset.host_size,
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @ptrCast to cast pointer host size", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{
|
|
src_info.packed_offset.bit_offset,
|
|
dest_info.packed_offset.bit_offset,
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @ptrCast to cast pointer bit offset", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
check_allowzero: {
|
|
const src_allows_zero = operand_ty.ptrAllowsZero(mod);
|
|
const dest_allows_zero = dest_ty.ptrAllowsZero(mod);
|
|
if (!src_allows_zero) break :check_allowzero;
|
|
if (dest_allows_zero) break :check_allowzero;
|
|
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "'{}' could have null values which are illegal in type '{}'", .{
|
|
operand_ty.fmt(mod),
|
|
dest_ty.fmt(mod),
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @ptrCast to assert the pointer is not null", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
|
|
// TODO: vector index?
|
|
}
|
|
|
|
const src_align = if (src_info.flags.alignment != .none)
|
|
src_info.flags.alignment
|
|
else
|
|
Type.fromInterned(src_info.child).abiAlignment(mod);
|
|
|
|
const dest_align = if (dest_info.flags.alignment != .none)
|
|
dest_info.flags.alignment
|
|
else
|
|
Type.fromInterned(dest_info.child).abiAlignment(mod);
|
|
|
|
if (!flags.align_cast) {
|
|
if (dest_align.compare(.gt, src_align)) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
|
|
operand_ty.fmt(mod), src_align.toByteUnits(0),
|
|
});
|
|
try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
|
|
dest_ty.fmt(mod), dest_align.toByteUnits(0),
|
|
});
|
|
try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
}
|
|
|
|
if (!flags.addrspace_cast) {
|
|
if (src_info.flags.address_space != dest_info.flags.address_space) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, operand_src, msg, "'{}' has address space '{s}'", .{
|
|
operand_ty.fmt(mod), @tagName(src_info.flags.address_space),
|
|
});
|
|
try sema.errNote(block, src, msg, "'{}' has address space '{s}'", .{
|
|
dest_ty.fmt(mod), @tagName(dest_info.flags.address_space),
|
|
});
|
|
try sema.errNote(block, src, msg, "use @addrSpaceCast to cast pointer address space", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
} else {
|
|
// Some address space casts are always disallowed
|
|
if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "invalid address space cast", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, operand_src, msg, "address space '{s}' is not compatible with address space '{s}'", .{
|
|
@tagName(src_info.flags.address_space),
|
|
@tagName(dest_info.flags.address_space),
|
|
});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
}
|
|
|
|
if (!flags.const_cast) {
|
|
if (src_info.flags.is_const and !dest_info.flags.is_const) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @constCast to discard const qualifier", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
}
|
|
|
|
if (!flags.volatile_cast) {
|
|
if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) {
|
|
return sema.failWithOwnedErrorMsg(block, msg: {
|
|
const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "use @volatileCast to discard volatile qualifier", .{});
|
|
break :msg msg;
|
|
});
|
|
}
|
|
}
|
|
|
|
const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: {
|
|
break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty);
|
|
} else operand;
|
|
|
|
const dest_ptr_ty = if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) blk: {
|
|
// Only convert to a many-pointer at first
|
|
var info = dest_info;
|
|
info.flags.size = .Many;
|
|
const ty = try sema.ptrType(info);
|
|
if (dest_ty.zigTypeTag(mod) == .Optional) {
|
|
break :blk try mod.optionalType(ty.toIntern());
|
|
} else {
|
|
break :blk ty;
|
|
}
|
|
} else dest_ty;
|
|
|
|
// Cannot do @addrSpaceCast at comptime
|
|
if (!flags.addrspace_cast) {
|
|
if (try sema.resolveValue(ptr)) |ptr_val| {
|
|
if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) {
|
|
return sema.failWithUseOfUndef(block, operand_src);
|
|
}
|
|
if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
|
|
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
|
|
}
|
|
if (dest_align.compare(.gt, src_align)) {
|
|
if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
|
|
if (!dest_align.check(addr)) {
|
|
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
|
|
addr,
|
|
dest_align.toByteUnitsOptional().?,
|
|
});
|
|
}
|
|
}
|
|
}
|
|
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
|
|
if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty);
|
|
const arr_len = try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
|
|
return Air.internedToRef((try mod.intern(.{ .slice = .{
|
|
.ty = dest_ty.toIntern(),
|
|
.ptr = try mod.intern(.{ .ptr = .{
|
|
.ty = dest_ty.slicePtrFieldType(mod).toIntern(),
|
|
.addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr,
|
|
} }),
|
|
.len = arr_len.toIntern(),
|
|
} })));
|
|
} else {
|
|
assert(dest_ptr_ty.eql(dest_ty, mod));
|
|
return Air.internedToRef((try mod.getCoerced(ptr_val, dest_ty)).toIntern());
|
|
}
|
|
}
|
|
}
|
|
|
|
try sema.requireRuntimeBlock(block, src, null);
|
|
|
|
if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
|
|
(try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)) or Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Fn))
|
|
{
|
|
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
|
|
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
|
|
const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: {
|
|
const len = try sema.analyzeSliceLen(block, operand_src, ptr);
|
|
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
|
|
break :ok try block.addBinOp(.bool_or, len_zero, is_non_zero);
|
|
} else is_non_zero;
|
|
try sema.addSafetyCheck(block, src, ok, .cast_to_null);
|
|
}
|
|
|
|
if (block.wantSafety() and
|
|
dest_align.compare(.gt, src_align) and
|
|
try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
|
|
{
|
|
const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
|
|
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
|
|
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
|
|
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
|
|
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
|
|
const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: {
|
|
const len = try sema.analyzeSliceLen(block, operand_src, ptr);
|
|
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
|
|
break :ok try block.addBinOp(.bool_or, len_zero, is_aligned);
|
|
} else is_aligned;
|
|
try sema.addSafetyCheck(block, src, ok, .incorrect_alignment);
|
|
}
|
|
|
|
// If we're going from an array pointer to a slice, this will only be the pointer part!
|
|
const result_ptr = if (flags.addrspace_cast) ptr: {
|
|
// We can't change address spaces with a bitcast, so this requires two instructions
|
|
var intermediate_info = src_info;
|
|
intermediate_info.flags.address_space = dest_info.flags.address_space;
|
|
const intermediate_ptr_ty = try sema.ptrType(intermediate_info);
|
|
const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
|
|
break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
|
|
} else intermediate_ptr_ty;
|
|
const intermediate = try block.addInst(.{
|
|
.tag = .addrspace_cast,
|
|
.data = .{ .ty_op = .{
|
|
.ty = Air.internedToRef(intermediate_ty.toIntern()),
|
|
.operand = ptr,
|
|
} },
|
|
});
|
|
if (intermediate_ty.eql(dest_ptr_ty, mod)) {
|
|
// We only changed the address space, so no need for a bitcast
|
|
break :ptr intermediate;
|
|
}
|
|
break :ptr try block.addBitCast(dest_ptr_ty, intermediate);
|
|
} else ptr: {
|
|
break :ptr try block.addBitCast(dest_ptr_ty, ptr);
|
|
};
|
|
|
|
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
|
|
// We have to construct a slice using the operand's child's array length
|
|
// Note that we know from the check at the start of the function that operand_ty is slice-like
|
|
const arr_len = Air.internedToRef((try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern());
|
|
return block.addInst(.{
|
|
.tag = .slice,
|
|
.data = .{ .ty_pl = .{
|
|
.ty = Air.internedToRef(dest_ty.toIntern()),
|
|
.payload = try sema.addExtra(Air.Bin{
|
|
.lhs = result_ptr,
|
|
.rhs = arr_len,
|
|
}),
|
|
} },
|
|
});
|
|
} else {
|
|
assert(dest_ptr_ty.eql(dest_ty, mod));
|
|
try sema.checkKnownAllocPtr(operand, result_ptr);
|
|
return result_ptr;
|
|
}
|
|
}
|
|
|
|
/// Analyzes the pointer-cast builtins that do not take a destination type
/// operand (`@constCast` / `@volatileCast`): the result type is the operand's
/// own pointer type with the corresponding qualifier flag(s) cleared.
fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    // Which qualifier removals were requested is packed into the `small`
    // field of the extended instruction.
    const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(
        @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?,
        @truncate(extended.small),
    ));
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = extra.node };
    const operand = try sema.resolveInst(extra.operand);
    const operand_ty = sema.typeOf(operand);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

    // Start from the operand's pointer info and drop the requested qualifiers.
    var ptr_info = operand_ty.ptrInfo(mod);
    if (flags.const_cast) ptr_info.flags.is_const = false;
    if (flags.volatile_cast) ptr_info.flags.is_volatile = false;

    // Re-wrap in an optional if the operand was an optional pointer.
    const dest_ty = blk: {
        const dest_ty = try sema.ptrType(ptr_info);
        if (operand_ty.zigTypeTag(mod) == .Optional) {
            break :blk try mod.optionalType(dest_ty.toIntern());
        }
        break :blk dest_ty;
    };

    // Comptime-known operand: the cast is a plain value coercion.
    if (try sema.resolveValue(operand)) |operand_val| {
        return Air.internedToRef((try mod.getCoerced(operand_val, dest_ty)).toIntern());
    }

    try sema.requireRuntimeBlock(block, src, null);
    const new_ptr = try block.addBitCast(dest_ty, operand);
    try sema.checkKnownAllocPtr(operand, new_ptr);
    return new_ptr;
}
|
|
|
|
/// Implements the `@truncate` builtin: converts an integer (or vector of
/// integers) to a destination integer type of the same signedness and no
/// greater bit width, discarding the most significant bits.
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@truncate");
    const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, src);
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);

    // Operand and destination must both be vectors or both be scalars.
    const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector;
    const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector;
    if (operand_is_vector != dest_is_vector) {
        return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) });
    }

    // Truncating to comptime_int cannot discard bits; it is a plain coercion.
    if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }

    const dest_info = dest_scalar_ty.intInfo(mod);

    if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
        const operand_info = operand_ty.intInfo(mod);
        if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
            return Air.internedToRef(val.toIntern());
        }

        // Signedness must match, and widening is not allowed.
        if (operand_info.signedness != dest_info.signedness) {
            return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
                @tagName(dest_info.signedness), operand_ty.fmt(mod),
            });
        }
        if (operand_info.bits < dest_info.bits) {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "destination type '{}' has more bits than source type '{}'",
                    .{ dest_ty.fmt(mod), operand_ty.fmt(mod) },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "destination type has {d} bits", .{
                    dest_info.bits,
                });
                try sema.errNote(block, operand_src, msg, "operand type has {d} bits", .{
                    operand_info.bits,
                });
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    }

    // Comptime path: perform the truncation on the value (element-wise for
    // vectors) and intern the result.
    if (try sema.resolveValueIntable(operand)) |val| {
        if (val.isUndef(mod)) return mod.undefRef(dest_ty);
        if (!dest_is_vector) {
            return Air.internedToRef((try mod.getCoerced(
                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
                dest_ty,
            )).toIntern());
        }
        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
        for (elems, 0..) |*elem, i| {
            const elem_val = try val.elemValue(mod, i);
            elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod);
        }
        return Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = elems },
        } })));
    }

    // Runtime path: emit a `trunc` AIR instruction.
    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addTyOp(.trunc, dest_ty, operand);
}
|
|
|
|
/// Common implementation for the bit-counting builtins. `air_tag` selects the
/// AIR instruction used on the runtime path, and `comptimeOp` computes the
/// count for comptime-known operands. The result type is the smallest
/// unsigned integer able to hold the operand's bit width (the maximum count).
fn zirBitCount(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
    comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    _ = try sema.checkIntOrVector(block, operand, operand_src);
    const bits = operand_ty.intInfo(mod).bits;

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const result_scalar_ty = try mod.smallestUnsignedInt(bits);
    switch (operand_ty.zigTypeTag(mod)) {
        .Vector => {
            // Vector operand: the result is a vector of counts, one per lane.
            const vec_len = operand_ty.vectorLen(mod);
            const result_ty = try mod.vectorType(.{
                .len = vec_len,
                .child = result_scalar_ty.toIntern(),
            });
            if (try sema.resolveValue(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(result_ty);

                // Comptime: apply the operation element-wise.
                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                const scalar_ty = operand_ty.scalarType(mod);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    const count = comptimeOp(elem_val, scalar_ty, mod);
                    elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern();
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = result_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else {
                try sema.requireRuntimeBlock(block, src, operand_src);
                return block.addTyOp(air_tag, result_ty, operand);
            }
        },
        .Int => {
            if (try sema.resolveValueResolveLazy(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(result_scalar_ty);
                return mod.intRef(result_scalar_ty, comptimeOp(val, operand_ty, mod));
            } else {
                try sema.requireRuntimeBlock(block, src, operand_src);
                return block.addTyOp(air_tag, result_scalar_ty, operand);
            }
        },
        // `checkIntOrVector` above guarantees the operand is an integer or a
        // vector of integers.
        else => unreachable,
    }
}
|
|
|
|
/// Implements the `@byteSwap` builtin: reverses the byte order of an integer,
/// or of every element of a vector of integers. The scalar bit width must be
/// evenly divisible by 8.
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
    const bits = scalar_ty.intInfo(mod).bits;
    if (bits % 8 != 0) {
        return sema.fail(
            block,
            operand_src,
            "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
            .{ scalar_ty.fmt(mod), bits },
        );
    }

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    switch (operand_ty.zigTypeTag(mod)) {
        .Int => {
            // Comptime-known operand: compute the swapped value directly.
            const runtime_src = if (try sema.resolveValue(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(operand_ty);
                const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
                return Air.internedToRef(result_val.toIntern());
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.byte_swap, operand_ty, operand);
        },
        .Vector => {
            const runtime_src = if (try sema.resolveValue(operand)) |val| {
                if (val.isUndef(mod))
                    return mod.undefRef(operand_ty);

                // Comptime: swap each element individually.
                const vec_len = operand_ty.vectorLen(mod);
                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = operand_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.byte_swap, operand_ty, operand);
        },
        // `checkIntOrVector` above guarantees an int or vector-of-int type.
        else => unreachable,
    }
}
|
|
|
|
/// Implements the `@bitReverse` builtin: reverses the bit order of an
/// integer, or of every element of a vector of integers.
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const mod = sema.mod;
    switch (operand_ty.zigTypeTag(mod)) {
        .Int => {
            // Comptime-known operand: compute the reversed value directly.
            const runtime_src = if (try sema.resolveValue(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(operand_ty);
                const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
                return Air.internedToRef(result_val.toIntern());
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.bit_reverse, operand_ty, operand);
        },
        .Vector => {
            const runtime_src = if (try sema.resolveValue(operand)) |val| {
                if (val.isUndef(mod))
                    return mod.undefRef(operand_ty);

                // Comptime: reverse each element individually.
                const vec_len = operand_ty.vectorLen(mod);
                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = operand_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.bit_reverse, operand_ty, operand);
        },
        // `checkIntOrVector` above guarantees an int or vector-of-int type.
        else => unreachable,
    }
}
|
|
|
|
/// Implements the `@bitOffsetOf` builtin: returns the field's offset from the
/// start of its struct, measured in bits, as a comptime_int.
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const bit_offset = try sema.bitOffsetOf(block, inst);
    return sema.mod.intRef(Type.comptime_int, bit_offset);
}
|
|
|
|
/// Implements the `@offsetOf` builtin: computes the field's bit offset and
/// converts it to a byte offset, returned as a comptime_int.
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const bit_offset = try sema.bitOffsetOf(block, inst);
    // TODO reminder to make this a compile error for packed structs
    const byte_offset = bit_offset / 8;
    return sema.mod.intRef(Type.comptime_int, byte_offset);
}
|
|
|
|
/// Shared implementation of `@bitOffsetOf` and `@offsetOf`: resolves the
/// struct type and field name operands and returns the field's offset from
/// the start of the struct, in bits.
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const ty = try sema.resolveType(block, lhs_src, extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, .{
        .needed_comptime_reason = "name of field must be comptime-known",
    });

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // Layout must be fully resolved before any field offset can be queried.
    try sema.resolveTypeLayout(ty);
    switch (ty.zigTypeTag(mod)) {
        .Struct => {},
        else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}),
    }

    const field_index = if (ty.isTuple(mod)) blk: {
        // Tuples expose a synthetic `len` field, which has no offset.
        if (ip.stringEqlSlice(field_name, "len")) {
            return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
        }
        break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
    } else try sema.structFieldIndex(block, ty, field_name, rhs_src);

    // Comptime fields occupy no storage, so they have no offset either.
    if (ty.structFieldIsComptime(field_index, mod)) {
        return sema.fail(block, src, "no offset available for comptime field", .{});
    }

    switch (ty.containerLayout(mod)) {
        .Packed => {
            // Packed structs have no padding: a field's bit offset is the sum
            // of the bit sizes of all fields preceding it.
            var bit_sum: u64 = 0;
            const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
            for (0..struct_type.field_types.len) |i| {
                if (i == field_index) {
                    return bit_sum;
                }
                const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                bit_sum += field_ty.bitSize(mod);
            } else unreachable; // field_index was validated above
        },
        // Non-packed layouts track byte offsets; convert to bits.
        else => return ty.structFieldOffset(field_index, mod) * 8,
    }
}
|
|
|
|
/// Verifies that `ty` is a container type that can act as a namespace
/// (struct, enum, union, or opaque); emits a compile error otherwise.
fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    const mod = sema.mod;
    const is_namespace = switch (ty.zigTypeTag(mod)) {
        .Struct, .Enum, .Union, .Opaque => true,
        else => false,
    };
    if (is_namespace) return;
    return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)});
}
|
|
|
|
/// Verifies that `ty` is an integer type; emits a compile error otherwise.
/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
    const mod = sema.mod;
    switch (try ty.zigTypeTagOrPoison(mod)) {
        .ComptimeInt => return true,
        .Int => return false,
        else => {},
    }
    return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)});
}
|
|
|
|
/// Emits a compile error when the ordinary arithmetic operators are applied
/// to a many-item or C pointer; all other types pass through unchanged.
fn checkInvalidPtrArithmetic(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    if (try ty.zigTypeTagOrPoison(mod) != .Pointer) return;
    switch (ty.ptrSize(mod)) {
        .One, .Slice => return,
        .Many, .C => return sema.fail(
            block,
            src,
            "invalid pointer arithmetic operator",
            .{},
        ),
    }
}
|
|
|
|
/// Validates that the scalar type of a binary arithmetic expression is
/// numeric: any integer type is accepted, while float types are accepted
/// only when the specific operation is defined for floats.
fn checkArithmeticOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    scalar_tag: std.builtin.TypeId,
    lhs_zig_ty_tag: std.builtin.TypeId,
    rhs_zig_ty_tag: std.builtin.TypeId,
    zir_tag: Zir.Inst.Tag,
) CompileError!void {
    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
    const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;

    // Integers are always fine; floats only for float-capable operations.
    if (is_int) return;
    if (is_float and floatOpAllowed(zir_tag)) return;

    return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{
        @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag),
    });
}
|
|
|
|
/// Verifies that `ty` is a pointer type (or an optional wrapping one);
/// emits a compile error otherwise. A function type gets a dedicated hint
/// suggesting `&` to obtain a function pointer.
fn checkPtrOperand(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => return,
        .Fn => return sema.failWithOwnedErrorMsg(block, msg: {
            const msg = try sema.errMsg(
                block,
                ty_src,
                "expected pointer, found '{}'",
                .{ty.fmt(mod)},
            );
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, ty_src, msg, "use '&' to obtain a function pointer", .{});

            break :msg msg;
        }),
        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
        else => {},
    }
    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
}
|
|
|
|
/// Verifies that `ty` is a pointer type (or an optional wrapping one),
/// optionally permitting slices; emits a compile error otherwise. A function
/// type gets a dedicated hint suggesting a `*const` function pointer type.
fn checkPtrType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
    allow_slice: bool,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => if (allow_slice or !ty.isSlice(mod)) return,
        .Fn => return sema.failWithOwnedErrorMsg(block, msg: {
            const msg = try sema.errMsg(
                block,
                ty_src,
                "expected pointer type, found '{}'",
                .{ty.fmt(mod)},
            );
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, ty_src, msg, "use '*const ' to make a function pointer type", .{});

            break :msg msg;
        }),
        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
        else => {},
    }
    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
}
|
|
|
|
/// Verifies that `ty` is valid as a vector element type: integers, floats,
/// bools, and pointer-like types with a runtime representation are accepted.
/// Emits a compile error otherwise.
fn checkVectorElemType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    const valid = switch (ty.zigTypeTag(mod)) {
        .Int, .Float, .Bool => true,
        .Optional, .Pointer => ty.isPtrAtRuntime(mod),
        else => false,
    };
    if (valid) return;
    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)});
}
|
|
|
|
/// Verifies that `ty` is usable where a float is expected; comptime_int is
/// also accepted here. Emits a compile error otherwise.
fn checkFloatType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ComptimeInt, .ComptimeFloat, .Float => return,
        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}),
    }
}
|
|
|
|
/// Verifies that `ty` is a numeric type (integer or float, comptime or
/// runtime) or a vector of numeric types. Emits a compile error otherwise.
fn checkNumericType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float, .ComptimeInt, .Int => return,
        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
            .ComptimeFloat, .Float, .ComptimeInt, .Int => return,
            // NOTE(review): this arm formats the element's type *tag* rather
            // than the full element type — confirm whether that is intended.
            else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
        },
        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}),
    }
}
|
|
|
|
/// Returns the casted pointer.
/// Validates that `ptr` can be used for an atomic operation on `elem_ty` and
/// coerces it to a pointer type carrying the alignment the target requires
/// for atomic access, while preserving the operand's address space,
/// allowzero, and volatile qualifiers.
fn checkAtomicPtrOperand(
    sema: *Sema,
    block: *Block,
    elem_ty: Type,
    elem_ty_src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
    ptr_const: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    // Ask the target for the required atomic alignment; `diag` carries the
    // details for each kind of rejection.
    var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
    const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.FloatTooBig => return sema.fail(
            block,
            elem_ty_src,
            "expected {d}-bit float type or smaller; found {d}-bit float type",
            .{ diag.max_bits, diag.bits },
        ),
        error.IntTooBig => return sema.fail(
            block,
            elem_ty_src,
            "expected {d}-bit integer type or smaller; found {d}-bit integer type",
            .{ diag.max_bits, diag.bits },
        ),
        error.BadType => return sema.fail(
            block,
            elem_ty_src,
            "expected bool, integer, float, enum, or pointer type; found '{}'",
            .{elem_ty.fmt(mod)},
        ),
    };

    // The pointer type we ultimately want: same element type, atomic
    // alignment, and the caller-requested constness.
    var wanted_ptr_data: InternPool.Key.PtrType = .{
        .child = elem_ty.toIntern(),
        .flags = .{
            .alignment = alignment,
            .is_const = ptr_const,
        },
    };

    const ptr_ty = sema.typeOf(ptr);
    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
        .Pointer => ptr_ty.ptrInfo(mod),
        else => {
            // Not a pointer at all: let `coerce` produce the error message.
            const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
            _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
            unreachable;
        },
    };

    // Carry over qualifiers from the operand that do not affect atomicity.
    wanted_ptr_data.flags.address_space = ptr_data.flags.address_space;
    wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
    wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;

    const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
    const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);

    return casted_ptr;
}
|
|
|
|
/// Emits a compile error when `ptr_val` points at comptime-mutable memory,
/// since storing a runtime value through such a pointer is not allowed.
fn checkPtrIsNotComptimeMutable(
    sema: *Sema,
    block: *Block,
    ptr_val: Value,
    ptr_src: LazySrcLoc,
    operand_src: LazySrcLoc,
) CompileError!void {
    _ = operand_src;
    if (!ptr_val.isComptimeMutablePtr(sema.mod)) return;
    return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
}
|
|
|
|
/// Validates a store to a comptime variable (`decl_ref_mut`). The store is
/// invalid when the variable was created before the current block entered
/// runtime-known control flow (a runtime condition or a non-inline loop);
/// in that case a compile error pointing at the offending construct is
/// emitted.
fn checkComptimeVarStore(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
) CompileError!void {
    // A smaller runtime index on the variable than on the block means the
    // store crosses from comptime-known into runtime-conditional code.
    if (@intFromEnum(decl_ref_mut.runtime_index) < @intFromEnum(block.runtime_index)) {
        if (block.runtime_cond) |cond_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.mod.errNoteNonLazy(cond_src, msg, "runtime condition here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        if (block.runtime_loop) |loop_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.mod.errNoteNonLazy(loop_src, msg, "non-inline loop here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        // Invariant: whenever the block's runtime_index has been advanced,
        // either runtime_cond or runtime_loop records why.
        unreachable;
    }
}
|
|
|
|
/// Validates that `operand` has an integer type or a vector-of-integers type.
/// Returns the scalar integer type: the operand type itself for integers, or
/// the element type for vectors. Emits a compile error otherwise.
fn checkIntOrVector(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
) CompileError!Type {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
        .Int => return operand_ty,
        .Vector => {
            const elem_ty = operand_ty.childType(mod);
            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                .Int => return elem_ty,
                else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                    elem_ty.fmt(mod),
                }),
            }
        },
        else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
            operand_ty.fmt(mod),
        }),
    }
}
|
|
|
|
/// Same as `checkIntOrVector`, but operates on a type directly and also
/// accepts `comptime_int` (both as a scalar and as a vector element type).
/// Returns the scalar type on success; emits a compile error otherwise.
fn checkIntOrVectorAllowComptime(
    sema: *Sema,
    block: *Block,
    operand_ty: Type,
    operand_src: LazySrcLoc,
) CompileError!Type {
    const mod = sema.mod;
    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
        .Int, .ComptimeInt => return operand_ty,
        .Vector => {
            const elem_ty = operand_ty.childType(mod);
            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                .Int, .ComptimeInt => return elem_ty,
                else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                    elem_ty.fmt(mod),
                }),
            }
        },
        else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
            operand_ty.fmt(mod),
        }),
    }
}
|
|
|
|
/// Result of validating/coercing the operands of a binary SIMD
/// (scalar-or-vector) operation. Produced by `checkSimdBinOp`.
const SimdBinOp = struct {
    /// Vector length, or `null` when this is a scalar operation.
    len: ?usize,
    /// Coerced to `result_ty`.
    lhs: Air.Inst.Ref,
    /// Coerced to `result_ty`.
    rhs: Air.Inst.Ref,
    /// Comptime-known value of `lhs`, if any.
    lhs_val: ?Value,
    /// Comptime-known value of `rhs`, if any.
    rhs_val: ?Value,
    /// Only different than `scalar_ty` when it is a vector operation.
    result_ty: Type,
    /// Element type of `result_ty` for vectors; equal to `result_ty` otherwise.
    scalar_ty: Type,
};
|
|
|
|
/// Validates and prepares the two operands of a binary SIMD operation:
/// checks scalar/vector compatibility, peer-resolves a common result type,
/// and coerces both operands to it. Returns the collected `SimdBinOp` info,
/// including comptime-known operand values when available.
fn checkSimdBinOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    uncasted_lhs: Air.Inst.Ref,
    uncasted_rhs: Air.Inst.Ref,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);

    // Rejects mixed scalar/vector operands and mismatched vector lengths.
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    const vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
    const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });
    const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
    const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);

    return SimdBinOp{
        .len = vec_len,
        .lhs = lhs,
        .rhs = rhs,
        .lhs_val = try sema.resolveValue(lhs),
        .rhs_val = try sema.resolveValue(rhs),
        .result_ty = result_ty,
        .scalar_ty = result_ty.scalarType(mod),
    };
}
|
|
|
|
/// Validates that `lhs_ty` and `rhs_ty` can participate together in a
/// vectorizable binary operation. If neither side is a vector this is a
/// no-op (pure scalar operation). Otherwise both sides must be vector-like
/// (vector or array) with equal lengths; mixed scalar/vector operands and
/// length mismatches produce compile errors with notes on both operands.
fn checkVectorizableBinaryOperands(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs_ty: Type,
    rhs_ty: Type,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    // Scalar-scalar operations need no further checks here.
    if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;

    // Once at least one side is a real vector, arrays also count as
    // vector-like operands.
    const lhs_is_vector = switch (lhs_zig_ty_tag) {
        .Vector, .Array => true,
        else => false,
    };
    const rhs_is_vector = switch (rhs_zig_ty_tag) {
        .Vector, .Array => true,
        else => false,
    };

    if (lhs_is_vector and rhs_is_vector) {
        const lhs_len = lhs_ty.arrayLen(mod);
        const rhs_len = rhs_ty.arrayLen(mod);
        if (lhs_len != rhs_len) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len});
                try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    } else {
        // Exactly one side is vector-like: report which side is which.
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{
                lhs_ty.fmt(mod), rhs_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            if (lhs_is_vector) {
                try sema.errNote(block, lhs_src, msg, "vector here", .{});
                try sema.errNote(block, rhs_src, msg, "scalar here", .{});
            } else {
                try sema.errNote(block, lhs_src, msg, "scalar here", .{});
                try sema.errNote(block, rhs_src, msg, "vector here", .{});
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
}
|
|
|
|
/// Narrows `base_src` (the source location of an options-struct argument)
/// to the location of the field named `wanted` within it, or propagates
/// `.unneeded` untouched.
fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
    if (base_src == .unneeded) return .unneeded;
    const zcu = sema.mod;
    const decl = zcu.declPtr(block.src_decl);
    return zcu.optionsSrc(decl, base_src, wanted);
}
|
|
|
|
/// Resolves the `std.builtin.ExportOptions` argument of `@export` to a
/// comptime-known `Module.Export.Options`. Every field (name, linkage,
/// section, visibility) must be comptime-known; also validates that the
/// name is non-empty and that non-default visibility is not combined with
/// internal linkage.
fn resolveExportOptions(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!Module.Export.Options {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const export_options_ty = try sema.getBuiltinType("ExportOptions");
    const air_ref = try sema.resolveInst(zir_ref);
    const options = try sema.coerce(block, export_options_ty, air_ref, src);

    // Per-field source locations so errors point at the offending field.
    const name_src = sema.maybeOptionsSrc(block, src, "name");
    const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
    const section_src = sema.maybeOptionsSrc(block, src, "section");
    const visibility_src = sema.maybeOptionsSrc(block, src, "visibility");

    const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
    const name_val = try sema.resolveConstDefinedValue(block, name_src, name_operand, .{
        .needed_comptime_reason = "name of exported value must be comptime-known",
    });
    const name_ty = Type.slice_const_u8;
    // Copied into the Sema arena; interned into the pool below.
    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);

    const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
    const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{
        .needed_comptime_reason = "linkage of exported value must be comptime-known",
    });
    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);

    // `section` is optional; null means "no specific section".
    const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src);
    const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{
        .needed_comptime_reason = "linksection of exported value must be comptime-known",
    });
    const section_ty = Type.slice_const_u8;
    const section = if (section_opt_val.optionalValue(mod)) |section_val|
        try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
    else
        null;

    const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src);
    const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{
        .needed_comptime_reason = "visibility of exported value must be comptime-known",
    });
    const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);

    if (name.len < 1) {
        return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
    }

    // Internal linkage means the symbol is not externally reachable, so a
    // non-default visibility would be meaningless/contradictory.
    if (visibility != .default and linkage == .Internal) {
        return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{
            name, @tagName(visibility),
        });
    }

    return .{
        .name = try ip.getOrPutString(gpa, name),
        .linkage = linkage,
        .section = try ip.getOrPutStringOpt(gpa, section),
        .visibility = visibility,
    };
}
|
|
|
|
/// Resolves `zir_ref` to a comptime-known value of the `std.builtin` enum
/// type named by `name` (e.g. "AtomicOrder"). `reason` is reported when the
/// value is not comptime-known. The return type is computed at comptime
/// from `name` via `@field(std.builtin, name)`.
fn resolveBuiltinEnum(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    comptime name: []const u8,
    reason: NeededComptimeReason,
) CompileError!@field(std.builtin, name) {
    const mod = sema.mod;
    const ty = try sema.getBuiltinType(name);
    const air_ref = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, ty, air_ref, src);
    const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
    return mod.toEnum(@field(std.builtin, name), val);
}
|
|
|
|
/// Resolves `zir_ref` to a comptime-known `std.builtin.AtomicOrder`,
/// reporting `reason` if it is not comptime-known.
fn resolveAtomicOrder(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!std.builtin.AtomicOrder {
    return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicOrder", reason);
}
|
|
|
|
/// Resolves `zir_ref` to a comptime-known `std.builtin.AtomicRmwOp`
/// (the `op` argument of `@atomicRmw`).
fn resolveAtomicRmwOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.AtomicRmwOp {
    return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicRmwOp", .{
        // The builtin is spelled `@atomicRmw` (lowercase final 'w'); the
        // previous message said `@atomicRmW`.
        .needed_comptime_reason = "@atomicRmw operation must be comptime-known",
    });
}
|
|
|
|
/// Semantic analysis of `@cmpxchgWeak` / `@cmpxchgStrong` (selected by
/// `extended.small`: 0 = weak, 1 = strong). Validates the element type and
/// atomic orderings, evaluates at comptime when all operands are
/// comptime-known, and otherwise emits a `cmpxchg_*` AIR instruction.
/// The result type is `?T`: null on success, the observed value on failure.
fn zirCmpxchg(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
    const air_tag: Air.Inst.Tag = switch (extended.small) {
        0 => .cmpxchg_weak,
        1 => .cmpxchg_strong,
        else => unreachable,
    };
    const src = LazySrcLoc.nodeOffset(extra.node);
    // zig fmt: off
    const elem_ty_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const ptr_src          : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
    const expected_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
    const new_value_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };
    const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = extra.node };
    const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = extra.node };
    // zig fmt: on
    const expected_value = try sema.resolveInst(extra.expected_value);
    const elem_ty = sema.typeOf(expected_value);
    // Floats are specifically rejected for cmpxchg (bitwise comparison
    // semantics don't match float equality).
    if (elem_ty.zigTypeTag(mod) == .Float) {
        return sema.fail(
            block,
            elem_ty_src,
            "expected bool, integer, enum, or pointer type; found '{}'",
            .{elem_ty.fmt(mod)},
        );
    }
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const new_value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.new_value), new_value_src);
    const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order, .{
        .needed_comptime_reason = "atomic order of cmpxchg success must be comptime-known",
    });
    const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order, .{
        .needed_comptime_reason = "atomic order of cmpxchg failure must be comptime-known",
    });

    // Ordering constraints mirror the LLVM cmpxchg rules.
    if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
    }
    if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
    }
    if (@intFromEnum(failure_order) > @intFromEnum(success_order)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
    }
    if (failure_order == .Release or failure_order == .AcqRel) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
    }

    const result_ty = try mod.optionalType(elem_ty.toIntern());

    // special case zero bit types
    if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
        // With only one possible value the exchange trivially "succeeds".
        return Air.internedToRef((try mod.intern(.{ .opt = .{
            .ty = result_ty.toIntern(),
            .val = .none,
        } })));
    }

    // Comptime path: requires ptr, expected, and new value all known.
    // `runtime_src` records which operand forced runtime evaluation.
    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        if (try sema.resolveValue(expected_value)) |expected_val| {
            if (try sema.resolveValue(new_value)) |new_val| {
                if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
                    // TODO: this should probably cause the memory stored at the pointer
                    // to become undef as well
                    return mod.undefRef(result_ty);
                }
                const ptr_ty = sema.typeOf(ptr);
                const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
                const result_val = try mod.intern(.{ .opt = .{
                    .ty = result_ty.toIntern(),
                    .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
                        // Matched: perform the store and return null.
                        try sema.storePtr(block, src, ptr, new_value);
                        break :blk .none;
                    } else stored_val.toIntern(),
                } });
                return Air.internedToRef(result_val);
            } else break :rs new_value_src;
        } else break :rs expected_src;
    } else ptr_src;

    // Both orderings are packed into one u32 payload field (3 bits each).
    const flags: u32 = @as(u32, @intFromEnum(success_order)) |
        (@as(u32, @intFromEnum(failure_order)) << 3);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(result_ty.toIntern()),
            .payload = try sema.addExtra(Air.Cmpxchg{
                .ptr = ptr,
                .expected_value = expected_value,
                .new_value = new_value,
                .flags = flags,
            }),
        } },
    });
}
|
|
|
|
/// Semantic analysis of `@splat`: coerces the scalar operand to the
/// destination vector's element type and broadcasts it. Evaluates at
/// comptime when the scalar is comptime-known (undef splats to an undef
/// vector); otherwise emits a `splat` AIR instruction.
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const scalar_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    // The destination type comes from the result location of the call.
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat");

    if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(mod)});

    const operand = try sema.resolveInst(extra.rhs);
    const scalar_ty = dest_ty.childType(mod);
    const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src);
    if (try sema.resolveValue(scalar)) |scalar_val| {
        if (scalar_val.isUndef(mod)) return mod.undefRef(dest_ty);
        return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src);
    return block.addTyOp(.splat, dest_ty, scalar);
}
|
|
|
|
/// Semantic analysis of `@reduce`: validates the operation against the
/// vector's scalar type (And/Or/Xor need int or bool; Min/Max/Add/Mul need
/// int or float), rejects zero-length vectors, folds at comptime when the
/// operand is comptime-known, and otherwise emits a `reduce` AIR instruction.
fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const op_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", .{
        .needed_comptime_reason = "@reduce operation must be comptime-known",
    });
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const mod = sema.mod;

    if (operand_ty.zigTypeTag(mod) != .Vector) {
        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
    }

    const scalar_ty = operand_ty.childType(mod);

    // Type-check depending on operation.
    switch (operation) {
        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
            .Int, .Bool => {},
            else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
                @tagName(operation), operand_ty.fmt(mod),
            }),
        },
        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
            .Int, .Float => {},
            else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
                @tagName(operation), operand_ty.fmt(mod),
            }),
        },
    }

    const vec_len = operand_ty.vectorLen(mod);
    if (vec_len == 0) {
        // TODO re-evaluate if we should introduce a "neutral value" for some operations,
        // e.g. zero for add and one for mul.
        return sema.fail(block, operand_src, "@reduce operation requires a vector with nonzero length", .{});
    }

    // Comptime path: left-fold the elements with the requested operation.
    if (try sema.resolveValue(operand)) |operand_val| {
        if (operand_val.isUndef(mod)) return mod.undefRef(scalar_ty);

        var accum: Value = try operand_val.elemValue(mod, 0);
        var i: u32 = 1;
        while (i < vec_len) : (i += 1) {
            const elem_val = try operand_val.elemValue(mod, i);
            switch (operation) {
                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
                .Min => accum = accum.numberMin(elem_val, mod),
                .Max => accum = accum.numberMax(elem_val, mod),
                .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
            }
        }
        return Air.internedToRef(accum.toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    return block.addInst(.{
        // Optimized float mode selects the relaxed-FP variant of the op.
        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
        .data = .{ .reduce = .{
            .operand = operand,
            .operation = operation,
        } },
    });
}
|
|
|
|
/// Semantic analysis of `@shuffle`: validates the element type, coerces the
/// mask to a vector of `i32` of the mask's length, requires the mask to be
/// comptime-known, then delegates to `analyzeShuffle`.
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };

    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
    const a = try sema.resolveInst(extra.a);
    const b = try sema.resolveInst(extra.b);
    var mask = try sema.resolveInst(extra.mask);
    var mask_ty = sema.typeOf(mask);

    // The mask may be written as an array or vector; its length determines
    // the result vector length.
    const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
        else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
    };
    mask_ty = try mod.vectorType(.{
        .len = @intCast(mask_len),
        .child = .i32_type,
    });
    mask = try sema.coerce(block, mask_ty, mask, mask_src);
    const mask_val = try sema.resolveConstValue(block, mask_src, mask, .{
        .needed_comptime_reason = "shuffle mask must be comptime-known",
    });
    return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(mask_len));
}
|
|
|
|
/// Core analysis of `@shuffle`. Mask elements select from `a` when
/// non-negative (index `i`) and from `b` when negative (index `~i`, i.e.
/// `-1` means `b[0]`). Validates mask indices against the operand lengths,
/// folds at comptime when both operands are comptime-known, and otherwise
/// pads the shorter operand with undef (via a recursive shuffle) so codegen
/// always sees equal-length vectors.
fn analyzeShuffle(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    elem_ty: Type,
    a_arg: Air.Inst.Ref,
    b_arg: Air.Inst.Ref,
    mask: Value,
    mask_len: u32,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
    const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
    const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
    var a = a_arg;
    var b = b_arg;

    // The result length is always the mask length.
    const res_ty = try mod.vectorType(.{
        .len = mask_len,
        .child = elem_ty.toIntern(),
    });

    // Either operand may be `undefined`, in which case its length is taken
    // from the other operand.
    const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(a).arrayLen(mod),
        .Undefined => null,
        else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
            elem_ty.fmt(sema.mod),
            sema.typeOf(a).fmt(sema.mod),
        }),
    };
    const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(b).arrayLen(mod),
        .Undefined => null,
        else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
            elem_ty.fmt(sema.mod),
            sema.typeOf(b).fmt(sema.mod),
        }),
    };
    // Both undefined: the whole result is undefined.
    if (maybe_a_len == null and maybe_b_len == null) {
        return mod.undefRef(res_ty);
    }
    const a_len: u32 = @intCast(maybe_a_len orelse maybe_b_len.?);
    const b_len: u32 = @intCast(maybe_b_len orelse a_len);

    const a_ty = try mod.vectorType(.{
        .len = a_len,
        .child = elem_ty.toIntern(),
    });
    const b_ty = try mod.vectorType(.{
        .len = b_len,
        .child = elem_ty.toIntern(),
    });

    if (maybe_a_len == null) a = try mod.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
    if (maybe_b_len == null) b = try mod.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);

    // Indexed by `chosen` (0 = a, 1 = b) for uniform bounds reporting.
    const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){
        .{ a_len, a_src, a_ty },
        .{ b_len, b_src, b_ty },
    };

    // Validate every mask element against the length of the operand it
    // selects from; undef mask elements are always allowed.
    for (0..@intCast(mask_len)) |i| {
        const elem = try mask.elemValue(sema.mod, i);
        if (elem.isUndef(mod)) continue;
        const elem_resolved = try sema.resolveLazyValue(elem);
        const int = elem_resolved.toSignedInt(mod);
        var unsigned: u32 = undefined;
        var chosen: u32 = undefined;
        if (int >= 0) {
            unsigned = @intCast(int);
            chosen = 0;
        } else {
            // Negative selects from `b`; `~int` recovers the element index.
            unsigned = @intCast(~int);
            chosen = 1;
        }
        if (unsigned >= operand_info[chosen][0]) {
            const msg = msg: {
                const msg = try sema.errMsg(block, mask_src, "mask index '{d}' has out-of-bounds selection", .{i});
                errdefer msg.destroy(sema.gpa);

                try sema.errNote(block, operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{
                    unsigned,
                    operand_info[chosen][2].fmt(sema.mod),
                });

                if (chosen == 0) {
                    try sema.errNote(block, b_src, msg, "selections from the second vector are specified with negative numbers", .{});
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    }

    // Comptime path: materialize the shuffled aggregate directly.
    if (try sema.resolveValue(a)) |a_val| {
        if (try sema.resolveValue(b)) |b_val| {
            const values = try sema.arena.alloc(InternPool.Index, mask_len);
            for (values, 0..) |*value, i| {
                const mask_elem_val = try mask.elemValue(sema.mod, i);
                if (mask_elem_val.isUndef(mod)) {
                    value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
                    continue;
                }
                const int = mask_elem_val.toSignedInt(mod);
                const unsigned: u32 = @intCast(if (int >= 0) int else ~int);
                values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
            }
            return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                .ty = res_ty.toIntern(),
                .storage = .{ .elems = values },
            } })));
        }
    }

    // All static analysis passed, and not comptime.
    // For runtime codegen, vectors a and b must be the same length. Here we
    // recursively @shuffle the smaller vector to append undefined elements
    // to it up to the length of the longer vector. This recursion terminates
    // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len.
    if (a_len != b_len) {
        const min_len = @min(a_len, b_len);
        const max_src = if (a_len > b_len) a_src else b_src;
        const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));

        // Identity mask for existing elements, -1 (undef selection) beyond.
        const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
        for (@intCast(0)..@intCast(min_len)) |i| {
            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
        }
        for (@intCast(min_len)..@intCast(max_len)) |i| {
            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
        }
        const expand_mask = try mod.intern(.{ .aggregate = .{
            .ty = (try mod.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
            .storage = .{ .elems = expand_mask_values },
        } });

        if (a_len < b_len) {
            const undef = try mod.undefRef(a_ty);
            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len));
        } else {
            const undef = try mod.undefRef(b_ty);
            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len));
        }
    }

    return block.addInst(.{
        .tag = .shuffle,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(res_ty.toIntern()),
            .payload = try block.sema.addExtra(Air.Shuffle{
                .a = a,
                .b = b,
                .mask = mask.toIntern(),
                .mask_len = mask_len,
            }),
        } },
    });
}
|
|
|
|
/// Semantic analysis of `@select`: element-wise choice between vectors `a`
/// and `b` driven by a bool vector `pred`. Folds at comptime when all three
/// operands are comptime-known (any undef operand yields an undef result);
/// otherwise emits a `select` AIR instruction.
fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;

    const src = LazySrcLoc.nodeOffset(extra.node);
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const pred_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
    const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
    const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };

    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
    const pred_uncoerced = try sema.resolveInst(extra.pred);
    const pred_ty = sema.typeOf(pred_uncoerced);

    // The predicate's length determines the length of all three vectors.
    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
        .Vector, .Array => pred_ty.arrayLen(mod),
        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
    };
    const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64));

    const bool_vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = .bool_type,
    });
    const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);

    const vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = elem_ty.toIntern(),
    });
    const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
    const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);

    const maybe_pred = try sema.resolveValue(pred);
    const maybe_a = try sema.resolveValue(a);
    const maybe_b = try sema.resolveValue(b);

    // `runtime_src` records the first operand that is not comptime-known,
    // for the "runtime value required" error path.
    const runtime_src = if (maybe_pred) |pred_val| rs: {
        if (pred_val.isUndef(mod)) return mod.undefRef(vec_ty);

        if (maybe_a) |a_val| {
            if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);

            if (maybe_b) |b_val| {
                if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);

                // All operands known: fold element-wise.
                const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
                defer sema.gpa.free(elems);
                for (elems, 0..) |*elem, i| {
                    const pred_elem_val = try pred_val.elemValue(mod, i);
                    const should_choose_a = pred_elem_val.toBool();
                    elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
                }

                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = vec_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else {
                break :rs b_src;
            }
        } else {
            if (maybe_b) |b_val| {
                if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
            }
            break :rs a_src;
        }
    } else rs: {
        if (maybe_a) |a_val| {
            if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);
        }
        if (maybe_b) |b_val| {
            if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
        }
        break :rs pred_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .select,
        .data = .{ .pl_op = .{
            .operand = pred,
            .payload = try block.sema.addExtra(Air.Bin{
                .lhs = a,
                .rhs = b,
            }),
        } },
    });
}
|
|
|
|
/// Semantic analysis of `@atomicLoad`: validates the pointer operand and
/// the atomic ordering (Release/AcqRel are invalid for loads), folds at
/// comptime when the pointee is comptime-known or the element type has only
/// one possible value, and otherwise emits an `atomic_load` AIR instruction.
fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data;
    // zig fmt: off
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    // zig fmt: on
    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    // `true` = a const pointer is acceptable, since a load does not mutate.
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{
        .needed_comptime_reason = "atomic order of @atomicLoad must be comptime-known",
    });

    switch (order) {
        .Release, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicLoad atomic ordering must not be Release or AcqRel",
                .{},
            );
        },
        else => {},
    }

    // Zero-bit / one-possible-value types load trivially.
    if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    // Comptime path: dereference a comptime-known pointer directly.
    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, ptr_src, ptr_val, sema.typeOf(ptr))) |elem_val| {
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
    return block.addInst(.{
        .tag = .atomic_load,
        .data = .{ .atomic_load = .{
            .ptr = ptr,
            .order = order,
        } },
    });
}
|
|
|
|
/// Analyzes a `@atomicRmw(ElemType, ptr, op, operand, ordering)` builtin call.
/// Validates which operations are legal for the operand type, rejects the
/// `Unordered` ordering, folds the read-modify-write at comptime when both
/// the pointer and operand are comptime-known, and otherwise emits an
/// `atomic_rmw` AIR instruction. Like the runtime semantics, the comptime
/// path returns the *previous* stored value.
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const op_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    const order_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
    // zig fmt: on
    const operand = try sema.resolveInst(extra.operand);
    const elem_ty = sema.typeOf(operand);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);

    // Only a subset of RMW operations is meaningful for non-integer operands.
    switch (elem_ty.zigTypeTag(mod)) {
        .Enum => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
        },
        .Bool => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
        },
        .Float => switch (op) {
            .Xchg, .Add, .Sub, .Max, .Min => {},
            else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}),
        },
        else => {},
    }
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{
        // Fixed: error message previously misspelled the builtin as "@atomicRmW".
        .needed_comptime_reason = "atomic order of @atomicRmw must be comptime-known",
    });

    if (order == .Unordered) {
        return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
    }

    // special case zero bit types
    if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const maybe_operand_val = try sema.resolveValue(operand);
        const operand_val = maybe_operand_val orelse {
            // Runtime operand: a comptime-mutable pointer cannot be modified
            // at runtime, so diagnose that before falling to the runtime path.
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr(mod)) {
            // Fully comptime-known: perform the read-modify-write now.
            const ptr_ty = sema.typeOf(ptr);
            const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
            const new_val = switch (op) {
                // zig fmt: off
                .Xchg => operand_val,
                .Add  => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
                .Sub  => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
                .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, mod),
                .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, mod),
                .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, mod),
                .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, mod),
                .Max  => stored_val.numberMax        (operand_val, mod),
                .Min  => stored_val.numberMin        (operand_val, mod),
                // zig fmt: on
            };
            try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
            // @atomicRmw returns the value that was stored before the operation.
            return Air.internedToRef(stored_val.toIntern());
        } else break :rs ptr_src;
    } else ptr_src;

    // Pack ordering (low 3 bits) and operation into the AIR flags word.
    const flags: u32 = @as(u32, @intFromEnum(order)) | (@as(u32, @intFromEnum(op)) << 3);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .atomic_rmw,
        .data = .{ .pl_op = .{
            .operand = ptr,
            .payload = try sema.addExtra(Air.AtomicRmw{
                .operand = operand,
                .flags = flags,
            }),
        } },
    });
}
|
|
|
|
/// Analyzes an `@atomicStore(ElemType, ptr, value, ordering)` builtin call.
/// Validates the atomic ordering, selects the matching AIR store tag, and
/// delegates the actual store (including comptime folding) to `storePtr2`.
fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    // zig fmt: on
    const operand = try sema.resolveInst(extra.operand);
    const elem_ty = sema.typeOf(operand);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{
        .needed_comptime_reason = "atomic order of @atomicStore must be comptime-known",
    });

    // A store is a write, so orderings with acquire semantics are invalid.
    // Each remaining ordering maps to a dedicated AIR store instruction.
    const air_tag: Air.Inst.Tag = switch (order) {
        .Acquire, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicStore atomic ordering must not be Acquire or AcqRel",
                .{},
            );
        },
        .Unordered => .atomic_store_unordered,
        .Monotonic => .atomic_store_monotonic,
        .Release => .atomic_store_release,
        .SeqCst => .atomic_store_seq_cst,
    };

    return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
|
|
|
|
/// Analyzes a `@mulAdd(T, a, b, c)` builtin call (fused multiply-add).
/// The result type is taken from the addend; the two multiplicands are
/// coerced to it. Folds the operation when all three operands are
/// comptime-known (undef in any operand yields undef), otherwise emits a
/// `mul_add` AIR instruction.
fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.MulAdd, inst_data.payload_index).data;
    const src = inst_data.src();

    const mulend1_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const mulend2_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const addend_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };

    const addend = try sema.resolveInst(extra.addend);
    const ty = sema.typeOf(addend);
    const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src);
    const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src);

    const maybe_mulend1 = try sema.resolveValue(mulend1);
    const maybe_mulend2 = try sema.resolveValue(mulend2);
    const maybe_addend = try sema.resolveValue(addend);
    const mod = sema.mod;

    // @mulAdd is only defined for floats and vectors of floats.
    switch (ty.scalarType(mod).zigTypeTag(mod)) {
        .ComptimeFloat, .Float => {},
        else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
    }

    // Find the first runtime-known operand (its source location becomes the
    // "why is this runtime" diagnostic), folding fully-comptime cases inline.
    const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
        if (maybe_mulend2) |mulend2_val| {
            if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);

            if (maybe_addend) |addend_val| {
                if (addend_val.isUndef(mod)) return mod.undefRef(ty);
                const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
                return Air.internedToRef(result_val.toIntern());
            } else {
                break :rs addend_src;
            }
        } else {
            if (maybe_addend) |addend_val| {
                if (addend_val.isUndef(mod)) return mod.undefRef(ty);
            }
            break :rs mulend2_src;
        }
    } else rs: {
        if (maybe_mulend2) |mulend2_val| {
            if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);
        }
        if (maybe_addend) |addend_val| {
            if (addend_val.isUndef(mod)) return mod.undefRef(ty);
        }
        break :rs mulend1_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .mul_add,
        .data = .{ .pl_op = .{
            .operand = addend,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = mulend1,
                .rhs = mulend2,
            }),
        } },
    });
}
|
|
|
|
/// Analyzes a `@call(modifier, function, args)` builtin call.
/// Resolves and validates the comptime-known call modifier (possibly
/// upgrading it to `.compile_time` / `.no_async` depending on the
/// surrounding block), unpacks the args tuple into individual AIR refs,
/// and delegates to `analyzeCall`.
fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const args_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const call_src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
    const func = try sema.resolveInst(extra.callee);

    const modifier_ty = try sema.getBuiltinType("CallModifier");
    const air_ref = try sema.resolveInst(extra.modifier);
    const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
    const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{
        .needed_comptime_reason = "call modifier must be comptime-known",
    });
    // The requested modifier may be adjusted below based on block context.
    var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val);
    switch (modifier) {
        // These can be upgraded to comptime or nosuspend calls.
        .auto, .never_tail, .no_async => {
            if (block.is_comptime) {
                if (modifier == .never_tail) {
                    return sema.fail(block, modifier_src, "unable to perform 'never_tail' call at compile-time", .{});
                }
                modifier = .compile_time;
            } else if (extra.flags.is_nosuspend) {
                modifier = .no_async;
            }
        },
        // These can be upgraded to comptime. nosuspend bit can be safely ignored.
        .always_inline, .compile_time => {
            // Both modifiers require the callee itself to be comptime-known.
            _ = (try sema.resolveDefinedValue(block, func_src, func)) orelse {
                return sema.fail(block, func_src, "modifier '{s}' requires a comptime-known function", .{@tagName(modifier)});
            };

            if (block.is_comptime) {
                modifier = .compile_time;
            }
        },
        .always_tail => {
            if (block.is_comptime) {
                modifier = .compile_time;
            }
        },
        .async_kw => {
            if (extra.flags.is_nosuspend) {
                return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used inside nosuspend block", .{});
            }
            if (block.is_comptime) {
                return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used in combination with comptime function call", .{});
            }
        },
        .never_inline => {
            if (block.is_comptime) {
                return sema.fail(block, modifier_src, "unable to perform 'never_inline' call at compile-time", .{});
            }
        },
    }

    const args = try sema.resolveInst(extra.args);

    // The args operand must be a tuple (or the empty struct literal).
    const args_ty = sema.typeOf(args);
    if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) {
        return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
    }

    // Unpack each tuple field into its own argument ref.
    const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
    for (resolved_args, 0..) |*resolved, i| {
        resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(i), args_ty);
    }

    const callee_ty = sema.typeOf(func);
    const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
    const ensure_result_used = extra.flags.ensure_result_used;
    return sema.analyzeCall(
        block,
        func,
        func_ty,
        func_src,
        call_src,
        modifier,
        ensure_result_used,
        .{ .call_builtin = .{
            .call_node_offset = inst_data.src_node,
            .args = resolved_args,
        } },
        null,
        .@"@call",
    );
}
|
|
|
|
/// Analyzes a `@fieldParentPtr(ParentType, "field_name", field_ptr)` builtin
/// call. Validates that the parent is a struct or union with the named,
/// non-comptime field, coerces the field pointer to the expected field
/// pointer type, folds the computation when the pointer is comptime-known
/// (it must then actually point at that field of a parent value), and
/// otherwise emits a `field_parent_ptr` AIR instruction.
fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, inst_data.payload_index).data;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };

    const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type);
    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
    const field_ptr = try sema.resolveInst(extra.field_ptr);
    const field_ptr_ty = sema.typeOf(field_ptr);
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
        return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
    }
    // Field offsets/alignments require the parent's layout to be resolved.
    try sema.resolveTypeLayout(parent_ty);

    const field_index = switch (parent_ty.zigTypeTag(mod)) {
        .Struct => blk: {
            if (parent_ty.isTuple(mod)) {
                // Tuples expose a synthetic 'len' field that has no storage.
                if (ip.stringEqlSlice(field_name, "len")) {
                    return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
                }
                break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src);
            } else {
                break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src);
            }
        },
        .Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src),
        else => unreachable,
    };

    // Comptime fields have no runtime storage, so no parent pointer exists.
    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
        return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
    }

    try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
    const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);

    // Build the expected pointer-to-field type, inheriting address space and
    // constness from the given pointer. The alignment is filled in below.
    var ptr_ty_data: InternPool.Key.PtrType = .{
        .child = parent_ty.structFieldType(field_index, mod).toIntern(),
        .flags = .{
            .address_space = field_ptr_ty_info.flags.address_space,
            .is_const = field_ptr_ty_info.flags.is_const,
        },
    };

    if (parent_ty.containerLayout(mod) == .Packed) {
        return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
    } else {
        ptr_ty_data.flags.alignment = blk: {
            if (mod.typeToStruct(parent_ty)) |struct_type| {
                break :blk struct_type.fieldAlign(ip, field_index);
            } else if (mod.typeToUnion(parent_ty)) |union_obj| {
                break :blk union_obj.fieldAlign(ip, field_index);
            } else {
                break :blk .none;
            }
        };
    }

    const actual_field_ptr_ty = try sema.ptrType(ptr_ty_data);
    const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);

    // Reuse the same flags/alignment for the resulting parent pointer type.
    ptr_ty_data.child = parent_ty.toIntern();
    const result_ptr = try sema.ptrType(ptr_ty_data);

    if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
        // At comptime the pointer must literally be a field pointer whose
        // field index matches; its base is then the parent pointer.
        const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
            .ptr => |ptr| switch (ptr.addr) {
                .field => |field| field,
                else => null,
            },
            else => null,
        } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});

        if (field.index != field_index) {
            return sema.fail(block, src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{
                field_name.fmt(ip), field_index, field.index, parent_ty.fmt(sema.mod),
            });
        }
        return Air.internedToRef(field.base);
    }

    try sema.requireRuntimeBlock(block, src, ptr_src);
    try sema.queueFullTypeResolution(result_ptr);
    return block.addInst(.{
        .tag = .field_parent_ptr,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(result_ptr.toIntern()),
            .payload = try block.sema.addExtra(Air.FieldParentPtr{
                .field_ptr = casted_field_ptr,
                .field_index = @intCast(field_index),
            }),
        } },
    });
}
|
|
|
|
/// Analyzes a two-argument `@min`/`@max` builtin call. `air_tag` selects
/// which operation (`.min` or `.max`). Both operands must be numeric; the
/// shared implementation lives in `analyzeMinMax`.
fn zirMinMax(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const bin = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const call_src = inst_data.src();
    const operand_srcs = [2]LazySrcLoc{
        .{ .node_offset_builtin_call_arg0 = inst_data.src_node },
        .{ .node_offset_builtin_call_arg1 = inst_data.src_node },
    };
    const resolved = [2]Air.Inst.Ref{
        try sema.resolveInst(bin.lhs),
        try sema.resolveInst(bin.rhs),
    };
    for (resolved, operand_srcs) |operand, operand_src| {
        try sema.checkNumericType(block, operand_src, sema.typeOf(operand));
    }
    return sema.analyzeMinMax(block, call_src, air_tag, &resolved, &operand_srcs);
}
|
|
|
|
/// Analyzes a `@min`/`@max` builtin call with more than two arguments
/// (encoded as an extended ZIR instruction). Resolves each operand, checks
/// it is numeric, and delegates to `analyzeMinMax`.
fn zirMinMaxMulti(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
    const src_node = extra.data.src_node;
    const src = LazySrcLoc.nodeOffset(src_node);
    const operands = sema.code.refSlice(extra.end, extended.small);

    const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
    const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len);

    for (operands, air_refs, operand_srcs, 0..) |zir_ref, *air_ref, *op_src, i| {
        // Only the first six arguments get a precise source location.
        op_src.* = switch (i) {
            0 => .{ .node_offset_builtin_call_arg0 = src_node },
            1 => .{ .node_offset_builtin_call_arg1 = src_node },
            2 => .{ .node_offset_builtin_call_arg2 = src_node },
            3 => .{ .node_offset_builtin_call_arg3 = src_node },
            4 => .{ .node_offset_builtin_call_arg4 = src_node },
            5 => .{ .node_offset_builtin_call_arg5 = src_node },
            else => src, // TODO: better source location
        };
        air_ref.* = try sema.resolveInst(zir_ref);
        try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*));
    }

    return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs);
}
|
|
|
|
/// Shared implementation of `@min`/`@max` for any number of operands.
/// Strategy: (1) fold all comptime-known operands into a single value while
/// tracking the combined integer bounds of the operands seen so far;
/// (2) refine the comptime result's integer type to the smallest type that
/// fits the tracked bounds; (3) emit one min/max AIR instruction per
/// runtime operand; (4) refine the final runtime result type from the
/// bounds, inserting an intcast if the type shrank. Float operands disable
/// bounds tracking (and therefore refinement).
fn analyzeMinMax(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime air_tag: Air.Inst.Tag,
    operands: []const Air.Inst.Ref,
    operand_srcs: []const LazySrcLoc,
) CompileError!Air.Inst.Ref {
    assert(operands.len == operand_srcs.len);
    assert(operands.len > 0);
    const mod = sema.mod;

    if (operands.len == 1) return operands[0];

    // Scalar combining function corresponding to the requested operation.
    const opFunc = switch (air_tag) {
        .min => Value.numberMin,
        .max => Value.numberMax,
        else => @compileError("unreachable"),
    };

    // The set of runtime-known operands. Set up in the loop below.
    var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len);
    // The current minmax value - initially this will always be comptime-known, then we'll add
    // runtime values into the mix later.
    var cur_minmax: ?Air.Inst.Ref = null;
    var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null
    // The current known scalar bounds of the value.
    var bounds_status: enum {
        unknown, // We've only seen undef comptime_ints so far, so do not know the bounds.
        defined, // We've seen only integers, so the bounds are defined.
        non_integral, // There are floats in the mix, so the bounds aren't defined.
    } = .unknown;
    var cur_min_scalar: Value = undefined;
    var cur_max_scalar: Value = undefined;

    // First, find all comptime-known arguments, and get their min/max

    for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| {
        // Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call
        // it in the runtime path anyway since the result type may have been refined
        const unresolved_uncoerced_val = try sema.resolveValue(operand) orelse continue;
        const uncoerced_val = try sema.resolveLazyValue(unresolved_uncoerced_val);

        runtime_known.unset(operand_idx);

        switch (bounds_status) {
            .unknown, .defined => refine_bounds: {
                const ty = sema.typeOf(operand);
                if (!ty.scalarType(mod).isInt(mod) and !ty.scalarType(mod).eql(Type.comptime_int, mod)) {
                    bounds_status = .non_integral;
                    break :refine_bounds;
                }
                // For vectors, fold element-wise bounds into a single scalar
                // [min, max] pair; null means an undef element was seen.
                const scalar_bounds: ?[2]Value = bounds: {
                    if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(mod);
                    var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(mod, 0), mod) orelse break :bounds null;
                    const len = try sema.usizeCast(block, src, ty.vectorLen(mod));
                    for (1..len) |i| {
                        const elem = try uncoerced_val.elemValue(mod, i);
                        const elem_bounds = try elem.intValueBounds(mod) orelse break :bounds null;
                        cur_bounds = .{
                            Value.numberMin(elem_bounds[0], cur_bounds[0], mod),
                            Value.numberMax(elem_bounds[1], cur_bounds[1], mod),
                        };
                    }
                    break :bounds cur_bounds;
                };
                if (scalar_bounds) |bounds| {
                    if (bounds_status == .unknown) {
                        cur_min_scalar = bounds[0];
                        cur_max_scalar = bounds[1];
                        bounds_status = .defined;
                    } else {
                        cur_min_scalar = opFunc(cur_min_scalar, bounds[0], mod);
                        cur_max_scalar = opFunc(cur_max_scalar, bounds[1], mod);
                    }
                }
            },
            .non_integral => {},
        }

        // First comptime-known operand just becomes the accumulator.
        const cur = cur_minmax orelse {
            cur_minmax = operand;
            cur_minmax_src = operand_src;
            continue;
        };

        const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
        const cur_val = try sema.resolveLazyValue(simd_op.lhs_val.?); // cur_minmax is comptime-known
        const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above

        // Scalar case: combine directly. Vector case: combine element-wise.
        const vec_len = simd_op.len orelse {
            const result_val = opFunc(cur_val, operand_val, mod);
            cur_minmax = Air.internedToRef(result_val.toIntern());
            continue;
        };
        const elems = try sema.arena.alloc(InternPool.Index, vec_len);
        for (elems, 0..) |*elem, i| {
            const lhs_elem_val = try cur_val.elemValue(mod, i);
            const rhs_elem_val = try operand_val.elemValue(mod, i);
            const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, mod);
            elem.* = (try mod.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
        }
        cur_minmax = Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = simd_op.result_ty.toIntern(),
            .storage = .{ .elems = elems },
        } })));
    }

    const opt_runtime_idx = runtime_known.findFirstSet();

    if (cur_minmax) |ct_minmax_ref| refine: {
        // Refine the comptime-known result type based on the bounds. This isn't strictly necessary
        // in the runtime case, since we'll refine the type again later, but keeping things as small
        // as possible will allow us to emit more optimal AIR (if all the runtime operands have
        // smaller types than the non-refined comptime type).

        const val = (try sema.resolveValue(ct_minmax_ref)).?;
        const orig_ty = sema.typeOf(ct_minmax_ref);

        if (opt_runtime_idx == null and orig_ty.scalarType(mod).eql(Type.comptime_int, mod)) {
            // If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type
            break :refine;
        }

        // We can't refine float types
        if (orig_ty.scalarType(mod).isAnyFloat()) break :refine;

        assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg

        const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
        const refined_ty = if (orig_ty.isVector(mod)) try mod.vectorType(.{
            .len = orig_ty.vectorLen(mod),
            .child = refined_scalar_ty.toIntern(),
        }) else refined_scalar_ty;

        // Apply the refined type to the current value
        if (std.debug.runtime_safety) {
            assert(try sema.intFitsInType(val, refined_ty, null));
        }
        cur_minmax = try sema.coerceInMemory(val, refined_ty);
    }

    // All operands comptime-known: we are done.
    const runtime_idx = opt_runtime_idx orelse return cur_minmax.?;
    const runtime_src = operand_srcs[runtime_idx];
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the
    // type again at the end, based on the comptime-known bound.

    // If the comptime-known part is undef we can avoid emitting actual instructions later
    const known_undef = if (cur_minmax) |operand| blk: {
        const val = (try sema.resolveValue(operand)).?;
        break :blk val.isUndef(mod);
    } else false;

    if (cur_minmax == null) {
        // No comptime operands - use the first operand as the starting value
        assert(bounds_status == .unknown);
        assert(runtime_idx == 0);
        cur_minmax = operands[0];
        cur_minmax_src = runtime_src;
        runtime_known.unset(0); // don't look at this operand in the loop below
        // Seed the bounds from the first runtime operand's full type range.
        const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod);
        if (scalar_ty.isInt(mod)) {
            cur_min_scalar = try scalar_ty.minInt(mod, scalar_ty);
            cur_max_scalar = try scalar_ty.maxInt(mod, scalar_ty);
            bounds_status = .defined;
        } else {
            bounds_status = .non_integral;
        }
    }

    var it = runtime_known.iterator(.{});
    while (it.next()) |idx| {
        const lhs = cur_minmax.?;
        const lhs_src = cur_minmax_src;
        const rhs = operands[idx];
        const rhs_src = operand_srcs[idx];
        const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
        if (known_undef) {
            cur_minmax = try mod.undefRef(simd_op.result_ty);
        } else {
            cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
        }
        // Compute the bounds of this type
        switch (bounds_status) {
            .unknown, .defined => refine_bounds: {
                const scalar_ty = sema.typeOf(rhs).scalarType(mod);
                if (scalar_ty.isAnyFloat()) {
                    bounds_status = .non_integral;
                    break :refine_bounds;
                }
                const scalar_min = try scalar_ty.minInt(mod, scalar_ty);
                const scalar_max = try scalar_ty.maxInt(mod, scalar_ty);
                if (bounds_status == .unknown) {
                    cur_min_scalar = scalar_min;
                    cur_max_scalar = scalar_max;
                    bounds_status = .defined;
                } else {
                    cur_min_scalar = opFunc(cur_min_scalar, scalar_min, mod);
                    cur_max_scalar = opFunc(cur_max_scalar, scalar_max, mod);
                }
            },
            .non_integral => {},
        }
    }

    // Finally, refine the type based on the known bounds.
    const unrefined_ty = sema.typeOf(cur_minmax.?);
    if (unrefined_ty.scalarType(mod).isAnyFloat()) {
        // We can't refine floats, so we're done.
        return cur_minmax.?;
    }
    assert(bounds_status == .defined); // there were integral runtime operands
    const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
    const refined_ty = if (unrefined_ty.isVector(mod)) try mod.vectorType(.{
        .len = unrefined_ty.vectorLen(mod),
        .child = refined_scalar_ty.toIntern(),
    }) else refined_scalar_ty;

    if (!refined_ty.eql(unrefined_ty, mod)) {
        // We've reduced the type - cast the result down
        return block.addTyOp(.intcast, refined_ty, cur_minmax.?);
    }

    return cur_minmax.?;
}
|
|
|
|
/// Converts a many-pointer, C pointer, or slice into a pointer to an array
/// of `len` elements, preserving the original pointer attributes (alignment,
/// constness, volatility, allowzero, address space, sentinel). A single-item
/// pointer is assumed to already point at an array and is returned unchanged.
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
    const mod = sema.mod;
    const old_ptr_ty = sema.typeOf(ptr);
    const old_info = old_ptr_ty.ptrInfo(mod);

    // A single-item pointer already refers to an array; nothing to do.
    if (old_info.flags.size == .One) return ptr;

    const array_ty = try mod.arrayType(.{
        .len = len,
        .sentinel = old_info.sentinel,
        .child = old_info.child,
    });
    const array_ptr_ty = try sema.ptrType(.{
        .child = array_ty.toIntern(),
        .flags = .{
            .alignment = old_info.flags.alignment,
            .is_const = old_info.flags.is_const,
            .is_volatile = old_info.flags.is_volatile,
            .is_allowzero = old_info.flags.is_allowzero,
            .address_space = old_info.flags.address_space,
        },
    });
    // A slice must first be reduced to its raw pointer field before the cast.
    const raw_ptr = switch (old_info.flags.size) {
        .Slice => try block.addTyOp(.slice_ptr, old_ptr_ty.slicePtrFieldType(mod), ptr),
        else => ptr,
    };
    return block.addBitCast(array_ptr_ty, raw_ptr);
}
|
|
|
|
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
|
|
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
|
|
const src = inst_data.src();
|
|
const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
|
|
const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
|
|
const dest_ptr = try sema.resolveInst(extra.lhs);
|
|
const src_ptr = try sema.resolveInst(extra.rhs);
|
|
const dest_ty = sema.typeOf(dest_ptr);
|
|
const src_ty = sema.typeOf(src_ptr);
|
|
const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
|
|
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
|
|
const target = sema.mod.getTarget();
|
|
const mod = sema.mod;
|
|
|
|
if (dest_ty.isConstPtr(mod)) {
|
|
return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
|
|
}
|
|
|
|
if (dest_len == .none and src_len == .none) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "unknown @memcpy length", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, dest_src, msg, "destination type '{}' provides no length", .{
|
|
dest_ty.fmt(sema.mod),
|
|
});
|
|
try sema.errNote(block, src_src, msg, "source type '{}' provides no length", .{
|
|
src_ty.fmt(sema.mod),
|
|
});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
|
|
var len_val: ?Value = null;
|
|
|
|
if (dest_len != .none and src_len != .none) check: {
|
|
// If we can check at compile-time, no need for runtime safety.
|
|
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
|
|
len_val = dest_len_val;
|
|
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
|
|
if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, dest_src, msg, "length {} here", .{
|
|
dest_len_val.fmtValue(Type.usize, sema.mod),
|
|
});
|
|
try sema.errNote(block, src_src, msg, "length {} here", .{
|
|
src_len_val.fmtValue(Type.usize, sema.mod),
|
|
});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
break :check;
|
|
}
|
|
} else if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
|
|
len_val = src_len_val;
|
|
}
|
|
|
|
if (block.wantSafety()) {
|
|
const ok = try block.addBinOp(.cmp_eq, dest_len, src_len);
|
|
try sema.addSafetyCheck(block, src, ok, .memcpy_len_mismatch);
|
|
}
|
|
} else if (dest_len != .none) {
|
|
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
|
|
len_val = dest_len_val;
|
|
}
|
|
} else if (src_len != .none) {
|
|
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
|
|
len_val = src_len_val;
|
|
}
|
|
}
|
|
|
|
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
|
|
if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
|
|
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
|
|
const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
|
|
const len = try sema.usizeCast(block, dest_src, len_u64);
|
|
for (0..len) |i| {
|
|
const elem_index = try mod.intRef(Type.usize, i);
|
|
const dest_elem_ptr = try sema.elemPtrOneLayerOnly(
|
|
block,
|
|
src,
|
|
dest_ptr,
|
|
elem_index,
|
|
src,
|
|
true, // init
|
|
false, // oob_safety
|
|
);
|
|
const src_elem_ptr = try sema.elemPtrOneLayerOnly(
|
|
block,
|
|
src,
|
|
src_ptr,
|
|
elem_index,
|
|
src,
|
|
false, // init
|
|
false, // oob_safety
|
|
);
|
|
const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src);
|
|
try sema.storePtr2(
|
|
block,
|
|
src,
|
|
dest_elem_ptr,
|
|
dest_src,
|
|
uncoerced_elem,
|
|
src_src,
|
|
.store,
|
|
);
|
|
}
|
|
return;
|
|
} else break :rs src_src;
|
|
} else dest_src;
|
|
|
|
// If in-memory coercion is not allowed, explode this memcpy call into a
|
|
// for loop that copies element-wise.
|
|
// Likewise if this is an iterable rather than a pointer, do the same
|
|
// lowering. The AIR instruction requires pointers with element types of
|
|
// equal ABI size.
|
|
|
|
if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
|
|
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
|
|
}
|
|
|
|
const dest_elem_ty = dest_ty.elemType2(mod);
|
|
const src_elem_ty = src_ty.elemType2(mod);
|
|
if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
|
|
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
|
|
}
|
|
|
|
// If the length is comptime-known, then upgrade src and destination types
|
|
// into pointer-to-array. At this point we know they are both pointers
|
|
// already.
|
|
var new_dest_ptr = dest_ptr;
|
|
var new_src_ptr = src_ptr;
|
|
if (len_val) |val| {
|
|
const len = try val.toUnsignedIntAdvanced(sema);
|
|
if (len == 0) {
|
|
// This AIR instruction guarantees length > 0 if it is comptime-known.
|
|
return;
|
|
}
|
|
new_dest_ptr = try upgradeToArrayPtr(sema, block, dest_ptr, len);
|
|
new_src_ptr = try upgradeToArrayPtr(sema, block, src_ptr, len);
|
|
}
|
|
|
|
if (dest_len != .none) {
|
|
// Change the src from slice to a many pointer, to avoid multiple ptr
|
|
// slice extractions in AIR instructions.
|
|
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
|
|
if (new_src_ptr_ty.isSlice(mod)) {
|
|
new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
|
|
}
|
|
} else if (dest_len == .none and len_val == null) {
|
|
// Change the dest to a slice, since its type must have the length.
|
|
const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr);
|
|
new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false);
|
|
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
|
|
if (new_src_ptr_ty.isSlice(mod)) {
|
|
new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
|
|
}
|
|
}
|
|
|
|
try sema.requireRuntimeBlock(block, src, runtime_src);
|
|
|
|
// Aliasing safety check.
|
|
if (block.wantSafety()) {
|
|
const len = if (len_val) |v|
|
|
Air.internedToRef(v.toIntern())
|
|
else if (dest_len != .none)
|
|
dest_len
|
|
else
|
|
src_len;
|
|
|
|
// Extract raw pointer from dest slice. The AIR instructions could support them, but
|
|
// it would cause redundant machine code instructions.
|
|
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr);
|
|
const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod))
|
|
try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
|
|
else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: {
|
|
var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type;
|
|
assert(dest_manyptr_ty_key.flags.size == .One);
|
|
dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
|
|
dest_manyptr_ty_key.flags.size = .Many;
|
|
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
|
|
} else new_dest_ptr;
|
|
|
|
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
|
|
const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod))
|
|
try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty)
|
|
else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: {
|
|
var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type;
|
|
assert(src_manyptr_ty_key.flags.size == .One);
|
|
src_manyptr_ty_key.child = src_elem_ty.toIntern();
|
|
src_manyptr_ty_key.flags.size = .Many;
|
|
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
|
|
} else new_src_ptr;
|
|
|
|
// ok1: dest >= src + len
|
|
// ok2: src >= dest + len
|
|
const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src);
|
|
const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src);
|
|
const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len);
|
|
const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len);
|
|
const ok = try block.addBinOp(.bool_or, ok1, ok2);
|
|
try sema.addSafetyCheck(block, src, ok, .memcpy_alias);
|
|
}
|
|
|
|
_ = try block.addInst(.{
|
|
.tag = .memcpy,
|
|
.data = .{ .bin_op = .{
|
|
.lhs = new_dest_ptr,
|
|
.rhs = new_src_ptr,
|
|
} },
|
|
});
|
|
}
|
|
|
|
/// Semantic analysis for the `@memset` builtin: set every element behind the
/// destination pointer to a single value.
///
/// The destination must be a pointer type that carries a length (a slice, or
/// a single pointer to an array); otherwise a compile error is emitted. When
/// both the destination pointer and the element value are comptime-known (and
/// the pointer is comptime-mutable), the store is performed at comptime by
/// interning a repeated-element array value. Otherwise a `memset` (or
/// `memset_safe`, when safety is enabled) AIR instruction is emitted.
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    // arg0 is the destination pointer, arg1 is the fill value.
    const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const dest_ptr = try sema.resolveInst(extra.lhs);
    const uncoerced_elem = try sema.resolveInst(extra.rhs);
    const dest_ptr_ty = sema.typeOf(dest_ptr);
    try checkMemOperand(sema, block, dest_src, dest_ptr_ty);

    if (dest_ptr_ty.isConstPtr(mod)) {
        return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
    }

    // Determine the element type being stored. Only destination types that
    // provide a length are accepted: slices, and single pointers to arrays.
    const dest_elem_ty: Type = dest_elem_ty: {
        const ptr_info = dest_ptr_ty.ptrInfo(mod);
        switch (ptr_info.flags.size) {
            .Slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
            .One => {
                if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
                    break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(mod);
                }
            },
            .Many, .C => {},
        }
        // Many-pointers, C pointers, and single pointers to non-arrays have
        // no length to memset.
        return sema.failWithOwnedErrorMsg(block, msg: {
            const msg = try sema.errMsg(block, src, "unknown @memset length", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_src, msg, "destination type '{}' provides no length", .{
                dest_ptr_ty.fmt(mod),
            });
            break :msg msg;
        });
    };

    const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src);

    // Attempt the comptime path. Each `break :rs <src>` records which operand
    // forced the operation to runtime, for use in the error message emitted by
    // `requireRuntimeBlock` below.
    const runtime_src = rs: {
        const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
        // Obtain the destination length via a comptime `.len` field access.
        const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src);
        const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
        const len = try sema.usizeCast(block, dest_src, len_u64);
        if (len == 0) {
            // This AIR instruction guarantees length > 0 if it is comptime-known.
            return;
        }

        if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
        const elem_val = try sema.resolveValue(elem) orelse break :rs value_src;
        // Perform the comptime store: build an array value consisting of the
        // repeated element and store it through a pointer-to-array view of
        // the destination.
        const array_ty = try mod.arrayType(.{
            .child = dest_elem_ty.toIntern(),
            .len = len_u64,
        });
        const array_val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .repeated_elem = elem_val.toIntern() },
        } })));
        const array_ptr_ty = ty: {
            var info = dest_ptr_ty.ptrInfo(mod);
            info.flags.size = .One;
            info.child = array_ty.toIntern();
            break :ty try mod.ptrType(info);
        };
        // For a slice destination, store through its raw pointer part.
        const raw_ptr_val = if (dest_ptr_ty.isSlice(mod)) ptr_val.slicePtr(mod) else ptr_val;
        const array_ptr_val = try mod.getCoerced(raw_ptr_val, array_ptr_ty);
        return sema.storePtrVal(block, src, array_ptr_val, array_val, array_ty);
    };

    // Runtime path: emit the AIR instruction.
    try sema.requireRuntimeBlock(block, src, runtime_src);
    _ = try block.addInst(.{
        .tag = if (block.wantSafety()) .memset_safe else .memset,
        .data = .{ .bin_op = .{
            .lhs = dest_ptr,
            .rhs = elem,
        } },
    });
}
|
|
|
|
/// Semantic analysis for the `@asyncCall` builtin. Async is currently
/// unsupported, so this unconditionally reports a "use of async" error at the
/// builtin's source location.
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    return sema.failWithUseOfAsync(block, LazySrcLoc.nodeOffset(un_node.node));
}
|
|
|
|
/// Semantic analysis for `resume`. Async is currently unsupported, so this
/// unconditionally reports a "use of async" error at the instruction's source
/// location.
fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    return sema.failWithUseOfAsync(block, un_node.src());
}
|
|
|
|
/// Semantic analysis for `await`. Async is currently unsupported, so this
/// unconditionally reports a "use of async" error at the instruction's source
/// location.
fn zirAwait(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    return sema.failWithUseOfAsync(block, un_node.src());
}
|
|
|
|
/// Semantic analysis for `nosuspend await`. Async is currently unsupported,
/// so this unconditionally reports a "use of async" error at the builtin's
/// source location.
fn zirAwaitNosuspend(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    return sema.failWithUseOfAsync(block, LazySrcLoc.nodeOffset(un_node.node));
}
|
|
|
|
/// Semantic analysis for an extended `var` ZIR instruction: a container-level
/// variable declaration (possibly `extern`, `threadlocal`, with a lib name
/// and/or an initializer). Returns a reference to the interned `variable`
/// value owned by `sema.owner_decl_index`.
fn zirVarExtended(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
    const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
    const small: Zir.Inst.ExtendedVar.Small = @bitCast(extended.small);

    // Variable-length trailing data follows `extra`; these reads must occur
    // in ZIR encoding order (lib_name first, then init).
    var extra_index: usize = extra.end;

    const lib_name = if (small.has_lib_name) lib_name: {
        const lib_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
        const lib_name = sema.code.nullTerminatedString(lib_name_index);
        extra_index += 1;
        try sema.handleExternLibName(block, ty_src, lib_name);
        break :lib_name lib_name;
    } else null;

    // ZIR supports encoding this information but it is not used; the information
    // is encoded via the Decl entry.
    assert(!small.has_align);

    const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
        const init_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk try sema.resolveInst(init_ref);
    } else .none;

    // When no explicit type is present, the variable's type is inferred from
    // the initializer expression.
    const have_ty = extra.data.var_type != .none;
    const var_ty = if (have_ty)
        try sema.resolveType(block, ty_src, extra.data.var_type)
    else
        sema.typeOf(uncasted_init);

    // Container-level variable initializers must be comptime-known; coerce to
    // the declared type first when one was given.
    const init_val = if (uncasted_init != .none) blk: {
        const init = if (have_ty)
            try sema.coerce(block, var_ty, uncasted_init, init_src)
        else
            uncasted_init;

        break :blk ((try sema.resolveValue(init)) orelse {
            return sema.failWithNeededComptime(block, init_src, .{
                .needed_comptime_reason = "container level variable initializers must be comptime-known",
            });
        }).toIntern();
    } else .none;

    try sema.validateVarType(block, ty_src, var_ty, small.is_extern);

    return Air.internedToRef((try mod.intern(.{ .variable = .{
        .ty = var_ty.toIntern(),
        .init = init_val,
        .decl = sema.owner_decl_index,
        .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, lib_name),
        .is_extern = small.is_extern,
        .is_const = small.is_const,
        .is_threadlocal = small.is_threadlocal,
        .is_weak_linkage = false,
    } })));
}
|
|
|
|
/// Semantic analysis for a "fancy" function declaration: one carrying any of
/// alignment, address space, link section, calling convention, or return type
/// expressions (each encoded either as an inline ZIR body or as a ZIR ref),
/// plus optional lib name and noalias bits. Decodes the variable-length
/// trailing data in ZIR encoding order and forwards everything to
/// `funcCommon`.
///
/// Each attribute expression may evaluate to the generic poison value (when
/// it depends on generic parameters); in that case the attribute is left
/// unresolved (`null` / `.generic` / `Type.generic_poison`) to be resolved at
/// instantiation time.
fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
    const target = mod.getTarget();

    const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node };
    const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node };
    const section_src: LazySrcLoc = .{ .node_offset_fn_type_section = inst_data.src_node };
    const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
    const ret_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };
    const has_body = extra.data.body_len != 0;

    // Trailing data cursor. The decode order below must exactly match the ZIR
    // encoding order: lib_name, align, addrspace, section, cc, ret_ty,
    // noalias bits, body, src locs.
    var extra_index: usize = extra.end;

    const lib_name: ?[]const u8 = if (extra.data.bits.has_lib_name) blk: {
        const lib_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
        const lib_name = sema.code.nullTerminatedString(lib_name_index);
        extra_index += 1;
        break :blk lib_name;
    } else null;

    if (has_body and
        (extra.data.bits.has_align_body or extra.data.bits.has_align_ref) and
        !target_util.supportsFunctionAlignment(target))
    {
        return sema.fail(block, align_src, "target does not support function alignment", .{});
    }

    // Alignment: `null` means generic poison (resolve later); `.none` means
    // the target's default function alignment.
    const @"align": ?Alignment = if (extra.data.bits.has_align_body) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, body_len);
        extra_index += body.len;

        const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, .{
            .needed_comptime_reason = "alignment must be comptime-known",
        });
        if (val.isGenericPoison()) {
            break :blk null;
        }
        const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema));
        const default = target_util.defaultFunctionAlignment(target);
        // Normalize the default alignment to `.none`.
        break :blk if (alignment == default) .none else alignment;
    } else if (extra.data.bits.has_align_ref) blk: {
        const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const uncoerced_align = sema.resolveInst(align_ref) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const coerced_align = sema.coerce(block, Type.u29, uncoerced_align, align_src) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const align_val = sema.resolveConstDefinedValue(block, align_src, coerced_align, .{
            .needed_comptime_reason = "alignment must be comptime-known",
        }) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema));
        const default = target_util.defaultFunctionAlignment(target);
        break :blk if (alignment == default) .none else alignment;
    } else .none;

    // Address space: `null` means generic poison; otherwise defaults to the
    // target's function address space.
    const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, body_len);
        extra_index += body.len;

        const addrspace_ty = Type.fromInterned(.address_space_type);
        const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, .{
            .needed_comptime_reason = "addrspace must be comptime-known",
        });
        if (val.isGenericPoison()) {
            break :blk null;
        }
        break :blk mod.toEnum(std.builtin.AddressSpace, val);
    } else if (extra.data.bits.has_addrspace_ref) blk: {
        const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const addrspace_ty = Type.fromInterned(.address_space_type);
        const uncoerced_addrspace = sema.resolveInst(addrspace_ref) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const coerced_addrspace = sema.coerce(block, addrspace_ty, uncoerced_addrspace, addrspace_src) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const addrspace_val = sema.resolveConstDefinedValue(block, addrspace_src, coerced_addrspace, .{
            .needed_comptime_reason = "addrspace must be comptime-known",
        }) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_val);
    } else target_util.defaultAddressSpace(target, .function);

    // Link section: `.generic` means generic poison; `.default` when absent.
    const section: Section = if (extra.data.bits.has_section_body) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, body_len);
        extra_index += body.len;

        const ty = Type.slice_const_u8;
        const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, .{
            .needed_comptime_reason = "linksection must be comptime-known",
        });
        if (val.isGenericPoison()) {
            break :blk .generic;
        }
        break :blk .{ .explicit = try val.toIpString(ty, mod) };
    } else if (extra.data.bits.has_section_ref) blk: {
        const section_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, .{
            .needed_comptime_reason = "linksection must be comptime-known",
        }) catch |err| switch (err) {
            error.GenericPoison => {
                break :blk .generic;
            },
            else => |e| return e,
        };
        break :blk .{ .explicit = section_name };
    } else .default;

    // Calling convention: `null` means generic poison. When absent, exported
    // functions with a body default to `.C`, everything else to
    // `.Unspecified`.
    const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, body_len);
        extra_index += body.len;

        const cc_ty = try sema.getBuiltinType("CallingConvention");
        const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{
            .needed_comptime_reason = "calling convention must be comptime-known",
        });
        if (val.isGenericPoison()) {
            break :blk null;
        }
        break :blk mod.toEnum(std.builtin.CallingConvention, val);
    } else if (extra.data.bits.has_cc_ref) blk: {
        const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const cc_ty = Type.fromInterned(.calling_convention_type);
        const uncoerced_cc = sema.resolveInst(cc_ref) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const coerced_cc = sema.coerce(block, cc_ty, uncoerced_cc, cc_src) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        const cc_val = sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{
            .needed_comptime_reason = "calling convention must be comptime-known",
        }) catch |err| switch (err) {
            error.GenericPoison => break :blk null,
            else => |e| return e,
        };
        break :blk mod.toEnum(std.builtin.CallingConvention, cc_val);
    } else if (sema.owner_decl.is_exported and has_body)
        .C
    else
        .Unspecified;

    // Return type: `Type.generic_poison` when generic; `void` when absent.
    const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, body_len);
        extra_index += body.len;

        const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{
            .needed_comptime_reason = "return type must be comptime-known",
        });
        const ty = val.toType();
        break :blk ty;
    } else if (extra.data.bits.has_ret_ty_ref) blk: {
        const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, .{
            .needed_comptime_reason = "return type must be comptime-known",
        }) catch |err| switch (err) {
            error.GenericPoison => {
                break :blk Type.generic_poison;
            },
            else => |e| return e,
        };
        const ty = ret_ty_tv.val.toType();
        break :blk ty;
    } else Type.void;

    // Bitmask of parameters marked `noalias`.
    const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
        const x = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk x;
    } else 0;

    // Source locations trail the body when one is present.
    var src_locs: Zir.Inst.Func.SrcLocs = undefined;
    if (has_body) {
        extra_index += extra.data.body_len;
        src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
    }

    const is_var_args = extra.data.bits.is_var_args;
    const is_inferred_error = extra.data.bits.is_inferred_error;
    const is_extern = extra.data.bits.is_extern;
    const is_noinline = extra.data.bits.is_noinline;

    return sema.funcCommon(
        block,
        inst_data.src_node,
        inst,
        @"align",
        @"addrspace",
        section,
        cc,
        ret_ty,
        is_var_args,
        is_inferred_error,
        is_extern,
        has_body,
        src_locs,
        lib_name,
        noalias_bits,
        is_noinline,
    );
}
|
|
|
|
/// Semantic analysis for `@cUndef` inside a `@cImport` block: appends an
/// `#undef` line for the given comptime-known macro name to the C import
/// buffer. Only reachable inside a `@cImport` block, where `c_import_buf` is
/// non-null.
fn zirCUndef(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };

    const macro_name = try sema.resolveConstString(block, name_src, un_node.operand, .{
        .needed_comptime_reason = "name of macro being undefined must be comptime-known",
    });
    const writer = block.c_import_buf.?.writer();
    try writer.print("#undef {s}\n", .{macro_name});
    return .void_value;
}
|
|
|
|
/// Semantic analysis for `@cInclude` inside a `@cImport` block: appends an
/// `#include <...>` line for the given comptime-known path to the C import
/// buffer. Only reachable inside a `@cImport` block, where `c_import_buf` is
/// non-null.
fn zirCInclude(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const path_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };

    const include_path = try sema.resolveConstString(block, path_src, un_node.operand, .{
        .needed_comptime_reason = "path being included must be comptime-known",
    });
    const writer = block.c_import_buf.?.writer();
    try writer.print("#include <{s}>\n", .{include_path});
    return .void_value;
}
|
|
|
|
/// Semantic analysis for `@cDefine` inside a `@cImport` block: appends a
/// `#define` line to the C import buffer. The macro name must be a
/// comptime-known string. The value operand is either `void` (producing a
/// bare `#define NAME`) or a comptime-known string (producing
/// `#define NAME VALUE`). Only reachable inside a `@cImport` block, where
/// `c_import_buf` is non-null.
fn zirCDefine(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

    // Fixed: these reasons previously said "macro being undefined", a
    // copy-paste from the `@cUndef` implementation; this builtin defines.
    const name = try sema.resolveConstString(block, name_src, extra.lhs, .{
        .needed_comptime_reason = "name of macro being defined must be comptime-known",
    });
    const rhs = try sema.resolveInst(extra.rhs);
    if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
        const value = try sema.resolveConstString(block, val_src, extra.rhs, .{
            .needed_comptime_reason = "value of macro being defined must be comptime-known",
        });
        try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
    } else {
        // A void value means the macro is defined without a replacement.
        try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
    }
    return .void_value;
}
|
|
|
|
/// Semantic analysis for `@wasmMemorySize`. Rejects non-WebAssembly targets,
/// requires the memory index to be comptime-known, and lowers to a
/// `wasm_memory_size` AIR instruction.
fn zirWasmMemorySize(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const builtin_src = LazySrcLoc.nodeOffset(un_node.node);
    const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };

    const target = sema.mod.getTarget();
    if (!target.isWasm()) {
        return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
    }

    const resolved_index = try sema.resolveInt(block, index_src, un_node.operand, Type.u32, .{
        .needed_comptime_reason = "wasm memory size index must be comptime-known",
    });
    const index: u32 = @intCast(resolved_index);

    try sema.requireRuntimeBlock(block, builtin_src, null);
    return block.addInst(.{
        .tag = .wasm_memory_size,
        .data = .{ .pl_op = .{
            .operand = .none,
            .payload = index,
        } },
    });
}
|
|
|
|
/// Semantic analysis for `@wasmMemoryGrow`. Rejects non-WebAssembly targets,
/// requires the memory index to be comptime-known, coerces the delta operand
/// to `u32`, and lowers to a `wasm_memory_grow` AIR instruction.
fn zirWasmMemoryGrow(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const builtin_src = LazySrcLoc.nodeOffset(extra.node);
    const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const delta_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
    const target = sema.mod.getTarget();
    if (!target.isWasm()) {
        return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
    }

    // Fixed: this reason previously said "wasm memory size index", a
    // copy-paste from the `@wasmMemorySize` implementation.
    const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{
        .needed_comptime_reason = "wasm memory grow index must be comptime-known",
    }));
    const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src);

    try sema.requireRuntimeBlock(block, builtin_src, null);
    return block.addInst(.{
        .tag = .wasm_memory_grow,
        .data = .{ .pl_op = .{
            .operand = delta,
            .payload = index,
        } },
    });
}
|
|
|
|
/// Resolves the `std.builtin.PrefetchOptions` operand of `@prefetch` to a
/// comptime-known value. Coerces the operand to the builtin options type and
/// requires each field (`rw`, `locality`, `cache`) to be comptime-known.
fn resolvePrefetchOptions(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.PrefetchOptions {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const options_ty = try sema.getBuiltinType("PrefetchOptions");
    const uncoerced_options = try sema.resolveInst(zir_ref);
    const options = try sema.coerce(block, options_ty, uncoerced_options, src);

    const rw_src = sema.maybeOptionsSrc(block, src, "rw");
    const locality_src = sema.maybeOptionsSrc(block, src, "locality");
    const cache_src = sema.maybeOptionsSrc(block, src, "cache");

    const rw_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src);
    const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw_ref, .{
        .needed_comptime_reason = "prefetch read/write must be comptime-known",
    });

    const locality_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src);
    const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality_ref, .{
        .needed_comptime_reason = "prefetch locality must be comptime-known",
    });

    const cache_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src);
    const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache_ref, .{
        .needed_comptime_reason = "prefetch cache must be comptime-known",
    });

    return .{
        .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
        .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)),
        .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
    };
}
|
|
|
|
/// Semantic analysis for `@prefetch`. Validates the pointer operand, resolves
/// the comptime-known `PrefetchOptions`, and emits a `prefetch` AIR
/// instruction in runtime code; in a comptime scope the builtin is a no-op.
/// Always evaluates to `void`.
fn zirPrefetch(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const bin_node = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = bin_node.node };
    const opts_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = bin_node.node };

    const ptr = try sema.resolveInst(bin_node.lhs);
    try sema.checkPtrOperand(block, ptr_src, sema.typeOf(ptr));

    // Resolve without a source location first; only when an error message
    // actually needs one is the resolution redone with `opts_src`.
    const options = sema.resolvePrefetchOptions(block, .unneeded, bin_node.rhs) catch |err| switch (err) {
        error.NeededSourceLocation => {
            _ = try sema.resolvePrefetchOptions(block, opts_src, bin_node.rhs);
            unreachable;
        },
        else => |e| return e,
    };

    if (!block.is_comptime) {
        _ = try block.addInst(.{
            .tag = .prefetch,
            .data = .{ .prefetch = .{
                .ptr = ptr,
                .rw = options.rw,
                .locality = options.locality,
                .cache = options.cache,
            } },
        });
    }

    return .void_value;
}
|
|
|
|
/// Resolves the `std.builtin.ExternOptions` operand of `@extern` to
/// comptime-known, interned values. Every field must be comptime-known; the
/// symbol name must be non-empty, a provided library name must be non-empty,
/// and the linkage must be `.Strong` or `.Weak`.
fn resolveExternOptions(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!struct {
    name: InternPool.NullTerminatedString,
    library_name: InternPool.OptionalNullTerminatedString = .none,
    linkage: std.builtin.GlobalLinkage = .Strong,
    is_thread_local: bool = false,
} {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const options_inst = try sema.resolveInst(zir_ref);
    const extern_options_ty = try sema.getBuiltinType("ExternOptions");
    const options = try sema.coerce(block, extern_options_ty, options_inst, src);

    const name_src = sema.maybeOptionsSrc(block, src, "name");
    const library_src = sema.maybeOptionsSrc(block, src, "library");
    const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
    const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local");

    // Extract and comptime-resolve each field of the options struct in turn.
    const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
    const name_val = try sema.resolveConstDefinedValue(block, name_src, name_ref, .{
        .needed_comptime_reason = "name of the extern symbol must be comptime-known",
    });
    const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);

    const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src);
    const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{
        .needed_comptime_reason = "library in which extern symbol is must be comptime-known",
    });

    const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
    const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{
        .needed_comptime_reason = "linkage of the extern symbol must be comptime-known",
    });
    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);

    const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src);
    const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{
        .needed_comptime_reason = "threadlocality of the extern symbol must be comptime-known",
    });

    // library_name is optional; when present it must be non-empty and is
    // also registered as an extern lib name for the link step.
    const library_name = if (library_name_val.optionalValue(mod)) |library_name_payload| library_name: {
        const library_name = try library_name_payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
        if (library_name.len == 0) {
            return sema.fail(block, library_src, "library name cannot be empty", .{});
        }
        try sema.handleExternLibName(block, library_src, library_name);
        break :library_name library_name;
    } else null;

    if (name.len == 0) {
        return sema.fail(block, name_src, "extern symbol name cannot be empty", .{});
    }

    if (linkage != .Weak and linkage != .Strong) {
        return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{});
    }

    return .{
        .name = try ip.getOrPutString(gpa, name),
        .library_name = try ip.getOrPutStringOpt(gpa, library_name),
        .linkage = linkage,
        .is_thread_local = is_thread_local_val.toBool(),
    };
}
|
|
|
|
/// Analyzes the `@extern` builtin: validates the requested (optional) pointer
/// type, resolves the `ExternOptions` operand, creates an anonymous extern
/// `Decl` for the symbol, and returns a pointer to it coerced to the
/// requested type.
fn zirBuiltinExtern(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

    // The first operand must resolve to a pointer (or optional pointer) type;
    // the extern decl itself gets the pointee type.
    var ty = try sema.resolveType(block, ty_src, extra.lhs);
    if (!ty.isPtrAtRuntime(mod)) {
        return sema.fail(block, ty_src, "expected (optional) pointer", .{});
    }
    if (!try sema.validateExternType(ty, .other)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);
            const src_decl = sema.mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(ty_src, mod), ty, .other);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // First attempt resolution without computing a source location (cheap
    // path); only if an error actually needs one, re-run with the real
    // location so the failure is reported properly.
    const options = sema.resolveExternOptions(block, .unneeded, extra.rhs) catch |err| switch (err) {
        error.NeededSourceLocation => {
            _ = try sema.resolveExternOptions(block, options_src, extra.rhs);
            unreachable;
        },
        else => |e| return e,
    };

    // Weak symbols may be null at runtime, so wrap the pointer in an optional
    // unless the pointer type already allows a zero address.
    if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
        ty = try mod.optionalType(ty.toIntern());
    }
    const ptr_info = ty.ptrInfo(mod);

    // TODO check duplicate extern

    const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, .none);
    errdefer mod.destroyDecl(new_decl_index);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.name = options.name;

    new_decl.src_line = sema.owner_decl.src_line;
    new_decl.ty = Type.fromInterned(ptr_info.child);
    // A function pointee becomes an extern function; any other pointee
    // becomes an extern variable with no initializer.
    new_decl.val = Value.fromInterned(
        if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn)
            try ip.getExternFunc(sema.gpa, .{
                .ty = ptr_info.child,
                .decl = new_decl_index,
                .lib_name = options.library_name,
            })
        else
            try mod.intern(.{ .variable = .{
                .ty = ptr_info.child,
                .init = .none,
                .decl = new_decl_index,
                .lib_name = options.library_name,
                .is_extern = true,
                .is_const = ptr_info.flags.is_const,
                .is_threadlocal = options.is_thread_local,
                .is_weak_linkage = options.linkage == .Weak,
            } }),
    );
    new_decl.alignment = .none;
    new_decl.@"linksection" = .none;
    new_decl.has_tv = true;
    new_decl.owns_tv = true;
    new_decl.analysis = .complete;

    try sema.ensureDeclAnalyzed(new_decl_index);

    // Build a pointer value to the new decl. If `ty` was wrapped in an
    // optional above, the inner pointer type is used for the ptr value and
    // the result is coerced back to the optional type.
    return Air.internedToRef((try mod.getCoerced(Value.fromInterned((try mod.intern(.{ .ptr = .{
        .ty = switch (ip.indexToKey(ty.toIntern())) {
            .ptr_type => ty.toIntern(),
            .opt_type => |child_type| child_type,
            else => unreachable,
        },
        .addr = .{ .decl = new_decl_index },
    } }))), ty)).toIntern());
}
|
|
|
|
/// Analyzes the GPU work-item builtins (`@workItemId`, `@workGroupSize`,
/// `@workGroupId`). The dimension operand must be comptime-known; the result
/// is a runtime AIR instruction whose tag mirrors the ZIR tag.
fn zirWorkItem(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
    const un_node = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const dim_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = un_node.node };
    const call_src = LazySrcLoc.nodeOffset(un_node.node);
    const target = sema.mod.getTarget();

    // TODO: Allow for other GPU targets.
    if (target.cpu.arch != .amdgcn) {
        return sema.fail(block, call_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)});
    }

    // The dimension selects which axis (x/y/z) is queried; it must be known
    // at compile time.
    const dim: u32 = @intCast(try sema.resolveInt(block, dim_src, un_node.operand, Type.u32, .{
        .needed_comptime_reason = "dimension must be comptime-known",
    }));
    try sema.requireRuntimeBlock(block, call_src, null);

    const air_tag: Air.Inst.Tag = switch (zir_tag) {
        .work_item_id => .work_item_id,
        .work_group_size => .work_group_size,
        .work_group_id => .work_group_id,
        else => unreachable,
    };
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .pl_op = .{
            .operand = .none,
            .payload = dim,
        } },
    });
}
|
|
|
|
/// Analyzes `@inComptime`: yields `true` exactly when the current block is
/// being evaluated at compile time.
fn zirInComptime(
    sema: *Sema,
    block: *Block,
) CompileError!Air.Inst.Ref {
    _ = sema;
    if (block.is_comptime) return .bool_true;
    return .bool_false;
}
|
|
|
|
/// Fails compilation if `block` is a comptime scope, since the caller is
/// about to emit a runtime-only operation. `runtime_src`, when provided,
/// points at the operand that forced the operation to be runtime.
fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void {
    if (!block.is_comptime) return;

    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "unable to evaluate comptime expression", .{});
        errdefer err_msg.destroy(sema.gpa);

        if (runtime_src) |rt_src| {
            try sema.errNote(block, rt_src, err_msg, "operation is runtime due to this operand", .{});
        }
        // Explain why the surrounding scope is comptime in the first place.
        if (block.comptime_reason) |reason| {
            try reason.explain(sema, err_msg);
        }
        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Emit a compile error if `var_ty` cannot be used for a runtime variable.
/// Extern variables must be extern-compatible; non-extern variables must not
/// be opaque; and no runtime variable may have a comptime-only type.
fn validateVarType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    var_ty: Type,
    is_extern: bool,
) CompileError!void {
    const mod = sema.mod;

    if (is_extern) {
        if (!try sema.validateExternType(var_ty, .other)) {
            const msg = blk: {
                const err_msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)});
                errdefer err_msg.destroy(sema.gpa);
                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(err_msg, src_decl.toSrcLoc(src, mod), var_ty, .other);
                break :blk err_msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
    } else if (var_ty.zigTypeTag(mod) == .Opaque) {
        return sema.fail(
            block,
            src,
            "non-extern variable with opaque type '{}'",
            .{var_ty.fmt(mod)},
        );
    }

    // Comptime-only types cannot back a runtime `var`.
    if (!try sema.typeRequiresComptime(var_ty)) return;

    const msg = blk: {
        const err_msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)});
        errdefer err_msg.destroy(sema.gpa);

        const src_decl = mod.declPtr(block.src_decl);
        try sema.explainWhyTypeIsComptime(err_msg, src_decl.toSrcLoc(src, mod), var_ty);
        // comptime_int/comptime_float have an easy fix; suggest it.
        switch (var_ty.zigTypeTag(mod)) {
            .ComptimeInt, .ComptimeFloat => try sema.errNote(block, src, err_msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}),
            else => {},
        }

        break :blk err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Set of already-visited type indices, used by the "why is this type
/// comptime-only" explainers to terminate on recursive/self-referential types.
const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void);
|
|
|
|
/// Appends notes to `msg` explaining why `ty` is comptime-only. Fully
/// resolves `ty` first, then delegates to the recursive inner helper with a
/// fresh visited-type set to guard against cycles.
fn explainWhyTypeIsComptime(
    sema: *Sema,
    msg: *Module.ErrorMsg,
    src_loc: Module.SrcLoc,
    ty: Type,
) CompileError!void {
    var seen = TypeSet{};
    defer seen.deinit(sema.gpa);

    try sema.resolveTypeFully(ty);
    return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &seen);
}
|
|
|
|
/// Recursive worker for `explainWhyTypeIsComptime`. Walks the structure of
/// `ty` and appends a note for each reason the type is comptime-only.
/// `type_set` records visited struct/union types so recursion terminates on
/// self-referential types.
fn explainWhyTypeIsComptimeInner(
    sema: *Sema,
    msg: *Module.ErrorMsg,
    src_loc: Module.SrcLoc,
    ty: Type,
    type_set: *TypeSet,
) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    switch (ty.zigTypeTag(mod)) {
        // Plain runtime types: nothing to explain.
        .Bool,
        .Int,
        .Float,
        .ErrorSet,
        .Enum,
        .Frame,
        .AnyFrame,
        .Void,
        => return,

        .Fn => {
            try mod.errNoteNonLazy(src_loc, msg, "use '*const {}' for a function pointer type", .{
                ty.fmt(sema.mod),
            });
        },

        .Type => {
            try mod.errNoteNonLazy(src_loc, msg, "types are not available at runtime", .{});
        },

        // Inherently comptime-only kinds; the top-level error already says so.
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .NoReturn,
        .Undefined,
        .Null,
        => return,

        .Opaque => {
            try mod.errNoteNonLazy(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(sema.mod)});
        },

        // Aggregates: the reason lies in the element/child type.
        .Array, .Vector => {
            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
        },
        .Pointer => {
            const elem_ty = ty.elemType2(mod);
            if (elem_ty.zigTypeTag(mod) == .Fn) {
                const fn_info = mod.typeToFunc(elem_ty).?;
                if (fn_info.is_generic) {
                    try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
                }
                switch (fn_info.cc) {
                    .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
                    else => {},
                }
                if (Type.fromInterned(fn_info.return_type).comptimeOnly(mod)) {
                    try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
                }
                return;
            }
            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
        },

        .Optional => {
            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
        },
        .ErrorUnion => {
            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set);
        },

        .Struct => {
            // Already explained this type (or currently explaining it): stop.
            if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;

            if (mod.typeToStruct(ty)) |struct_type| {
                for (0..struct_type.field_types.len) |i| {
                    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                    const field_src_loc = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
                        .index = i,
                        .range = .type,
                    });

                    if (try sema.typeRequiresComptime(field_ty)) {
                        try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
                        try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field_ty, type_set);
                    }
                }
            }
            // TODO tuples
        },

        .Union => {
            // Already explained this type (or currently explaining it): stop.
            if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;

            if (mod.typeToUnion(ty)) |union_obj| {
                for (0..union_obj.field_types.len) |i| {
                    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]);
                    const field_src_loc = mod.fieldSrcLoc(union_obj.decl, .{
                        .index = i,
                        .range = .type,
                    });

                    if (try sema.typeRequiresComptime(field_ty)) {
                        try mod.errNoteNonLazy(field_src_loc, msg, "union requires comptime because of this field", .{});
                        try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field_ty, type_set);
                    }
                }
            }
        },
    }
}
|
|
|
|
/// Where a type appears when checking extern compatibility; some positions
/// have extra allowances (e.g. `void` as a struct field, `noreturn` as a
/// return type) or restrictions (e.g. arrays as parameters).
const ExternPosition = enum {
    /// Function return type.
    ret_ty,
    /// Function parameter type.
    param_ty,
    /// Field of an extern union.
    union_field,
    /// Field of an extern struct.
    struct_field,
    /// Element of an array or vector.
    element,
    /// Any other position (variable, pointee, etc.).
    other,
};
|
|
|
|
/// Returns true if `ty` is allowed in extern types.
/// Does *NOT* require `ty` to be resolved in any way.
/// Calls `resolveTypeLayout` for packed containers.
/// `position` describes where the type occurs, which affects what is allowed
/// (e.g. `void` is fine as a field/element but not as a variable type).
fn validateExternType(
    sema: *Sema,
    ty: Type,
    position: ExternPosition,
) !bool {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        // No guaranteed in-memory representation.
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Undefined,
        .Null,
        .ErrorUnion,
        .ErrorSet,
        .Frame,
        => return false,
        // Zero-bit `void` is tolerated in aggregate/return positions only.
        .Void => return position == .union_field or position == .ret_ty or position == .struct_field or position == .element,
        .NoReturn => return position == .ret_ty,
        .Opaque,
        .Bool,
        .Float,
        .AnyFrame,
        => return true,
        .Pointer => {
            // Function pointers must be const and point to an extern-compatible fn.
            if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
                return ty.isConstPtr(mod) and try sema.validateExternType(ty.childType(mod), .other);
            }
            // Slices (ptr+len pairs) and comptime-only pointers are rejected.
            return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty));
        },
        // Only C-ABI-sized integers are allowed.
        .Int => switch (ty.intInfo(mod).bits) {
            0, 8, 16, 32, 64, 128 => return true,
            else => return false,
        },
        .Fn => {
            if (position != .other) return false;
            const target = sema.mod.getTarget();
            // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
            // The goal is to experiment with more integrated CPU/GPU code.
            if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
                return true;
            }
            // Calling conventions that allow Zig-only types are not extern.
            return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
        },
        .Enum => {
            // An enum is extern-compatible iff its integer tag type is.
            return sema.validateExternType(ty.intTagType(mod), position);
        },
        .Struct, .Union => switch (ty.containerLayout(mod)) {
            .Extern => return true,
            // Packed containers are allowed when ABI-sized.
            .Packed => {
                const bit_size = try ty.bitSizeAdvanced(mod, sema);
                switch (bit_size) {
                    0, 8, 16, 32, 64, 128 => return true,
                    else => return false,
                }
            },
            // Auto layout is only allowed when the type has no runtime bits.
            .Auto => return !(try sema.typeHasRuntimeBits(ty)),
        },
        .Array => {
            // Arrays decay/copy differently in C; forbid them as params/returns.
            if (position == .ret_ty or position == .param_ty) return false;
            return sema.validateExternType(ty.elemType2(mod), .element);
        },
        .Vector => return sema.validateExternType(ty.elemType2(mod), .element),
        // Only pointer-like optionals (null = zero address) have a C representation.
        .Optional => return ty.isPtrLikeOptional(mod),
    }
}
|
|
|
|
/// Appends notes to `msg` explaining why `validateExternType` rejected `ty`
/// in the given `position`. Mirrors the structure of `validateExternType`;
/// types that are actually extern-compatible produce no notes.
fn explainWhyTypeIsNotExtern(
    sema: *Sema,
    msg: *Module.ErrorMsg,
    src_loc: Module.SrcLoc,
    ty: Type,
    position: ExternPosition,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        // These are extern-compatible; nothing to explain.
        .Opaque,
        .Bool,
        .Float,
        .AnyFrame,
        => return,

        // Comptime-only kinds; the top-level error message suffices.
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Undefined,
        .Null,
        .ErrorUnion,
        .ErrorSet,
        .Frame,
        => return,

        .Pointer => {
            if (ty.isSlice(mod)) {
                try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
            } else {
                const pointee_ty = ty.childType(mod);
                if (!ty.isConstPtr(mod) and pointee_ty.zigTypeTag(mod) == .Fn) {
                    try mod.errNoteNonLazy(src_loc, msg, "pointer to extern function must be 'const'", .{});
                } else if (try sema.typeRequiresComptime(ty)) {
                    try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
                    try sema.explainWhyTypeIsComptime(msg, src_loc, ty);
                }
                // Recurse into the pointee in case it carries further reasons.
                try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other);
            }
        },
        .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
        .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
        // Two distinct hints: non-power-of-two widths vs power-of-two widths
        // that are still not one of the ABI sizes (e.g. u4, u256).
        .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
            try mod.errNoteNonLazy(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{});
        } else {
            try mod.errNoteNonLazy(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", .{});
        },
        .Fn => {
            // A bare fn type outside `position == .other` needs a fn pointer.
            if (position != .other) {
                try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
                try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
                return;
            }
            switch (ty.fnCallingConvention(mod)) {
                .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}),
                .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}),
                .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}),
                else => return,
            }
        },
        .Enum => {
            // The enum itself is fine; its integer tag type is the problem.
            const tag_ty = ty.intTagType(mod);
            try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
            try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
        },
        .Struct => try mod.errNoteNonLazy(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}),
        .Union => try mod.errNoteNonLazy(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}),
        .Array => {
            if (position == .ret_ty) {
                return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a return type", .{});
            } else if (position == .param_ty) {
                return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{});
            }
            try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
        },
        .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
        .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
    }
}
|
|
|
|
/// Returns true if `ty` is allowed in packed types.
/// Does not require `ty` to be resolved in any way, but may resolve whether
/// it is comptime-only (for pointers).
fn validatePackedType(sema: *Sema, ty: Type) !bool {
    const mod = sema.mod;
    return switch (ty.zigTypeTag(mod)) {
        // Fixed-layout scalar-like types are always allowed.
        .Void,
        .Bool,
        .Float,
        .Int,
        .Vector,
        .Enum,
        => true,

        // Only pointer-like optionals have a guaranteed representation.
        .Optional => ty.isPtrLikeOptional(mod),
        // Non-slice, non-comptime-only pointers are allowed.
        .Pointer => !ty.isSlice(mod) and !try sema.typeRequiresComptime(ty),
        // Containers must themselves be packed.
        .Struct, .Union => ty.containerLayout(mod) == .Packed,

        // Everything else has no guaranteed in-memory representation.
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Undefined,
        .Null,
        .ErrorUnion,
        .ErrorSet,
        .Frame,
        .NoReturn,
        .Opaque,
        .AnyFrame,
        .Fn,
        .Array,
        => false,
    };
}
|
|
|
|
/// Appends notes to `msg` explaining why `validatePackedType` rejected `ty`.
/// Mirrors the structure of `validatePackedType`; allowed types produce no
/// notes.
fn explainWhyTypeIsNotPacked(
    sema: *Sema,
    msg: *Module.ErrorMsg,
    src_loc: Module.SrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        // These are allowed in packed types; nothing to explain.
        .Void,
        .Bool,
        .Float,
        .Int,
        .Vector,
        .Enum,
        => return,
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Undefined,
        .Null,
        .Frame,
        .NoReturn,
        .Opaque,
        .ErrorUnion,
        .ErrorSet,
        .AnyFrame,
        .Optional,
        .Array,
        => try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{}),
        .Pointer => if (ty.isSlice(mod)) {
            try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
        } else {
            // Non-slice pointers are only rejected when comptime-only.
            try mod.errNoteNonLazy(src_loc, msg, "comptime-only pointer has no guaranteed in-memory representation", .{});
            try sema.explainWhyTypeIsComptime(msg, src_loc, ty);
        },
        .Fn => {
            try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
            try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
        },
        .Struct => try mod.errNoteNonLazy(src_loc, msg, "only packed structs layout are allowed in packed types", .{}),
        .Union => try mod.errNoteNonLazy(src_loc, msg, "only packed unions layout are allowed in packed types", .{}),
    }
}
|
|
|
|
/// Lazily resolves and caches on the Module the two globals needed to emit a
/// call to the root `panic` function: `mod.panic_func_index` (the panic fn
/// itself, with body analysis queued) and `mod.null_stack_trace` (a null
/// `?*StackTrace` value). Idempotent: already-resolved entries are kept.
fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
    const mod = sema.mod;

    if (mod.panic_func_index == .none) {
        const decl_index = (try sema.getBuiltinDecl(block, "panic"));
        // decl_index may be an alias; we must find the decl that actually
        // owns the function.
        try sema.ensureDeclAnalyzed(decl_index);
        const tv = try mod.declPtr(decl_index).typedValue();
        try sema.declareDependency(.{ .decl_val = decl_index });
        assert(tv.ty.zigTypeTag(mod) == .Fn);
        assert(try sema.fnHasRuntimeBits(tv.ty));
        const func_index = tv.val.toIntern();
        // Backends need the panic body lowered before safety checks call it.
        try mod.ensureFuncBodyAnalysisQueued(func_index);
        mod.panic_func_index = func_index;
    }

    if (mod.null_stack_trace == .none) {
        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(stack_trace_ty);
        const target = mod.getTarget();
        const ptr_stack_trace_ty = try sema.ptrType(.{
            .child = stack_trace_ty.toIntern(),
            .flags = .{
                .address_space = target_util.defaultAddressSpace(target, .global_constant),
            },
        });
        const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
        // Intern the `null` value of type `?*StackTrace` once for reuse.
        mod.null_stack_trace = try mod.intern(.{ .opt = .{
            .ty = opt_ptr_stack_trace_ty.toIntern(),
            .val = .none,
        } });
    }
}
|
|
|
|
/// Backends depend on panic decls being available when lowering safety-checked
/// instructions. This function ensures the panic function will be available to
/// be called during that time.
/// Returns the decl holding the message string for `panic_id`, caching it in
/// `mod.panic_messages` so each message is only resolved once.
fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternPool.DeclIndex {
    const mod = sema.mod;
    const gpa = sema.gpa;
    if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;

    try sema.prepareSimplePanic(block);

    // Look up `std.builtin.panic_messages.<panic_id>` by tag name.
    const panic_messages_ty = try sema.getBuiltinType("panic_messages");
    const msg_decl_index = (sema.namespaceLookup(
        block,
        .unneeded,
        panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
        try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)),
    ) catch |err| switch (err) {
        error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.panic_messages is corrupt"),
        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        error.OutOfMemory => |e| return e,
    }).?;
    try sema.ensureDeclAnalyzed(msg_decl_index);
    mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional();
    return msg_decl_index;
}
|
|
|
|
/// Emits a runtime safety check into `parent_block`: if `ok` is false at
/// runtime, the simple (non-formatted) panic for `panic_id` is invoked.
/// The failure path is built in a temporary sub-block and spliced in by
/// `addSafetyCheckExtra`.
fn addSafetyCheck(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    ok: Air.Inst.Ref,
    panic_id: Module.PanicId,
) !void {
    const gpa = sema.gpa;
    // Safety checks are runtime constructs only.
    assert(!parent_block.is_comptime);

    // Child block that will hold the panic call (the "else" branch).
    var fail_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .inlining = parent_block.inlining,
        .is_comptime = false,
    };

    defer fail_block.instructions.deinit(gpa);

    try sema.safetyPanic(&fail_block, src, panic_id);
    try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
|
|
|
|
/// Splices a safety check into `parent_block` as:
///   %block = block(void) { %cond_br ok, then: { br %block, void }, else: <fail_block instructions> }
/// All AIR instruction and extra-array space is reserved up front so the
/// three instructions (block, cond_br, br) get consecutive, predictable
/// indices.
fn addSafetyCheckExtra(
    sema: *Sema,
    parent_block: *Block,
    ok: Air.Inst.Ref,
    fail_block: *Block,
) !void {
    const gpa = sema.gpa;

    try parent_block.instructions.ensureUnusedCapacity(gpa, 1);

    // Reserve extra-array space for: the Block payload + its 1-instruction
    // body (the cond_br), the CondBr payload + its then body (the br) and
    // else body (the fail block's instructions).
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
        1 + // The main block only needs space for the cond_br.
        @typeInfo(Air.CondBr).Struct.fields.len +
        1 + // The ok branch of the cond_br only needs space for the br.
        fail_block.instructions.items.len);

    // Three consecutive instruction indices: block, cond_br, br.
    try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    const cond_br_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(block_inst) + 1);
    const br_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(cond_br_inst) + 1);
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .block,
        .data = .{ .ty_pl = .{
            .ty = .void_type,
            .payload = sema.addExtraAssumeCapacity(Air.Block{
                .body_len = 1,
            }),
        } },
    });
    sema.air_extra.appendAssumeCapacity(@intFromEnum(cond_br_inst));

    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = ok,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = 1,
                .else_body_len = @intCast(fail_block.instructions.items.len),
            }),
        } },
    });
    // then body: the single br; else body: the panic/fail instructions.
    sema.air_extra.appendAssumeCapacity(@intFromEnum(br_inst));
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(fail_block.instructions.items));

    // The br exits the block with void on the success path.
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .br,
        .data = .{ .br = .{
            .block_inst = block_inst,
            .operand = .void_value,
        } },
    });

    parent_block.instructions.appendAssumeCapacity(block_inst);
}
|
|
|
|
/// Emits a call to the root panic function with `msg_inst` as the message,
/// a null stack trace, and a null return address. Falls back to a `trap`
/// instruction when the backend cannot lower panic-fn calls.
fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void {
    const mod = sema.mod;

    if (!mod.backendSupportsFeature(.panic_fn)) {
        _ = try block.addNoOp(.trap);
        return;
    }

    // Make sure the panic fn and the cached null stack trace exist.
    try sema.prepareSimplePanic(block);

    const panic_func = mod.funcInfo(mod.panic_func_index);
    const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl);
    const null_stack_trace = Air.internedToRef(mod.null_stack_trace);

    // `?usize` null for the return-address argument.
    const opt_usize_ty = try mod.optionalType(.usize_type);
    const null_ret_addr = Air.internedToRef((try mod.intern(.{ .opt = .{
        .ty = opt_usize_ty.toIntern(),
        .val = .none,
    } })));
    try sema.callBuiltin(block, src, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }, operation);
}
|
|
|
|
/// Emits a safety check that panics when unwrapping `operand` yields an
/// error. With formatted panics enabled (and backend support), the failure
/// path calls `panicUnwrapError` with the error value and the error return
/// trace; otherwise it degrades to the simple `.unwrap_error` panic or a trap.
fn panicUnwrapError(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    unwrap_err_tag: Air.Inst.Tag,
    is_non_err_tag: Air.Inst.Tag,
) !void {
    assert(!parent_block.is_comptime);
    // `ok` is true when the operand holds a payload rather than an error.
    const ok = try parent_block.addUnOp(is_non_err_tag, operand);
    if (!sema.mod.comp.formatted_panics) {
        return sema.addSafetyCheck(parent_block, src, ok, .unwrap_error);
    }
    const gpa = sema.gpa;

    // Failure branch built in a child block, then spliced in.
    var fail_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .inlining = parent_block.inlining,
        .is_comptime = false,
    };

    defer fail_block.instructions.deinit(gpa);

    {
        if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) {
            _ = try fail_block.addNoOp(.trap);
        } else {
            const panic_fn = try sema.getBuiltin("panicUnwrapError");
            // Extract the error value only on the failure path.
            const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
            const err_return_trace = try sema.getErrorReturnTrace(&fail_block);
            const args: [2]Air.Inst.Ref = .{ err_return_trace, err };
            try sema.callBuiltin(&fail_block, src, panic_fn, .auto, &args, .@"safety check");
        }
    }
    try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
|
|
|
|
/// Emits a bounds-check: compares `index` against `len` with `cmp_op` and
/// panics with "index out of bounds" (formatted with both values when
/// formatted panics are enabled) if the comparison fails.
fn panicIndexOutOfBounds(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    index: Air.Inst.Ref,
    len: Air.Inst.Ref,
    cmp_op: Air.Inst.Tag,
) !void {
    assert(!parent_block.is_comptime);
    const ok = try parent_block.addBinOp(cmp_op, index, len);
    if (sema.mod.comp.formatted_panics) {
        try sema.safetyCheckFormatted(parent_block, src, ok, "panicOutOfBounds", &.{ index, len });
    } else {
        try sema.addSafetyCheck(parent_block, src, ok, .index_out_of_bounds);
    }
}
|
|
|
|
/// Emits a safety check that the union's active tag equals the tag being
/// accessed, panicking with "access of inactive union field" (formatted with
/// both tags when formatted panics are enabled) otherwise.
fn panicInactiveUnionField(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    active_tag: Air.Inst.Ref,
    wanted_tag: Air.Inst.Ref,
) !void {
    assert(!parent_block.is_comptime);
    const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag);
    if (sema.mod.comp.formatted_panics) {
        try sema.safetyCheckFormatted(parent_block, src, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag });
    } else {
        try sema.addSafetyCheck(parent_block, src, ok, .inactive_union_field);
    }
}
|
|
|
|
/// Emits a safety check that the element at `sentinel_index` equals the
/// expected sentinel value. No-op when there is no sentinel. Vector
/// sentinels are compared elementwise and reduced with And; types that are
/// not self-comparable delegate the whole check to the
/// `checkNonScalarSentinel` builtin.
fn panicSentinelMismatch(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    maybe_sentinel: ?Value,
    sentinel_ty: Type,
    ptr: Air.Inst.Ref,
    sentinel_index: Air.Inst.Ref,
) !void {
    assert(!parent_block.is_comptime);
    const mod = sema.mod;
    const expected_sentinel_val = maybe_sentinel orelse return;
    const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern());

    // Load the actual sentinel: slices index directly, other pointers go
    // through an element pointer + load.
    const ptr_ty = sema.typeOf(ptr);
    const actual_sentinel = if (ptr_ty.isSlice(mod))
        try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
    else blk: {
        const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
        const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
        break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
    };

    const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
        // Vector sentinel: all lanes must match.
        const eql =
            try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
        break :ok try parent_block.addInst(.{
            .tag = .reduce,
            .data = .{ .reduce = .{
                .operand = eql,
                .operation = .And,
            } },
        });
    } else if (sentinel_ty.isSelfComparable(mod, true))
        try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
    else {
        // Non-comparable sentinel types: the builtin performs its own check
        // and panic, so no cond_br is emitted here.
        const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
        const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel };
        try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check");
        return;
    };

    if (!sema.mod.comp.formatted_panics) {
        return sema.addSafetyCheck(parent_block, src, ok, .sentinel_mismatch);
    }
    try sema.safetyCheckFormatted(parent_block, src, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel });
}
|
|
|
|
/// Emits a safety check whose failure path calls the named formatted-panic
/// builtin `func` with `args` (e.g. "panicOutOfBounds" with index and len).
/// Falls back to a trap when the backend cannot lower formatted checks.
/// Must only be called when formatted panics are enabled.
fn safetyCheckFormatted(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    ok: Air.Inst.Ref,
    func: []const u8,
    args: []const Air.Inst.Ref,
) CompileError!void {
    assert(sema.mod.comp.formatted_panics);
    const gpa = sema.gpa;

    // Failure branch built in a child block, then spliced in.
    var fail_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .inlining = parent_block.inlining,
        .is_comptime = false,
    };

    defer fail_block.instructions.deinit(gpa);

    if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) {
        _ = try fail_block.addNoOp(.trap);
    } else {
        const panic_fn = try sema.getBuiltin(func);
        try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check");
    }
    try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
|
|
|
|
/// Emits an unconditional call to the panic handler using the canned message
/// associated with `panic_id`.
fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) CompileError!void {
    const msg_decl = try sema.preparePanicId(block, panic_id);
    const msg_ref = try sema.analyzeDeclVal(block, src, msg_decl);
    return sema.panicWithMsg(block, src, msg_ref, .@"safety check");
}
|
|
|
|
/// Counts one backward branch during comptime evaluation and fails with a
/// branch-quota error (suggesting `@setEvalBranchQuota`) once the quota is
/// exceeded.
fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
    sema.branch_count += 1;
    if (sema.branch_count <= sema.branch_quota) return;

    const msg = try sema.errMsg(
        block,
        src,
        "evaluation exceeded {d} backwards branches",
        .{sema.branch_quota},
    );
    try sema.errNote(
        block,
        src,
        msg,
        "use @setEvalBranchQuota() to raise the branch limit from {d}",
        .{sema.branch_quota},
    );
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Analyzes a by-value field access `object.field_name` and returns the
/// resulting value. Handles array/slice `len`/`ptr`, error-set members,
/// namespace lookups through `type` values, and struct/union fields.
fn fieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldPtr`. This function takes a value and returns a value.

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const object_src = src; // TODO better source location
    const object_ty = sema.typeOf(object);

    // Zig allows dereferencing a single pointer during field lookup. Note that
    // we don't actually need to generate the dereference some field lookups, like the
    // length of arrays and other comptime operations.
    const is_pointer_to = object_ty.isSinglePointer(mod);

    const inner_ty = if (is_pointer_to)
        object_ty.childType(mod)
    else
        object_ty;

    switch (inner_ty.zigTypeTag(mod)) {
        .Array => {
            if (ip.stringEqlSlice(field_name, "len")) {
                // Array length is a comptime constant; no dereference needed.
                return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
            } else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) {
                // `(&array).ptr`: produce a many-pointer to the element type,
                // carrying over sentinel and all pointer attributes.
                const ptr_info = object_ty.ptrInfo(mod);
                const result_ty = try sema.ptrType(.{
                    .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
                    .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none,
                    .flags = .{
                        .size = .Many,
                        .alignment = ptr_info.flags.alignment,
                        .is_const = ptr_info.flags.is_const,
                        .is_volatile = ptr_info.flags.is_volatile,
                        .is_allowzero = ptr_info.flags.is_allowzero,
                        .address_space = ptr_info.flags.address_space,
                        .vector_index = ptr_info.flags.vector_index,
                    },
                    .packed_offset = ptr_info.packed_offset,
                });
                return sema.coerce(block, result_ty, object, src);
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{}' in '{}'",
                    .{ field_name.fmt(ip), object_ty.fmt(mod) },
                );
            }
        },
        .Pointer => {
            const ptr_info = inner_ty.ptrInfo(mod);
            if (ptr_info.flags.size == .Slice) {
                // Slice values expose exactly two members: `ptr` and `len`.
                if (ip.stringEqlSlice(field_name, "ptr")) {
                    const slice = if (is_pointer_to)
                        try sema.analyzeLoad(block, src, object, object_src)
                    else
                        object;
                    return sema.analyzeSlicePtr(block, object_src, slice, inner_ty);
                } else if (ip.stringEqlSlice(field_name, "len")) {
                    const slice = if (is_pointer_to)
                        try sema.analyzeLoad(block, src, object, object_src)
                    else
                        object;
                    return sema.analyzeSliceLen(block, src, slice);
                } else {
                    return sema.fail(
                        block,
                        field_name_src,
                        "no member named '{}' in '{}'",
                        .{ field_name.fmt(ip), object_ty.fmt(mod) },
                    );
                }
            }
        },
        .Type => {
            // `SomeType.member`: the object is a comptime-known type value.
            const dereffed_type = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object, object_src)
            else
                object;

            const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
            const child_type = val.toType();

            // Record an incremental-compilation dependency on this namespace name.
            if (child_type.typeDeclInst(mod)) |type_decl_inst| {
                try sema.declareDependency(.{ .namespace_name = .{
                    .namespace = type_decl_inst,
                    .name = field_name,
                } });
            }

            switch (try child_type.zigTypeTagOrPoison(mod)) {
                .ErrorSet => {
                    // Validate the error name against the set (anyerror accepts
                    // any name, registering it globally).
                    switch (ip.indexToKey(child_type.toIntern())) {
                        .error_set_type => |error_set_type| blk: {
                            if (error_set_type.nameIndex(ip, field_name) != null) break :blk;
                            return sema.fail(block, src, "no error named '{}' in '{}'", .{
                                field_name.fmt(ip), child_type.fmt(mod),
                            });
                        },
                        .inferred_error_set_type => {
                            return sema.fail(block, src, "TODO handle inferred error sets here", .{});
                        },
                        .simple_type => |t| {
                            assert(t == .anyerror);
                            _ = try mod.getErrorValue(field_name);
                        },
                        else => unreachable,
                    }

                    const error_set_type = if (!child_type.isAnyError(mod))
                        child_type
                    else
                        try mod.singleErrorSetType(field_name);
                    return Air.internedToRef((try mod.intern(.{ .err = .{
                        .ty = error_set_type.toIntern(),
                        .name = field_name,
                    } })));
                },
                .Union => {
                    // Namespace decls take precedence over enum tag names.
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    try sema.resolveTypeFields(child_type);
                    if (child_type.unionTagType(mod)) |enum_ty| {
                        if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
                            const field_index: u32 = @intCast(field_index_usize);
                            return Air.internedToRef((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern());
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                .Enum => {
                    // Namespace decls take precedence over enum field names.
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
                        return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                    const field_index: u32 = @intCast(field_index_usize);
                    const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
                    return Air.internedToRef(enum_val.toIntern());
                },
                .Struct, .Opaque => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, src, field_name);
                },
                else => {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)});
                        errdefer msg.destroy(sema.gpa);
                        if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{});
                        if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                },
            }
        },
        .Struct => if (is_pointer_to) {
            // Avoid loading the entire struct by fetching a pointer and loading that
            const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
            return sema.analyzeLoad(block, src, field_ptr, object_src);
        } else {
            return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
        },
        .Union => if (is_pointer_to) {
            // Avoid loading the entire union by fetching a pointer and loading that
            const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
            return sema.analyzeLoad(block, src, field_ptr, object_src);
        } else {
            return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
        },
        else => {},
    }
    return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
}
|
|
|
|
/// Analyzes a field access through a pointer, `object_ptr.*.field_name`,
/// and returns a pointer to the field. `initializing` is forwarded to the
/// struct/union field-pointer helpers.
fn fieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldVal`. This function takes a pointer and returns a pointer.

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const object_ptr_src = src; // TODO better source location
    const object_ptr_ty = sema.typeOf(object_ptr);
    const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
        .Pointer => object_ptr_ty.childType(mod),
        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}),
    };

    // Zig allows dereferencing a single pointer during field lookup. Note that
    // we don't actually need to generate the dereference some field lookups, like the
    // length of arrays and other comptime operations.
    const is_pointer_to = object_ty.isSinglePointer(mod);

    const inner_ty = if (is_pointer_to)
        object_ty.childType(mod)
    else
        object_ty;

    switch (inner_ty.zigTypeTag(mod)) {
        .Array => {
            if (ip.stringEqlSlice(field_name, "len")) {
                // The length is comptime-known; materialize it as an anonymous
                // decl so we can hand back a pointer to it.
                const int_val = try mod.intValue(Type.usize, inner_ty.arrayLen(mod));
                return anonDeclRef(sema, int_val.toIntern());
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{}' in '{}'",
                    .{ field_name.fmt(ip), object_ty.fmt(mod) },
                );
            }
        },
        .Pointer => if (inner_ty.isSlice(mod)) {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;

            // The pointer whose const/volatile/address-space attributes the
            // resulting field pointer inherits.
            const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty;

            if (ip.stringEqlSlice(field_name, "ptr")) {
                const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);

                const result_ty = try sema.ptrType(.{
                    .child = slice_ptr_ty.toIntern(),
                    .flags = .{
                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
                    },
                });

                // Comptime-known slice pointer: intern a field pointer value.
                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    return Air.internedToRef((try mod.intern(.{ .ptr = .{
                        .ty = result_ty.toIntern(),
                        .addr = .{ .field = .{
                            .base = val.toIntern(),
                            .index = Value.slice_ptr_index,
                        } },
                    } })));
                }
                try sema.requireRuntimeBlock(block, src, null);

                const field_ptr = try block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
                try sema.checkKnownAllocPtr(inner_ptr, field_ptr);
                return field_ptr;
            } else if (ip.stringEqlSlice(field_name, "len")) {
                const result_ty = try sema.ptrType(.{
                    .child = .usize_type,
                    .flags = .{
                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
                    },
                });

                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    return Air.internedToRef((try mod.intern(.{ .ptr = .{
                        .ty = result_ty.toIntern(),
                        .addr = .{ .field = .{
                            .base = val.toIntern(),
                            .index = Value.slice_len_index,
                        } },
                    } })));
                }
                try sema.requireRuntimeBlock(block, src, null);

                const field_ptr = try block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
                try sema.checkKnownAllocPtr(inner_ptr, field_ptr);
                return field_ptr;
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{}' in '{}'",
                    .{ field_name.fmt(ip), object_ty.fmt(mod) },
                );
            }
        },
        .Type => {
            // Pointer to a comptime type value: load the type, then do a
            // namespace/member lookup on it. The result is again comptime-known,
            // so an anonymous decl ref is returned where a value is produced.
            _ = try sema.resolveConstDefinedValue(block, .unneeded, object_ptr, undefined);
            const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
            const inner = if (is_pointer_to)
                try sema.analyzeLoad(block, src, result, object_ptr_src)
            else
                result;

            // Already proven const-defined above, so this cannot fail.
            const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
            const child_type = val.toType();

            // Record an incremental-compilation dependency on this namespace name.
            if (child_type.typeDeclInst(mod)) |type_decl_inst| {
                try sema.declareDependency(.{ .namespace_name = .{
                    .namespace = type_decl_inst,
                    .name = field_name,
                } });
            }

            switch (child_type.zigTypeTag(mod)) {
                .ErrorSet => {
                    switch (ip.indexToKey(child_type.toIntern())) {
                        .error_set_type => |error_set_type| blk: {
                            if (error_set_type.nameIndex(ip, field_name) != null) {
                                break :blk;
                            }
                            return sema.fail(block, src, "no error named '{}' in '{}'", .{
                                field_name.fmt(ip), child_type.fmt(mod),
                            });
                        },
                        .inferred_error_set_type => {
                            return sema.fail(block, src, "TODO handle inferred error sets here", .{});
                        },
                        .simple_type => |t| {
                            assert(t == .anyerror);
                            _ = try mod.getErrorValue(field_name);
                        },
                        else => unreachable,
                    }

                    const error_set_type = if (!child_type.isAnyError(mod))
                        child_type
                    else
                        try mod.singleErrorSetType(field_name);
                    return anonDeclRef(sema, try mod.intern(.{ .err = .{
                        .ty = error_set_type.toIntern(),
                        .name = field_name,
                    } }));
                },
                .Union => {
                    // Namespace decls take precedence over enum tag names.
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    try sema.resolveTypeFields(child_type);
                    if (child_type.unionTagType(mod)) |enum_ty| {
                        if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
                            const field_index_u32: u32 = @intCast(field_index);
                            const idx_val = try mod.enumValueFieldIndex(enum_ty, field_index_u32);
                            return anonDeclRef(sema, idx_val.toIntern());
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                .Enum => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    const field_index = child_type.enumFieldIndex(field_name, mod) orelse {
                        return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                    };
                    const field_index_u32: u32 = @intCast(field_index);
                    const idx_val = try mod.enumValueFieldIndex(child_type, field_index_u32);
                    return anonDeclRef(sema, idx_val.toIntern());
                },
                .Struct, .Opaque => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}),
            }
        },
        .Struct => {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;
            const field_ptr = try sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
            try sema.checkKnownAllocPtr(inner_ptr, field_ptr);
            return field_ptr;
        },
        .Union => {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;
            const field_ptr = try sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
            try sema.checkKnownAllocPtr(inner_ptr, field_ptr);
            return field_ptr;
        },
        else => {},
    }
    return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
}
|
|
|
|
/// Result of resolving `lhs.name(...)` call syntax: either a plain field
/// value to be called, or a method plus its implicit first argument.
const ResolvedFieldCallee = union(enum) {
    /// The LHS of the call was an actual field with this value.
    direct: Air.Inst.Ref,
    /// This is a method call, with the function and first argument given.
    method: struct {
        /// The function being called.
        func_inst: Air.Inst.Ref,
        /// The receiver to pass as the first argument (by pointer or by value).
        arg0_inst: Air.Inst.Ref,
    },
};
|
|
|
|
/// Resolves the callee of `lhs.field_name(...)` call syntax. `raw_ptr` must be
/// a single (or C) pointer to the object. Returns either the field's value
/// (`.direct`) or a namespace function plus a bound first argument (`.method`).
fn fieldCallBind(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    raw_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!ResolvedFieldCallee {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldVal`. This function takes a pointer to the object and returns a
    // `ResolvedFieldCallee` describing how the call should be performed.

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const raw_ptr_src = src; // TODO better source location
    const raw_ptr_ty = sema.typeOf(raw_ptr);
    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
        raw_ptr_ty.childType(mod)
    else
        return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)});

    // Optionally dereference a second pointer to get the concrete type.
    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
    const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
    const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
    const object_ptr = if (is_double_ptr)
        try sema.analyzeLoad(block, src, raw_ptr, src)
    else
        raw_ptr;

    // First, try to interpret `field_name` as an actual field of the object.
    find_field: {
        switch (concrete_ty.zigTypeTag(mod)) {
            .Struct => {
                try sema.resolveTypeFields(concrete_ty);
                if (mod.typeToStruct(concrete_ty)) |struct_type| {
                    const field_index = struct_type.nameIndex(ip, field_name) orelse
                        break :find_field;
                    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);

                    return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
                } else if (concrete_ty.isTuple(mod)) {
                    // Tuples: `len` plus numeric field names.
                    if (ip.stringEqlSlice(field_name, "len")) {
                        return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
                    }
                    if (field_name.toUnsigned(ip)) |field_index| {
                        if (field_index >= concrete_ty.structFieldCount(mod)) break :find_field;
                        return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, mod), field_index, object_ptr);
                    }
                } else {
                    // Anonymous struct types: linear scan over field names.
                    const max = concrete_ty.structFieldCount(mod);
                    for (0..max) |i_usize| {
                        const i: u32 = @intCast(i_usize);
                        if (field_name == concrete_ty.structFieldName(i, mod).unwrap().?) {
                            return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, mod), i, object_ptr);
                        }
                    }
                }
            },
            .Union => {
                try sema.resolveTypeFields(concrete_ty);
                const union_obj = mod.typeToUnion(concrete_ty).?;
                _ = union_obj.nameIndex(ip, field_name) orelse break :find_field;

                const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
                return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) };
            },
            .Type => {
                // Calling through a type value: regular member lookup.
                const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
                return .{ .direct = try sema.fieldVal(block, src, namespace, field_name, field_name_src) };
            },
            else => {},
        }
    }

    // If we get here, we need to look for a decl in the struct type instead.
    const found_decl = switch (concrete_ty.zigTypeTag(mod)) {
        .Struct, .Opaque, .Union, .Enum => found_decl: {
            if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| {
                if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
                    try sema.addReferencedBy(block, src, decl_idx);
                    const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
                    const decl_type = sema.typeOf(decl_val);
                    if (mod.typeToFunc(decl_type)) |func_type| f: {
                        if (func_type.param_types.len == 0) break :f;

                        // Decide how the receiver is passed based on the
                        // function's first parameter type.
                        const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]);
                        // zig fmt: off
                        if (first_param_type.isGenericPoison() or (
                            first_param_type.zigTypeTag(mod) == .Pointer and
                            (first_param_type.ptrSize(mod) == .One or
                            first_param_type.ptrSize(mod) == .C) and
                            first_param_type.childType(mod).eql(concrete_ty, mod)))
                        {
                            // zig fmt: on
                            // Note that if the param type is generic poison, we know that it must
                            // specifically be `anytype` since it's the first parameter, meaning we
                            // can safely assume it can be a pointer.
                            // TODO: bound fn calls on rvalues should probably
                            // generate a by-value argument somehow.
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = object_ptr,
                            } };
                        } else if (first_param_type.eql(concrete_ty, mod)) {
                            // By-value receiver: dereference the object pointer.
                            const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = deref,
                            } };
                        } else if (first_param_type.zigTypeTag(mod) == .Optional) {
                            // `?T` and `?*T` receivers are also accepted.
                            const child = first_param_type.optionalChild(mod);
                            if (child.eql(concrete_ty, mod)) {
                                const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                                return .{ .method = .{
                                    .func_inst = decl_val,
                                    .arg0_inst = deref,
                                } };
                            } else if (child.zigTypeTag(mod) == .Pointer and
                                child.ptrSize(mod) == .One and
                                child.childType(mod).eql(concrete_ty, mod))
                            {
                                return .{ .method = .{
                                    .func_inst = decl_val,
                                    .arg0_inst = object_ptr,
                                } };
                            }
                        } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
                            first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod))
                        {
                            const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = deref,
                            } };
                        }
                    }
                    // The decl exists but is not callable as a method; keep it
                    // for a better error note below.
                    break :found_decl decl_idx;
                }
            }
            break :found_decl null;
        },
        else => null,
    };

    const msg = msg: {
        const msg = try sema.errMsg(block, src, "no field or member function named '{}' in '{}'", .{
            field_name.fmt(ip),
            concrete_ty.fmt(mod),
        });
        errdefer msg.destroy(sema.gpa);
        try sema.addDeclaredHereNote(msg, concrete_ty);
        if (found_decl) |decl_idx| {
            const decl = mod.declPtr(decl_idx);
            try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{}' is not a member function", .{field_name.fmt(ip)});
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Completes a `fieldCallBind` resolution for an actual field: loads the
/// field at `field_index` through `object_ptr` and returns it as a `.direct`
/// callee. Comptime fields short-circuit to their comptime value.
fn finishFieldCallBind(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_ty: Type,
    field_ty: Type,
    field_index: u32,
    object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
    const mod = sema.mod;
    // Field pointer inherits constness and address space from the object pointer.
    const ptr_field_ty = try sema.ptrType(.{
        .child = field_ty.toIntern(),
        .flags = .{
            .is_const = !ptr_ty.ptrIsMutable(mod),
            .address_space = ptr_ty.ptrAddressSpace(mod),
        },
    });

    const container_ty = ptr_ty.childType(mod);
    if (container_ty.zigTypeTag(mod) == .Struct) {
        // A comptime field always has its comptime-known value; no load needed.
        if (container_ty.structFieldIsComptime(field_index, mod)) {
            try sema.resolveStructFieldInits(container_ty);
            const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?;
            return .{ .direct = Air.internedToRef(default_val.toIntern()) };
        }
    }

    // Comptime-known object pointer: intern a field pointer and load through it.
    if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
        const pointer = Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = struct_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
        return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
    }

    // Runtime path: emit a struct_field_ptr followed by a load.
    try sema.requireRuntimeBlock(block, src, null);
    const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty);
    return .{ .direct = try sema.analyzeLoad(block, src, ptr_inst, src) };
}
|
|
|
|
/// Looks up `decl_name` in `namespace`. Returns `null` when the decl does not
/// exist; errors when it exists but is not `pub` and is accessed from a
/// different file.
fn namespaceLookup(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: InternPool.NamespaceIndex,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?InternPool.DeclIndex {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const decl_index = (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) orelse
        return null;
    const decl = mod.declPtr(decl_index);

    // Accessible when marked `pub`, or when accessed from within the same file.
    if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
        return decl_index;
    }

    const msg = msg: {
        const msg = try sema.errMsg(block, src, "'{}' is not marked 'pub'", .{
            decl_name.fmt(&mod.intern_pool),
        });
        errdefer msg.destroy(gpa);
        try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Like `namespaceLookup`, but returns a reference (pointer) to the decl,
/// also recording the reference for the "referenced by" trace.
fn namespaceLookupRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: InternPool.NamespaceIndex,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    try sema.addReferencedBy(block, src, decl);
    return try sema.analyzeDeclRef(decl);
}
|
|
|
|
/// Like `namespaceLookup`, but returns the decl's value rather than a
/// reference, or `null` when the decl does not exist.
fn namespaceLookupVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: InternPool.NamespaceIndex,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    return try sema.analyzeDeclVal(block, src, decl);
}
|
|
|
|
/// Returns a pointer to the field named `field_name` of the struct pointed to
/// by `struct_ptr`. Dispatches to tuple/anon-struct helpers where applicable;
/// `initializing` indicates the pointer is for a field initialization.
fn structFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    struct_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(struct_ty.zigTypeTag(mod) == .Struct);

    // Field types and layout must be resolved before pointer types can be computed.
    try sema.resolveTypeFields(struct_ty);
    try sema.resolveStructLayout(struct_ty);

    if (struct_ty.isTuple(mod)) {
        // `tuple.len` yields a pointer to the comptime-known count.
        if (ip.stringEqlSlice(field_name, "len")) {
            const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod));
            return sema.analyzeRef(block, src, len_inst);
        }
        const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
        return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
    } else if (struct_ty.isAnonStruct(mod)) {
        const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
        return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
    }

    const struct_type = mod.typeToStruct(struct_ty).?;

    const field_index = struct_type.nameIndex(ip, field_name) orelse
        return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);

    return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
}
|
|
|
|
/// Returns a pointer to field `field_index` of the struct pointed to by
/// `struct_ptr`, computing the field pointer's alignment and (for packed
/// structs) its bit offset/host size within the backing integer.
fn structFieldPtrByIndex(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_ptr: Air.Inst.Ref,
    field_index: u32,
    field_src: LazySrcLoc,
    struct_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    if (struct_ty.isAnonStruct(mod)) {
        return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
    }

    const struct_type = mod.typeToStruct(struct_ty).?;
    const field_ty = struct_type.field_types.get(ip)[field_index];
    const struct_ptr_ty = sema.typeOf(struct_ptr);
    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);

    // Start from the parent pointer's attributes; alignment and packed offsets
    // are filled in below per layout.
    var ptr_ty_data: InternPool.Key.PtrType = .{
        .child = field_ty,
        .flags = .{
            .is_const = struct_ptr_ty_info.flags.is_const,
            .is_volatile = struct_ptr_ty_info.flags.is_volatile,
            .address_space = struct_ptr_ty_info.flags.address_space,
        },
    };

    const target = mod.getTarget();

    // Effective alignment of the parent pointer: explicit override or ABI alignment.
    const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
        struct_ptr_ty_info.flags.alignment
    else
        try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));

    if (struct_type.layout == .Packed) {
        comptime assert(Type.packed_struct_layout_version == 2);

        // Accumulate bit offsets of runtime fields to locate this field within
        // the packed host integer.
        var running_bits: u16 = 0;
        for (0..struct_type.field_types.len) |i| {
            const f_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
            if (!(try sema.typeHasRuntimeBits(f_ty))) continue;

            if (i == field_index) {
                ptr_ty_data.packed_offset.bit_offset = running_bits;
            }
            running_bits += @intCast(f_ty.bitSize(mod));
        }
        ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;

        // If this is a packed struct embedded in another one, we need to offset
        // the bits against each other.
        if (struct_ptr_ty_info.packed_offset.host_size != 0) {
            ptr_ty_data.packed_offset.host_size = struct_ptr_ty_info.packed_offset.host_size;
            ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset;
        }

        ptr_ty_data.flags.alignment = parent_align;

        // If the field happens to be byte-aligned, simplify the pointer type.
        // The pointee type bit size must match its ABI byte size so that loads and stores
        // do not interfere with the surrounding packed bits.
        // We do not attempt this with big-endian targets yet because of nested
        // structs and floats. I need to double-check the desired behavior for big endian
        // targets before adding the necessary complications to this code. This will not
        // cause miscompilations; it only means the field pointer uses bit masking when it
        // might not be strictly necessary.
        if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
            target.cpu.arch.endian() == .little)
        {
            const elem_size_bytes = try sema.typeAbiSize(Type.fromInterned(ptr_ty_data.child));
            const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
            if (elem_size_bytes * 8 == elem_size_bits) {
                const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
                const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
                assert(new_align != .none);
                ptr_ty_data.flags.alignment = new_align;
                ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
            }
        }
    } else if (struct_type.layout == .Extern) {
        // For extern structs, field alignment might be bigger than type's
        // natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
        // second field is aligned as u32.
        const field_offset = struct_ty.structFieldOffset(field_index, mod);
        ptr_ty_data.flags.alignment = if (parent_align == .none)
            .none
        else
            @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
    } else {
        // Our alignment is capped at the field alignment.
        const field_align = try sema.structFieldAlignment(
            struct_type.fieldAlign(ip, field_index),
            Type.fromInterned(field_ty),
            struct_type.layout,
        );
        ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
            field_align
        else
            field_align.min(parent_align);
    }

    const ptr_field_ty = try sema.ptrType(ptr_ty_data);

    // Comptime fields: the "pointer" is to the comptime-known init value.
    if (struct_type.fieldIsComptime(ip, field_index)) {
        try sema.resolveStructFieldInits(struct_ty);
        const val = try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
        } });
        return Air.internedToRef(val);
    }

    // Comptime-known base pointer: intern a field pointer value.
    if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
        const val = try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = try struct_ptr_val.intern(struct_ptr_ty, mod),
                .index = field_index,
            } },
        } });
        return Air.internedToRef(val);
    }

    // Runtime path: emit a struct_field_ptr AIR instruction.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty);
}
|
|
|
|
/// Analyzes a by-value field access `struct_byval.field_name` on a struct
/// value, returning the field's value. Dispatches to tuple helpers for tuple
/// and anonymous struct types.
fn structFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    struct_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(struct_ty.zigTypeTag(mod) == .Struct);

    try sema.resolveTypeFields(struct_ty);

    switch (ip.indexToKey(struct_ty.toIntern())) {
        .struct_type => |struct_type| {
            if (struct_type.isTuple(ip))
                return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);

            const field_index = struct_type.nameIndex(ip, field_name) orelse
                return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);
            // A comptime field always has its comptime-known init value.
            if (struct_type.fieldIsComptime(ip, field_index)) {
                try sema.resolveStructFieldInits(struct_ty);
                return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
            }

            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);

            // Comptime-known aggregate: extract the field value directly.
            if (try sema.resolveValue(struct_byval)) |struct_val| {
                if (struct_val.isUndef(mod)) return mod.undefRef(field_ty);
                if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
                    return Air.internedToRef(opv.toIntern());
                }
                return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern());
            }

            // Runtime path: emit a struct_field_val AIR instruction.
            try sema.requireRuntimeBlock(block, src, null);
            try sema.resolveTypeLayout(field_ty);
            return block.addStructFieldVal(struct_byval, field_index, field_ty);
        },
        .anon_struct_type => |anon_struct| {
            if (anon_struct.names.len == 0) {
                // Unnamed fields: behaves like a tuple.
                return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
            } else {
                const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
                return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
            }
        },
        else => unreachable,
    }
}
|
|
|
|
/// Analyzes a by-value tuple field access. The pseudo-field "len" yields the
/// tuple's arity as a `usize`; any other name must parse as an in-range index.
fn tupleFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    tuple_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    tuple_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // "len" is synthetic: its value is the number of fields.
    if (ip.stringEqlSlice(field_name, "len"))
        return mod.intRef(Type.usize, tuple_ty.structFieldCount(mod));
    const index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
    return sema.tupleFieldValByIndex(block, src, tuple_byval, index, tuple_ty);
}
|
|
|
|
/// Asserts that `field_name` is not "len".
/// Parses `field_name` as a decimal field position and validates it against
/// the tuple's field count, failing with a compile error otherwise.
fn tupleFieldIndex(
    sema: *Sema,
    block: *Block,
    tuple_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!u32 {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(!ip.stringEqlSlice(field_name, "len"));

    // A tuple "field name" is only valid if it is an unsigned integer literal.
    const field_index = field_name.toUnsigned(ip) orelse
        return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{
            field_name.fmt(ip), tuple_ty.fmt(mod),
        });

    if (field_index >= tuple_ty.structFieldCount(mod)) {
        return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{
            field_name.fmt(ip), tuple_ty.fmt(mod),
        });
    }
    return field_index;
}
|
|
|
|
/// Analyzes a by-value tuple access at a known `field_index`, producing a
/// comptime value when possible, otherwise a runtime `struct_field_val`.
///
/// Fix: the `.bytes` aggregate-storage prong previously read `bytes[0]`,
/// yielding the first byte for every field; it must index with `field_index`,
/// matching the `.elems` prong.
fn tupleFieldValByIndex(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    tuple_byval: Air.Inst.Ref,
    field_index: u32,
    tuple_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const field_ty = tuple_ty.structFieldType(field_index, mod);

    // A comptime field's value comes from its (resolved) initializer.
    if (tuple_ty.structFieldIsComptime(field_index, mod))
        try sema.resolveStructFieldInits(tuple_ty);
    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
        return Air.internedToRef(default_value.toIntern());
    }

    if (try sema.resolveValue(tuple_byval)) |tuple_val| {
        if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
            return Air.internedToRef(opv.toIntern());
        }
        return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) {
            .undef => mod.undefRef(field_ty),
            .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
                // Byte-backed storage holds one u8 per field; select the
                // requested field, not the first byte.
                .bytes => |bytes| try mod.intValue(Type.u8, bytes[field_index]),
                .elems => |elems| Value.fromInterned(elems[field_index]),
                .repeated_elem => |elem| Value.fromInterned(elem),
            }.toIntern()),
            else => unreachable,
        };
    }

    // Runtime-known tuple: emit a runtime field extraction.
    try sema.requireRuntimeBlock(block, src, null);
    try sema.resolveTypeLayout(field_ty);
    return block.addStructFieldVal(tuple_byval, field_index, field_ty);
}
|
|
|
|
/// Analyzes `&union_ptr.field_name` (or field-pointer initialization when
/// `initializing` is true). Computes the field-pointer type (narrowing
/// alignment for auto-layout unions), checks that the accessed field is the
/// active one for comptime-known auto-layout unions, and inserts a runtime
/// tag-check panic when safety is enabled.
fn unionFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    union_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    assert(union_ty.zigTypeTag(mod) == .Union);

    const union_ptr_ty = sema.typeOf(union_ptr);
    const union_ptr_info = union_ptr_ty.ptrInfo(mod);
    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
    // The field pointer inherits const/volatile/address-space from the union
    // pointer. For auto layout, the field pointer's alignment is the minimum
    // of the union's alignment (or the pointer's explicit one) and the
    // field's alignment.
    const ptr_field_ty = try sema.ptrType(.{
        .child = field_ty.toIntern(),
        .flags = .{
            .is_const = union_ptr_info.flags.is_const,
            .is_volatile = union_ptr_info.flags.is_volatile,
            .address_space = union_ptr_info.flags.address_space,
            .alignment = if (union_obj.getLayout(ip) == .Auto) blk: {
                const union_align = if (union_ptr_info.flags.alignment != .none)
                    union_ptr_info.flags.alignment
                else
                    try sema.typeAbiAlignment(union_ty);
                const field_align = try sema.unionFieldAlignment(union_obj, field_index);
                break :blk union_align.min(field_align);
            } else union_ptr_info.flags.alignment,
        },
        .packed_offset = union_ptr_info.packed_offset,
    });
    const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);

    // A 'noreturn' field can never hold a value, so initializing it is an error.
    if (initializing and field_ty.zigTypeTag(mod) == .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                field_name.fmt(ip),
            });
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Comptime-known union pointer: validate active tag (auto layout, reads
    // only) and produce an interned field pointer.
    if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
        switch (union_obj.getLayout(ip)) {
            .Auto => if (!initializing) {
                // If the pointee can't be dereferenced at comptime, fall
                // through to the runtime path.
                const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
                    break :ct;
                if (union_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, src);
                }
                const un = ip.indexToKey(union_val.toIntern()).un;
                const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
                const tag_matches = un.tag == field_tag.toIntern();
                if (!tag_matches) {
                    const msg = msg: {
                        const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
                        const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
                        const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
                            field_name.fmt(ip),
                            active_field_name.fmt(ip),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.addDeclaredHereNote(msg, union_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            },
            // Packed/extern unions permit access through any field.
            .Packed, .Extern => {},
        }
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = union_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
    }

    try sema.requireRuntimeBlock(block, src, null);
    // Runtime safety: verify the requested field is active before handing out
    // a pointer to it (auto layout, tagged-for-safety, >1 field only).
    if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
        union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
    {
        const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
        const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
        // TODO would it be better if get_union_tag supported pointers to unions?
        const union_val = try block.addTyOp(.load, union_ty, union_ptr);
        const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
        try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
    }
    if (field_ty.zigTypeTag(mod) == .NoReturn) {
        // Accessing a noreturn field cannot produce a value.
        _ = try block.addNoOp(.unreach);
        return .unreachable_value;
    }
    return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty);
}
|
|
|
|
/// Analyzes a by-value union field access `union_byval.field_name`. For
/// comptime-known operands: auto layout requires the field to be active;
/// packed/extern layouts attempt a comptime bitcast of the stored payload.
/// Otherwise a runtime extraction (with optional tag-check safety) is emitted.
fn unionFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    union_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(union_ty.zigTypeTag(mod) == .Union);

    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
    const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);

    if (try sema.resolveValue(union_byval)) |union_val| {
        if (union_val.isUndef(mod)) return mod.undefRef(field_ty);

        const un = ip.indexToKey(union_val.toIntern()).un;
        const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
        const tag_matches = un.tag == field_tag.toIntern();
        switch (union_obj.getLayout(ip)) {
            .Auto => {
                if (tag_matches) {
                    return Air.internedToRef(un.val);
                } else {
                    // Auto layout: accessing an inactive field at comptime is
                    // a compile error.
                    const msg = msg: {
                        const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
                        const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
                        const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
                            field_name.fmt(ip), active_field_name.fmt(ip),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.addDeclaredHereNote(msg, union_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(block, msg);
                }
            },
            .Packed, .Extern => |layout| {
                if (tag_matches) {
                    return Air.internedToRef(un.val);
                } else {
                    // Inactive field of a packed/extern union: reinterpret the
                    // stored value's bits as the requested field type. A tag
                    // of `.none` means the stored value's own type is used.
                    const old_ty = if (un.tag == .none)
                        Type.fromInterned(ip.typeOf(un.val))
                    else
                        union_ty.unionFieldType(Value.fromInterned(un.tag), mod).?;

                    // Falls through to the runtime path if the bitcast cannot
                    // be done at comptime.
                    if (try sema.bitCastUnionFieldVal(block, src, Value.fromInterned(un.val), old_ty, field_ty, layout)) |new_val| {
                        return Air.internedToRef(new_val.toIntern());
                    }
                }
            },
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    // Runtime safety: panic if the accessed field is not the active one.
    if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
        union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
    {
        const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
        const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
        const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
        try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
    }
    if (field_ty.zigTypeTag(mod) == .NoReturn) {
        _ = try block.addNoOp(.unreach);
        return .unreachable_value;
    }
    try sema.resolveTypeLayout(field_ty);
    return block.addStructFieldVal(union_byval, field_index, field_ty);
}
|
|
|
|
/// Analyzes `&indexable_ptr[elem_index]`. Dispatches on the pointee type:
/// arrays/vectors get an element pointer, tuples get a field pointer (index
/// must be comptime-known), and other indexables are loaded one layer and
/// re-dispatched via `elemPtrOneLayerOnly`.
fn elemPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable_ptr: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const indexable_ptr_src = src; // TODO better source location
    const indexable_ptr_ty = sema.typeOf(indexable_ptr);

    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
        .Pointer => indexable_ptr_ty.childType(mod),
        else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}),
    };
    try checkIndexable(sema, block, src, indexable_ty);

    const elem_ptr = switch (indexable_ty.zigTypeTag(mod)) {
        .Array, .Vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
        .Struct => blk: {
            // Tuple field access.
            const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                .needed_comptime_reason = "tuple field access index must be comptime-known",
            });
            const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
            break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
        },
        else => {
            // e.g. pointer-to-slice: load the inner indexable and index that.
            const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
            return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety);
        },
    };

    // Track derived pointers for comptime-known-allocation bookkeeping.
    try sema.checkKnownAllocPtr(indexable_ptr, elem_ptr);
    return elem_ptr;
}
|
|
|
|
/// Asserts that the type of indexable is pointer.
/// Produces an element pointer from an already-loaded indexable, dispatching
/// on the pointer size: slices, many/C pointers (comptime fold when both
/// operands are known), and single pointers to arrays/vectors/tuples.
fn elemPtrOneLayerOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const indexable_src = src; // TODO better source location
    const indexable_ty = sema.typeOf(indexable);
    const mod = sema.mod;

    try checkIndexable(sema, block, src, indexable_ty);

    switch (indexable_ty.ptrSize(mod)) {
        .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
        .Many, .C => {
            const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
            const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
            // If both pointer and index are comptime-known, the labeled block
            // returns the folded element pointer directly; otherwise it breaks
            // with the source location of the runtime-known operand.
            const runtime_src = rs: {
                const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
                const index_val = maybe_index_val orelse break :rs elem_index_src;
                const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
                const result_ty = try sema.elemPtrType(indexable_ty, index);
                const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod);
                return Air.internedToRef(elem_ptr.toIntern());
            };
            const result_ty = try sema.elemPtrType(indexable_ty, null);

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addPtrElemPtr(indexable, elem_index, result_ty);
        },
        .One => {
            const child_ty = indexable_ty.childType(mod);
            const elem_ptr = switch (child_ty.zigTypeTag(mod)) {
                .Array, .Vector => try sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety),
                .Struct => blk: {
                    // Single pointer to tuple: comptime-known field index only.
                    assert(child_ty.isTuple(mod));
                    const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                        .needed_comptime_reason = "tuple field access index must be comptime-known",
                    });
                    const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
                    break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false);
                },
                else => unreachable, // Guaranteed by checkIndexable
            };
            try sema.checkKnownAllocPtr(indexable, elem_ptr);
            return elem_ptr;
        },
    }
}
|
|
|
|
/// Analyzes `indexable[elem_index]` by value. Coerces the index to `usize`,
/// then dispatches on the indexable's type tag: slices, many/C pointers
/// (with comptime dereference when possible), single pointers (including the
/// array-sentinel special case), arrays, vectors, and tuples.
fn elemVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable: Air.Inst.Ref,
    elem_index_uncasted: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const indexable_src = src; // TODO better source location
    const indexable_ty = sema.typeOf(indexable);
    const mod = sema.mod;

    try checkIndexable(sema, block, src, indexable_ty);

    // TODO in case of a vector of pointers, we need to detect whether the element
    // index is a scalar or vector instead of unconditionally casting to usize.
    const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);

    switch (indexable_ty.zigTypeTag(mod)) {
        .Pointer => switch (indexable_ty.ptrSize(mod)) {
            .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
            .Many, .C => {
                const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

                // Comptime fold: form a const element pointer and try to
                // dereference it; break with the runtime-known operand's
                // source location otherwise.
                const runtime_src = rs: {
                    const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
                    const elem_ty = indexable_ty.elemType2(mod);
                    const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                    const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
                    const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
                    const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod);
                    if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                        return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern());
                    }
                    break :rs indexable_src;
                };

                try sema.requireRuntimeBlock(block, src, runtime_src);
                return block.addBinOp(.ptr_elem_val, indexable, elem_index);
            },
            .One => {
                // Special case: indexing a pointer-to-array at exactly its
                // length yields the sentinel value (when one exists and the
                // index is comptime-known).
                arr_sent: {
                    const inner_ty = indexable_ty.childType(mod);
                    if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent;
                    const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent;
                    const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
                    const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema));
                    if (index != inner_ty.arrayLen(mod)) break :arr_sent;
                    return Air.internedToRef(sentinel.toIntern());
                }
                // General case: take an element pointer and load through it.
                const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
                return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
            },
        },
        .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
        .Vector => {
            // TODO: If the index is a vector, the result should be a vector.
            return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
        },
        .Struct => {
            // Tuple field access.
            const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                .needed_comptime_reason = "tuple field access index must be comptime-known",
            });
            const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
            return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
        },
        else => unreachable,
    }
}
|
|
|
|
/// Emits a compile error when a runtime-known index would select a value of
/// a comptime-only element type; no-op otherwise.
fn validateRuntimeElemAccess(
    sema: *Sema,
    block: *Block,
    elem_index_src: LazySrcLoc,
    elem_ty: Type,
    parent_ty: Type,
    parent_src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    if (!(try sema.typeRequiresComptime(elem_ty))) return;

    // Build the error message with a note explaining why the type is
    // comptime-only; the errdefer only applies while the message is being
    // assembled — ownership transfers to failWithOwnedErrorMsg afterwards.
    const err_msg = build: {
        const m = try sema.errMsg(
            block,
            elem_index_src,
            "values of type '{}' must be comptime-known, but index value is runtime-known",
            .{parent_ty.fmt(mod)},
        );
        errdefer m.destroy(sema.gpa);
        const src_decl = mod.declPtr(block.src_decl);
        try sema.explainWhyTypeIsComptime(m, src_decl.toSrcLoc(parent_src, mod), parent_ty);
        break :build m;
    };
    return sema.failWithOwnedErrorMsg(block, err_msg);
}
|
|
|
|
/// Analyzes a pointer to tuple field `field_index` through `tuple_ptr`.
/// Validates the index, builds the field-pointer type (inheriting
/// const/volatile/address-space from the tuple pointer), and produces a
/// comptime pointer when possible, else a runtime `struct_field_ptr`.
fn tupleFieldPtr(
    sema: *Sema,
    block: *Block,
    tuple_ptr_src: LazySrcLoc,
    tuple_ptr: Air.Inst.Ref,
    field_index_src: LazySrcLoc,
    field_index: u32,
    init: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ptr_ty = sema.typeOf(tuple_ptr);
    const tuple_ty = tuple_ptr_ty.childType(mod);
    try sema.resolveTypeFields(tuple_ty);
    const field_count = tuple_ty.structFieldCount(mod);

    if (field_count == 0) {
        return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{});
    }

    if (field_index >= field_count) {
        return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
            field_index, field_count,
        });
    }

    const field_ty = tuple_ty.structFieldType(field_index, mod);
    // Field pointer mirrors mutability/volatility/address space of the
    // tuple pointer.
    const ptr_field_ty = try sema.ptrType(.{
        .child = field_ty.toIntern(),
        .flags = .{
            .is_const = !tuple_ptr_ty.ptrIsMutable(mod),
            .is_volatile = tuple_ptr_ty.isVolatilePtr(mod),
            .address_space = tuple_ptr_ty.ptrAddressSpace(mod),
        },
    });

    // A comptime field has a fixed value; pointers to it are comptime-field
    // pointers.
    if (tuple_ty.structFieldIsComptime(field_index, mod))
        try sema.resolveStructFieldInits(tuple_ty);
    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .comptime_field = default_val.toIntern() },
        } })));
    }

    // Comptime-known base pointer: intern a derived field pointer.
    if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| {
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = tuple_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
    }

    // When not initializing, runtime access to a comptime-only field type is
    // a compile error.
    if (!init) {
        try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_ptr_src);
    }

    try sema.requireRuntimeBlock(block, tuple_ptr_src, null);
    return block.addStructFieldPtr(tuple_ptr, field_index, ptr_field_ty);
}
|
|
|
|
/// Analyzes a by-value access of tuple field `field_index`. Validates the
/// index, returns comptime-field / comptime-known values when possible,
/// otherwise emits a runtime `struct_field_val`.
fn tupleField(
    sema: *Sema,
    block: *Block,
    tuple_src: LazySrcLoc,
    tuple: Air.Inst.Ref,
    field_index_src: LazySrcLoc,
    field_index: u32,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ty = sema.typeOf(tuple);
    try sema.resolveTypeFields(tuple_ty);
    const field_count = tuple_ty.structFieldCount(mod);

    if (field_count == 0) {
        return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{});
    }

    if (field_index >= field_count) {
        return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
            field_index, field_count,
        });
    }

    const field_ty = tuple_ty.structFieldType(field_index, mod);

    // A comptime field's value is its (resolved) initializer.
    if (tuple_ty.structFieldIsComptime(field_index, mod))
        try sema.resolveStructFieldInits(tuple_ty);
    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
        return Air.internedToRef(default_value.toIntern()); // comptime field
    }

    if (try sema.resolveValue(tuple)) |tuple_val| {
        // Undef tuple => undef field.
        if (tuple_val.isUndef(mod)) return mod.undefRef(field_ty);
        return Air.internedToRef((try tuple_val.fieldValue(mod, field_index)).toIntern());
    }

    // Runtime access: reject comptime-only field types.
    try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);

    try sema.requireRuntimeBlock(block, tuple_src, null);
    try sema.resolveTypeLayout(field_ty);
    return block.addStructFieldVal(tuple, field_index, field_ty);
}
|
|
|
|
/// Analyzes `array[elem_index]` by value. Handles the sentinel pseudo-element
/// at index == len, comptime bounds checking, comptime folding, and emits an
/// optional runtime bounds check followed by `array_elem_val`.
fn elemValArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_src: LazySrcLoc,
    array: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const array_ty = sema.typeOf(array);
    const array_sent = array_ty.sentinel(mod);
    const array_len = array_ty.arrayLen(mod);
    // Length including the sentinel slot, if any.
    const array_len_s = array_len + @intFromBool(array_sent != null);
    const elem_ty = array_ty.childType(mod);

    if (array_len_s == 0) {
        return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
    }

    const maybe_undef_array_val = try sema.resolveValue(array);
    // index must be defined since it can access out of bounds
    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

    if (maybe_index_val) |index_val| {
        const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
        // Index exactly at the length reads the sentinel.
        if (array_sent) |s| {
            if (index == array_len) {
                return Air.internedToRef(s.toIntern());
            }
        }
        if (index >= array_len_s) {
            const sentinel_label: []const u8 = if (array_sent != null) " +1 (sentinel)" else "";
            return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
        }
    }
    if (maybe_undef_array_val) |array_val| {
        if (array_val.isUndef(mod)) {
            return mod.undefRef(elem_ty);
        }
        // Both array and index comptime-known: fold the element value.
        if (maybe_index_val) |index_val| {
            const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
            const elem_val = try array_val.elemValue(mod, index);
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    // Runtime index into comptime-only element types is a compile error.
    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, array_ty, array_src);

    const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);
    try sema.queueFullTypeResolution(array_ty);
    if (oob_safety and block.wantSafety()) {
        // Runtime check is only needed if unable to comptime check
        if (maybe_index_val == null) {
            const len_inst = try mod.intRef(Type.usize, array_len);
            // With a sentinel, index == len is allowed, hence <= instead of <.
            const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
            try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
        }
    }
    return block.addBinOp(.array_elem_val, array, elem_index);
}
|
|
|
|
/// Analyzes `&array_ptr[elem_index]`. Performs comptime bounds checking when
/// the index is known, folds to an interned element pointer when the base is
/// also known, and otherwise emits an optional runtime bounds check followed
/// by `ptr_elem_ptr`.
fn elemPtrArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_ptr_src: LazySrcLoc,
    array_ptr: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const array_ptr_ty = sema.typeOf(array_ptr);
    const array_ty = array_ptr_ty.childType(mod);
    const array_sent = array_ty.sentinel(mod) != null;
    const array_len = array_ty.arrayLen(mod);
    // Length including the sentinel slot, if any.
    const array_len_s = array_len + @intFromBool(array_sent);

    if (array_len_s == 0) {
        return sema.fail(block, array_ptr_src, "indexing into empty array is not allowed", .{});
    }

    const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr);
    // The index must not be undefined since it can be out of bounds.
    const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema));
        if (index >= array_len_s) {
            const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
            return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
        }
        break :o index;
    } else null;

    // A known offset can refine the result pointer type (e.g. alignment).
    const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);

    if (maybe_undef_array_ptr_val) |array_ptr_val| {
        if (array_ptr_val.isUndef(mod)) {
            return mod.undefRef(elem_ptr_ty);
        }
        // Base pointer and index both comptime-known: fold.
        if (offset) |index| {
            const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod);
            return Air.internedToRef(elem_ptr.toIntern());
        }
    }

    // When not initializing, runtime access to comptime-only element types is
    // a compile error.
    if (!init) {
        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
    }

    const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Runtime check is only needed if unable to comptime check.
    if (oob_safety and block.wantSafety() and offset == null) {
        const len_inst = try mod.intRef(Type.usize, array_len);
        // With a sentinel, index == len is allowed, hence <= instead of <.
        const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }

    return block.addPtrElemPtr(array_ptr, elem_index, elem_ptr_ty);
}
|
|
|
|
/// Analyzes `slice[elem_index]` by value. Comptime-folds when both slice and
/// index are known (bounds-checking against the comptime length), otherwise
/// emits an optional runtime bounds check and `slice_elem_val`.
fn elemValSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const slice_ty = sema.typeOf(slice);
    const slice_sent = slice_ty.sentinel(mod) != null;
    const elem_ty = slice_ty.elemType2(mod);
    // Tracks which operand forces the runtime path; refined as operands
    // resolve below.
    var runtime_src = slice_src;

    // slice must be defined since it can dereferenced as null
    const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
    // index must be defined since it can index out of bounds
    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

    if (maybe_slice_val) |slice_val| {
        runtime_src = elem_index_src;
        const slice_len = slice_val.sliceLen(mod);
        // Length including the sentinel slot, if any.
        const slice_len_s = slice_len + @intFromBool(slice_sent);
        if (slice_len_s == 0) {
            return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
        }
        if (maybe_index_val) |index_val| {
            const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
            if (index >= slice_len_s) {
                const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
            }
            const elem_ptr_ty = try sema.elemPtrType(slice_ty, index);
            const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
            if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                return Air.internedToRef(elem_val.toIntern());
            }
            // Deref failed at comptime; the slice operand forces runtime.
            runtime_src = slice_src;
        }
    }

    // Runtime index into comptime-only element types is a compile error.
    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (oob_safety and block.wantSafety()) {
        // Use the comptime length when known; otherwise read it at runtime.
        const len_inst = if (maybe_slice_val) |slice_val|
            try mod.intRef(Type.usize, slice_val.sliceLen(mod))
        else
            try block.addTyOp(.slice_len, Type.usize, slice);
        // With a sentinel, index == len is allowed, hence <= instead of <.
        const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }
    try sema.queueFullTypeResolution(sema.typeOf(slice));
    return block.addBinOp(.slice_elem_val, slice, elem_index);
}
|
|
|
|
/// Analyzes `&slice[elem_index]`. Comptime-folds to an interned element
/// pointer when both slice and index are known, otherwise emits an optional
/// runtime bounds check followed by `slice_elem_ptr`.
fn elemPtrSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const slice_ty = sema.typeOf(slice);
    const slice_sent = slice_ty.sentinel(mod) != null;

    const maybe_undef_slice_val = try sema.resolveValue(slice);
    // The index must not be undefined since it can be out of bounds.
    const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema));
        break :o index;
    } else null;

    // A known offset can refine the result pointer type (e.g. alignment).
    const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);

    if (maybe_undef_slice_val) |slice_val| {
        if (slice_val.isUndef(mod)) {
            return mod.undefRef(elem_ptr_ty);
        }
        const slice_len = slice_val.sliceLen(mod);
        // Length including the sentinel slot, if any.
        const slice_len_s = slice_len + @intFromBool(slice_sent);
        if (slice_len_s == 0) {
            return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
        }
        if (offset) |index| {
            if (index >= slice_len_s) {
                const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
            }
            const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
            return Air.internedToRef(elem_ptr_val.toIntern());
        }
    }

    // Runtime index into comptime-only element types is a compile error.
    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ptr_ty, slice_ty, slice_src);

    const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (oob_safety and block.wantSafety()) {
        // Use the comptime length when known (and defined); otherwise read it
        // at runtime.
        const len_inst = len: {
            if (maybe_undef_slice_val) |slice_val|
                if (!slice_val.isUndef(mod))
                    break :len try mod.intRef(Type.usize, slice_val.sliceLen(mod));
            break :len try block.addTyOp(.slice_len, Type.usize, slice);
        };
        // With a sentinel, index == len is allowed, hence <= instead of <.
        const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }
    return block.addSliceElemPtr(slice, elem_index, elem_ptr_ty);
}
|
|
|
|
/// Coerces `inst` to `dest_ty_unresolved` with default `CoerceOpts`.
/// Because the default options report failures via compile errors,
/// `error.NotCoercible` can never escape `coerceExtra` here.
fn coerce(
    sema: *Sema,
    block: *Block,
    dest_ty_unresolved: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    if (sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, .{})) |result| {
        return result;
    } else |err| switch (err) {
        // Default opts => errors were already reported; this variant is
        // impossible.
        error.NotCoercible => unreachable,
        else => |e| return e,
    }
}
|
|
|
|
/// Error set of `coerceExtra`: everything in `CompileError` plus the
/// recoverable `NotCoercible` signal used for "try another strategy" flows.
/// NOTE(review): "Coersion" is a historical misspelling of "Coercion";
/// kept as-is because the name is referenced by other declarations.
const CoersionError = CompileError || error{
    /// When coerce is called recursively, this error should be returned instead of using `fail`
    /// to ensure correct types in compile errors.
    NotCoercible,
};
|
|
|
|
/// Options controlling how `coerceExtra` behaves and how it phrases its
/// compile errors. All fields default to the behavior of plain `coerce`.
const CoerceOpts = struct {
    /// Should coerceExtra emit error messages.
    /// When `false`, coercion failure is reported as `error.NotCoercible`
    /// instead of a compile error.
    report_err: bool = true,
    /// Ignored if `report_err == false`.
    /// When set, failure notes mention the function return type (see the
    /// "Add notes about function return type" section in `coerceExtra`).
    is_ret: bool = false,
    /// Should coercion to comptime_int emit an error message.
    no_cast_to_comptime_int: bool = false,

    /// Optional callsite context: identifies which parameter of which
    /// function triggered the coercion, so the error can point at the
    /// parameter's declaration.
    param_src: struct {
        // `.none` means "no parameter context available".
        func_inst: Air.Inst.Ref = .none,
        // Only meaningful when `func_inst != .none` — left undefined otherwise.
        param_i: u32 = undefined,

        /// Resolve the source location of the parameter declaration, or
        /// return null when no function/parameter context is available.
        fn get(info: @This(), sema: *Sema) !?Module.SrcLoc {
            if (info.func_inst == .none) return null;
            const mod = sema.mod;
            // Function declaration may be unresolvable (e.g. not a direct decl ref).
            const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null;
            const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i);
            if (param_src == .node_offset_param) {
                // Build the SrcLoc directly from the node offset carried in
                // the `node_offset_param` payload.
                return Module.SrcLoc{
                    .file_scope = fn_decl.getFileScope(mod),
                    .parent_decl_node = fn_decl.src_node,
                    .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param),
                };
            }
            return fn_decl.toSrcLoc(param_src, mod);
        }
    } = .{},
};
|
|
|
|
/// Workhorse of type coercion: attempt to produce a value of `dest_ty` from
/// `inst`. Strategy: first try in-memory coercion (identical representation,
/// so comptime values are re-interned and runtime values get a bitcast);
/// otherwise dispatch on the destination type tag and try the specific
/// coercion rules for that shape (optionals, pointers, ints, floats, enums,
/// error unions, unions, arrays, vectors, structs).
///
/// On failure: if `opts.report_err` is set, a compile error is emitted
/// (annotated with the best `in_memory_result` diagnosis gathered along the
/// way); otherwise `error.NotCoercible` is returned so the caller can try a
/// different strategy.
fn coerceExtra(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
    opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
    // Generic poison means the destination type is not yet known; pass through.
    if (dest_ty.isGenericPoison()) return inst;
    const mod = sema.mod;
    const dest_ty_src = inst_src; // TODO better source location
    try sema.resolveTypeFields(dest_ty);
    const inst_ty = sema.typeOf(inst);
    try sema.resolveTypeFields(inst_ty);
    const target = mod.getTarget();
    // If the types are the same, we can return the operand.
    if (dest_ty.eql(inst_ty, mod))
        return inst;

    const maybe_inst_val = try sema.resolveValue(inst);

    // `in_memory_result` doubles as the diagnosis carried into the error
    // message if every coercion strategy below fails; several branches
    // overwrite it with a more precise reason before breaking out.
    var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result == .ok) {
        if (maybe_inst_val) |val| {
            // Comptime-known value: re-intern at the destination type.
            return sema.coerceInMemory(val, dest_ty);
        }
        // Runtime value with compatible representation: a bitcast suffices.
        try sema.requireRuntimeBlock(block, inst_src, null);
        try sema.queueFullTypeResolution(dest_ty);
        const new_val = try block.addBitCast(dest_ty, inst);
        try sema.checkKnownAllocPtr(inst, new_val);
        return new_val;
    }

    switch (dest_ty.zigTypeTag(mod)) {
        .Optional => optional: {
            if (maybe_inst_val) |val| {
                // undefined sets the optional bit also to undefined.
                if (val.toIntern() == .undef) {
                    return mod.undefRef(dest_ty);
                }

                // null to ?T
                if (val.toIntern() == .null_value) {
                    return Air.internedToRef((try mod.intern(.{ .opt = .{
                        .ty = dest_ty.toIntern(),
                        .val = .none,
                    } })));
                }
            }

            // cast from ?*T and ?[*]T to ?*anyopaque
            // but don't do it if the source type is a double pointer
            if (dest_ty.isPtrLikeOptional(mod) and
                dest_ty.elemType2(mod).toIntern() == .anyopaque_type and
                inst_ty.isPtrAtRuntime(mod))
            anyopaque_check: {
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
                const elem_ty = inst_ty.elemType2(mod);
                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                    in_memory_result = .{ .double_ptr_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :optional;
                }
                // Let the logic below handle wrapping the optional now that
                // it has been checked to correctly coerce.
                if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // T to ?T
            const child_type = dest_ty.optionalChild(mod);
            const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                error.NotCoercible => {
                    if (in_memory_result == .no_match) {
                        // Try to give more useful notes
                        in_memory_result = try sema.coerceInMemoryAllowed(block, child_type, inst_ty, false, target, dest_ty_src, inst_src);
                    }
                    break :optional;
                },
                else => |e| return e,
            };
            return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
        },
        .Pointer => pointer: {
            const dest_info = dest_ty.ptrInfo(mod);

            // Function body to function pointer.
            if (inst_ty.zigTypeTag(mod) == .Fn) {
                const fn_val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
                const fn_decl = fn_val.pointerDecl(mod).?;
                const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
                return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
            }

            // *T to *[1]T
            single_item: {
                if (dest_info.flags.size != .One) break :single_item;
                if (!inst_ty.isSinglePointer(mod)) break :single_item;
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const ptr_elem_ty = inst_ty.childType(mod);
                const array_ty = Type.fromInterned(dest_info.child);
                if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
                const array_elem_ty = array_ty.childType(mod);
                if (array_ty.arrayLen(mod) != 1) break :single_item;
                const dest_is_mut = !dest_info.flags.is_const;
                switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    else => break :single_item,
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // Coercions where the source is a single pointer to an array.
            src_array_ptr: {
                if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const array_ty = inst_ty.childType(mod);
                if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
                const array_elem_type = array_ty.childType(mod);
                const dest_is_mut = !dest_info.flags.is_const;

                const dst_elem_type = Type.fromInterned(dest_info.child);
                const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src);
                switch (elem_res) {
                    .ok => {},
                    else => {
                        in_memory_result = .{ .ptr_child = .{
                            .child = try elem_res.dupe(sema.arena),
                            .actual = array_elem_type,
                            .wanted = dst_elem_type,
                        } };
                        break :src_array_ptr;
                    },
                }

                // Destination sentinel (if any) must match the source
                // array's sentinel after in-memory coercion.
                if (dest_info.sentinel != .none) {
                    if (array_ty.sentinel(mod)) |inst_sent| {
                        if (Air.internedToRef(dest_info.sentinel) !=
                            try sema.coerceInMemory(inst_sent, dst_elem_type))
                        {
                            in_memory_result = .{ .ptr_sentinel = .{
                                .actual = inst_sent,
                                .wanted = Value.fromInterned(dest_info.sentinel),
                                .ty = dst_elem_type,
                            } };
                            break :src_array_ptr;
                        }
                    } else {
                        // `Value.@"unreachable"` encodes "source has no sentinel".
                        in_memory_result = .{ .ptr_sentinel = .{
                            .actual = Value.@"unreachable",
                            .wanted = Value.fromInterned(dest_info.sentinel),
                            .ty = dst_elem_type,
                        } };
                        break :src_array_ptr;
                    }
                }

                switch (dest_info.flags.size) {
                    .Slice => {
                        // *[N]T to []T
                        return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
                    },
                    .C => {
                        // *[N]T to [*c]T
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    .Many => {
                        // *[N]T to [*]T
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    .One => {},
                }
            }

            // coercion from C pointer
            if (inst_ty.isCPtr(mod)) src_c_ptr: {
                if (dest_info.flags.size == .Slice) break :src_c_ptr;
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
                // In this case we must add a safety check because the C pointer
                // could be null.
                const src_elem_ty = inst_ty.childType(mod);
                const dest_is_mut = !dest_info.flags.is_const;
                const dst_elem_type = Type.fromInterned(dest_info.child);
                switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    else => break :src_c_ptr,
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // cast from *T and [*]T to *anyopaque
            // but don't do it if the source type is a double pointer
            if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const elem_ty = inst_ty.elemType2(mod);
                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                    in_memory_result = .{ .double_ptr_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :pointer;
                }
                if (dest_ty.isSlice(mod)) break :to_anyopaque;
                if (inst_ty.isSlice(mod)) {
                    in_memory_result = .{ .slice_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :pointer;
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            switch (dest_info.flags.size) {
                // coercion to C pointer
                .C => switch (inst_ty.zigTypeTag(mod)) {
                    // `null` coerces to a zero-valued [*c] pointer.
                    .Null => return Air.internedToRef(try mod.intern(.{ .ptr = .{
                        .ty = dest_ty.toIntern(),
                        .addr = .{ .int = .zero_usize },
                    } })),
                    .ComptimeInt => {
                        const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                            error.NotCoercible => break :pointer,
                            else => |e| return e,
                        };
                        return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    .Int => {
                        // Route the integer through isize/usize matching its
                        // signedness before reinterpreting it as a pointer.
                        const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
                            .signed => Type.isize,
                            .unsigned => Type.usize,
                        };
                        const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                            error.NotCoercible => {
                                // Try to give more useful notes
                                in_memory_result = try sema.coerceInMemoryAllowed(block, ptr_size_ty, inst_ty, false, target, dest_ty_src, inst_src);
                                break :pointer;
                            },
                            else => |e| return e,
                        };
                        return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    .Pointer => p: {
                        if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
                        const inst_info = inst_ty.ptrInfo(mod);
                        switch (try sema.coerceInMemoryAllowed(
                            block,
                            Type.fromInterned(dest_info.child),
                            Type.fromInterned(inst_info.child),
                            !dest_info.flags.is_const,
                            target,
                            dest_ty_src,
                            inst_src,
                        )) {
                            .ok => {},
                            else => break :p,
                        }
                        if (inst_info.flags.size == .Slice) {
                            assert(dest_info.sentinel == .none);
                            // Only a 0-sentinel-terminated slice may drop its
                            // length to become a [*c] pointer.
                            if (inst_info.sentinel == .none or
                                inst_info.sentinel != (try mod.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
                                break :p;

                            const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
                            return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
                        }
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    else => {},
                },
                .One => switch (Type.fromInterned(dest_info.child).zigTypeTag(mod)) {
                    .Union => {
                        // pointer to anonymous struct to pointer to union
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isAnonStruct(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                        }
                    },
                    .Struct => {
                        // pointer to anonymous struct to pointer to struct
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isAnonStruct(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
                                error.NotCoercible => break :pointer,
                                else => |e| return e,
                            };
                        }
                    },
                    .Array => {
                        // pointer to tuple to pointer to array
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isTuple(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                        }
                    },
                    else => {},
                },
                .Slice => to_slice: {
                    if (inst_ty.zigTypeTag(mod) == .Array) {
                        return sema.fail(
                            block,
                            inst_src,
                            "array literal requires address-of operator (&) to coerce to slice type '{}'",
                            .{dest_ty.fmt(mod)},
                        );
                    }

                    if (!inst_ty.isSinglePointer(mod)) break :to_slice;
                    const inst_child_ty = inst_ty.childType(mod);
                    if (!inst_child_ty.isTuple(mod)) break :to_slice;

                    // empty tuple to zero-length slice
                    // note that this allows coercing to a mutable slice.
                    if (inst_child_ty.structFieldCount(mod) == 0) {
                        // Optional slice is represented with a null pointer so
                        // we use a dummy pointer value with the required alignment.
                        return Air.internedToRef((try mod.intern(.{ .slice = .{
                            .ty = dest_ty.toIntern(),
                            .ptr = try mod.intern(.{ .ptr = .{
                                .ty = dest_ty.slicePtrFieldType(mod).toIntern(),
                                .addr = .{ .int = if (dest_info.flags.alignment != .none)
                                    (try mod.intValue(
                                        Type.usize,
                                        dest_info.flags.alignment.toByteUnitsOptional().?,
                                    )).toIntern()
                                else
                                    try mod.intern_pool.getCoercedInts(
                                        mod.gpa,
                                        mod.intern_pool.indexToKey(
                                            (try Type.fromInterned(dest_info.child).lazyAbiAlignment(mod)).toIntern(),
                                        ).int,
                                        .usize_type,
                                    ) },
                            } }),
                            .len = (try mod.intValue(Type.usize, 0)).toIntern(),
                        } })));
                    }

                    // pointer to tuple to slice
                    if (!dest_info.flags.is_const) {
                        const err_msg = err_msg: {
                            const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)});
                            errdefer err_msg.destroy(sema.gpa);
                            try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
                            break :err_msg err_msg;
                        };
                        return sema.failWithOwnedErrorMsg(block, err_msg);
                    }
                    return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                },
                .Many => p: {
                    if (!inst_ty.isSlice(mod)) break :p;
                    if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
                    const inst_info = inst_ty.ptrInfo(mod);

                    switch (try sema.coerceInMemoryAllowed(
                        block,
                        Type.fromInterned(dest_info.child),
                        Type.fromInterned(inst_info.child),
                        !dest_info.flags.is_const,
                        target,
                        dest_ty_src,
                        inst_src,
                    )) {
                        .ok => {},
                        else => break :p,
                    }

                    // Slice to [*]T requires both sides to carry the same
                    // sentinel (after coercing the source sentinel).
                    if (dest_info.sentinel == .none or inst_info.sentinel == .none or
                        Air.internedToRef(dest_info.sentinel) !=
                        try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child)))
                        break :p;

                    const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
                    return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
                },
            }
        },
        .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
            .Float, .ComptimeFloat => float: {
                const val = maybe_inst_val orelse {
                    if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.failWithNeededComptime(block, inst_src, .{
                            .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
                        });
                    }
                    break :float;
                };
                // `.exact` rejects floats with a fractional component.
                const result_val = try sema.intFromFloat(block, inst_src, val, inst_ty, dest_ty, .exact);
                return Air.internedToRef(result_val.toIntern());
            },
            .Int, .ComptimeInt => {
                if (maybe_inst_val) |val| {
                    // comptime-known integer to other number
                    if (!(try sema.intFitsInType(val, dest_ty, null))) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) });
                    }
                    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
                        .undef => try mod.undefRef(dest_ty),
                        .int => |int| Air.internedToRef(
                            try mod.intern_pool.getCoercedInts(mod.gpa, int, dest_ty.toIntern()),
                        ),
                        else => unreachable,
                    };
                }
                if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                    if (!opts.report_err) return error.NotCoercible;
                    if (opts.no_cast_to_comptime_int) return inst;
                    return sema.failWithNeededComptime(block, inst_src, .{
                        .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
                    });
                }

                // integer widening
                const dst_info = dest_ty.intInfo(mod);
                const src_info = inst_ty.intInfo(mod);
                if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                    // small enough unsigned ints can get casted to large enough signed ints
                    (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
                {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.intcast, dest_ty, inst);
                }
            },
            else => {},
        },
        .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
            .ComptimeFloat => {
                const val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
                const result_val = try val.floatCast(dest_ty, mod);
                return Air.internedToRef(result_val.toIntern());
            },
            .Float => {
                if (maybe_inst_val) |val| {
                    const result_val = try val.floatCast(dest_ty, mod);
                    // Round-trip check: narrowing must not lose the value.
                    if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) {
                        return sema.fail(
                            block,
                            inst_src,
                            "type '{}' cannot represent float value '{}'",
                            .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) },
                        );
                    }
                    return Air.internedToRef(result_val.toIntern());
                } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                    if (!opts.report_err) return error.NotCoercible;
                    return sema.failWithNeededComptime(block, inst_src, .{
                        .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
                    });
                }

                // float widening
                const src_bits = inst_ty.floatBits(target);
                const dst_bits = dest_ty.floatBits(target);
                if (dst_bits >= src_bits) {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.fpext, dest_ty, inst);
                }
            },
            .Int, .ComptimeInt => int: {
                const val = maybe_inst_val orelse {
                    if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.failWithNeededComptime(block, inst_src, .{
                            .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
                        });
                    }
                    break :int;
                };
                const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, mod, sema);
                // TODO implement this compile error
                //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty);
                //if (!int_again_val.eql(val, inst_ty, mod)) {
                //    return sema.fail(
                //        block,
                //        inst_src,
                //        "type '{}' cannot represent integer value '{}'",
                //        .{ dest_ty.fmt(mod), val },
                //    );
                //}
                return Air.internedToRef(result_val.toIntern());
            },
            else => {},
        },
        .Enum => switch (inst_ty.zigTypeTag(mod)) {
            .EnumLiteral => {
                // enum literal to enum
                const val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
                const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal;
                const field_index = dest_ty.enumFieldIndex(string, mod) orelse {
                    return sema.fail(block, inst_src, "no field named '{}' in enum '{}'", .{
                        string.fmt(&mod.intern_pool), dest_ty.fmt(mod),
                    });
                };
                return Air.internedToRef((try mod.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
            },
            .Union => blk: {
                // union to its own tag type
                const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
                if (union_tag_ty.eql(dest_ty, mod)) {
                    return sema.unionToTag(block, dest_ty, inst, inst_src);
                }
            },
            else => {},
        },
        .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
            .ErrorUnion => eu: {
                if (maybe_inst_val) |inst_val| {
                    switch (inst_val.toIntern()) {
                        .undef => return mod.undefRef(dest_ty),
                        else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) {
                            .error_union => |error_union| switch (error_union.val) {
                                // Error side: rewrap the error name in the
                                // destination's error set.
                                .err_name => |err_name| {
                                    const error_set_ty = inst_ty.errorUnionSet(mod);
                                    const error_set_val = Air.internedToRef((try mod.intern(.{ .err = .{
                                        .ty = error_set_ty.toIntern(),
                                        .name = err_name,
                                    } })));
                                    return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src);
                                },
                                // Payload side: coerce the payload and rewrap.
                                .payload => |payload| {
                                    const payload_val = Air.internedToRef(payload);
                                    return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) {
                                        error.NotCoercible => break :eu,
                                        else => |e| return e,
                                    };
                                },
                            },
                            else => unreachable,
                        },
                    }
                }
            },
            .ErrorSet => {
                // E to E!T
                return sema.wrapErrorUnionSet(block, dest_ty, inst, inst_src);
            },
            else => eu: {
                // T to E!T
                return sema.wrapErrorUnionPayload(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                    error.NotCoercible => break :eu,
                    else => |e| return e,
                };
            },
        },
        .Union => switch (inst_ty.zigTypeTag(mod)) {
            .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst_ty.isAnonStruct(mod)) {
                    return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            else => {},
        },
        .Array => switch (inst_ty.zigTypeTag(mod)) {
            .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst == .empty_struct) {
                    return sema.arrayInitEmpty(block, inst_src, dest_ty);
                }
                if (inst_ty.isTuple(mod)) {
                    return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            else => {},
        },
        .Vector => switch (inst_ty.zigTypeTag(mod)) {
            .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst_ty.isTuple(mod)) {
                    return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            else => {},
        },
        .Struct => blk: {
            if (inst == .empty_struct) {
                return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
            }
            if (inst_ty.isTupleOrAnonStruct(mod)) {
                return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                    error.NotCoercible => break :blk,
                    else => |e| return e,
                };
            }
        },
        else => {},
    }

    // undefined to anything. We do this after the big switch above so that
    // special logic has a chance to run first, such as `*[N]T` to `[]T` which
    // should initialize the length field of the slice.
    if (maybe_inst_val) |val| if (val.toIntern() == .undef) return mod.undefRef(dest_ty);

    if (!opts.report_err) return error.NotCoercible;

    // From here on, coercion has definitively failed: build the error.
    if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
            errdefer msg.destroy(sema.gpa);

            const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
            const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
            try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "'noreturn' declared here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    const msg = msg: {
        const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) });
        errdefer msg.destroy(sema.gpa);

        // E!T to T
        if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
            (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
        {
            try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
            try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
        }

        // ?T to T
        if (inst_ty.zigTypeTag(mod) == .Optional and
            (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
        {
            try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
            try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
        }

        try in_memory_result.report(sema, block, inst_src, msg);

        // Add notes about function return type
        if (opts.is_ret and
            mod.test_functions.get(mod.funcOwnerDeclIndex(sema.func_index)) == null)
        {
            const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
            const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
            if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
                try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "function cannot return an error", .{});
            } else {
                try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "function return type declared here", .{});
            }
        }

        if (try opts.param_src.get(sema)) |param_src| {
            try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
        }

        // TODO maybe add "cannot store an error in type '{}'" note

        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// In-memory coercion of a comptime-known value: re-intern `val` at
/// `dst_ty` via the module's `getCoerced` and wrap the interned result as
/// an AIR instruction reference.
fn coerceInMemory(
    sema: *Sema,
    val: Value,
    dst_ty: Type,
) CompileError!Air.Inst.Ref {
    const coerced = try sema.mod.getCoerced(val, dst_ty);
    return Air.internedToRef(coerced.toIntern());
}
|
|
|
|
const InMemoryCoercionResult = union(enum) {
|
|
ok,
|
|
no_match: Pair,
|
|
int_not_coercible: Int,
|
|
error_union_payload: PairAndChild,
|
|
array_len: IntPair,
|
|
array_sentinel: Sentinel,
|
|
array_elem: PairAndChild,
|
|
vector_len: IntPair,
|
|
vector_elem: PairAndChild,
|
|
optional_shape: Pair,
|
|
optional_child: PairAndChild,
|
|
from_anyerror,
|
|
missing_error: []const InternPool.NullTerminatedString,
|
|
/// true if wanted is var args
|
|
fn_var_args: bool,
|
|
/// true if wanted is generic
|
|
fn_generic: bool,
|
|
fn_param_count: IntPair,
|
|
fn_param_noalias: IntPair,
|
|
fn_param_comptime: ComptimeParam,
|
|
fn_param: Param,
|
|
fn_cc: CC,
|
|
fn_return_type: PairAndChild,
|
|
ptr_child: PairAndChild,
|
|
ptr_addrspace: AddressSpace,
|
|
ptr_sentinel: Sentinel,
|
|
ptr_size: Size,
|
|
ptr_qualifiers: Qualifiers,
|
|
ptr_allowzero: Pair,
|
|
ptr_bit_range: BitRange,
|
|
ptr_alignment: AlignPair,
|
|
double_ptr_to_anyopaque: Pair,
|
|
slice_to_anyopaque: Pair,
|
|
|
|
const Pair = struct {
|
|
actual: Type,
|
|
wanted: Type,
|
|
};
|
|
|
|
const PairAndChild = struct {
|
|
child: *InMemoryCoercionResult,
|
|
actual: Type,
|
|
wanted: Type,
|
|
};
|
|
|
|
const Param = struct {
|
|
child: *InMemoryCoercionResult,
|
|
actual: Type,
|
|
wanted: Type,
|
|
index: u64,
|
|
};
|
|
|
|
const ComptimeParam = struct {
|
|
index: u64,
|
|
wanted: bool,
|
|
};
|
|
|
|
const Sentinel = struct {
|
|
// unreachable_value indicates no sentinel
|
|
actual: Value,
|
|
wanted: Value,
|
|
ty: Type,
|
|
};
|
|
|
|
const Int = struct {
|
|
actual_signedness: std.builtin.Signedness,
|
|
wanted_signedness: std.builtin.Signedness,
|
|
actual_bits: u16,
|
|
wanted_bits: u16,
|
|
};
|
|
|
|
const IntPair = struct {
|
|
actual: u64,
|
|
wanted: u64,
|
|
};
|
|
|
|
const AlignPair = struct {
|
|
actual: Alignment,
|
|
wanted: Alignment,
|
|
};
|
|
|
|
const Size = struct {
|
|
actual: std.builtin.Type.Pointer.Size,
|
|
wanted: std.builtin.Type.Pointer.Size,
|
|
};
|
|
|
|
const Qualifiers = struct {
|
|
actual_const: bool,
|
|
wanted_const: bool,
|
|
actual_volatile: bool,
|
|
wanted_volatile: bool,
|
|
};
|
|
|
|
const AddressSpace = struct {
|
|
actual: std.builtin.AddressSpace,
|
|
wanted: std.builtin.AddressSpace,
|
|
};
|
|
|
|
const CC = struct {
|
|
actual: std.builtin.CallingConvention,
|
|
wanted: std.builtin.CallingConvention,
|
|
};
|
|
|
|
const BitRange = struct {
|
|
actual_host: u16,
|
|
wanted_host: u16,
|
|
actual_offset: u16,
|
|
wanted_offset: u16,
|
|
};
|
|
|
|
fn dupe(child: *const InMemoryCoercionResult, arena: Allocator) !*InMemoryCoercionResult {
|
|
const res = try arena.create(InMemoryCoercionResult);
|
|
res.* = child.*;
|
|
return res;
|
|
}
|
|
|
|
fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
|
|
const mod = sema.mod;
|
|
var cur = res;
|
|
while (true) switch (cur.*) {
|
|
.ok => unreachable,
|
|
.no_match => |types| {
|
|
try sema.addDeclaredHereNote(msg, types.wanted);
|
|
try sema.addDeclaredHereNote(msg, types.actual);
|
|
break;
|
|
},
|
|
.int_not_coercible => |int| {
|
|
try sema.errNote(block, src, msg, "{s} {d}-bit int cannot represent all possible {s} {d}-bit values", .{
|
|
@tagName(int.wanted_signedness), int.wanted_bits, @tagName(int.actual_signedness), int.actual_bits,
|
|
});
|
|
break;
|
|
},
|
|
.error_union_payload => |pair| {
|
|
try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.array_len => |lens| {
|
|
try sema.errNote(block, src, msg, "array of length {d} cannot cast into an array of length {d}", .{
|
|
lens.actual, lens.wanted,
|
|
});
|
|
break;
|
|
},
|
|
.array_sentinel => |sentinel| {
|
|
if (sentinel.actual.toIntern() != .unreachable_value) {
|
|
try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
|
|
sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
|
|
});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
|
|
sentinel.wanted.fmtValue(sentinel.ty, mod),
|
|
});
|
|
}
|
|
break;
|
|
},
|
|
.array_elem => |pair| {
|
|
try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.vector_len => |lens| {
|
|
try sema.errNote(block, src, msg, "vector of length {d} cannot cast into a vector of length {d}", .{
|
|
lens.actual, lens.wanted,
|
|
});
|
|
break;
|
|
},
|
|
.vector_elem => |pair| {
|
|
try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.optional_shape => |pair| {
|
|
try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
|
|
pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod),
|
|
});
|
|
break;
|
|
},
|
|
.optional_child => |pair| {
|
|
try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.from_anyerror => {
|
|
try sema.errNote(block, src, msg, "global error set cannot cast into a smaller set", .{});
|
|
break;
|
|
},
|
|
.missing_error => |missing_errors| {
|
|
for (missing_errors) |err| {
|
|
try sema.errNote(block, src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)});
|
|
}
|
|
break;
|
|
},
|
|
.fn_var_args => |wanted_var_args| {
|
|
if (wanted_var_args) {
|
|
try sema.errNote(block, src, msg, "non-variadic function cannot cast into a variadic function", .{});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "variadic function cannot cast into a non-variadic function", .{});
|
|
}
|
|
break;
|
|
},
|
|
.fn_generic => |wanted_generic| {
|
|
if (wanted_generic) {
|
|
try sema.errNote(block, src, msg, "non-generic function cannot cast into a generic function", .{});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "generic function cannot cast into a non-generic function", .{});
|
|
}
|
|
break;
|
|
},
|
|
.fn_param_count => |lens| {
|
|
try sema.errNote(block, src, msg, "function with {d} parameters cannot cast into a function with {d} parameters", .{
|
|
lens.actual, lens.wanted,
|
|
});
|
|
break;
|
|
},
|
|
.fn_param_noalias => |param| {
|
|
var index: u6 = 0;
|
|
var actual_noalias = false;
|
|
while (true) : (index += 1) {
|
|
const actual: u1 = @truncate(param.actual >> index);
|
|
const wanted: u1 = @truncate(param.wanted >> index);
|
|
if (actual != wanted) {
|
|
actual_noalias = actual == 1;
|
|
break;
|
|
}
|
|
}
|
|
if (!actual_noalias) {
|
|
try sema.errNote(block, src, msg, "regular parameter {d} cannot cast into a noalias parameter", .{index});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "noalias parameter {d} cannot cast into a regular parameter", .{index});
|
|
}
|
|
break;
|
|
},
|
|
.fn_param_comptime => |param| {
|
|
if (param.wanted) {
|
|
try sema.errNote(block, src, msg, "non-comptime parameter {d} cannot cast into a comptime parameter", .{param.index});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "comptime parameter {d} cannot cast into a non-comptime parameter", .{param.index});
|
|
}
|
|
break;
|
|
},
|
|
.fn_param => |param| {
|
|
try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{
|
|
param.index, param.actual.fmt(mod), param.wanted.fmt(mod),
|
|
});
|
|
cur = param.child;
|
|
},
|
|
.fn_cc => |cc| {
|
|
try sema.errNote(block, src, msg, "calling convention '{s}' cannot cast into calling convention '{s}'", .{ @tagName(cc.actual), @tagName(cc.wanted) });
|
|
break;
|
|
},
|
|
.fn_return_type => |pair| {
|
|
try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.ptr_child => |pair| {
|
|
try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
cur = pair.child;
|
|
},
|
|
.ptr_addrspace => |@"addrspace"| {
|
|
try sema.errNote(block, src, msg, "address space '{s}' cannot cast into address space '{s}'", .{ @tagName(@"addrspace".actual), @tagName(@"addrspace".wanted) });
|
|
break;
|
|
},
|
|
.ptr_sentinel => |sentinel| {
|
|
if (sentinel.actual.toIntern() != .unreachable_value) {
|
|
try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
|
|
sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
|
|
});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
|
|
sentinel.wanted.fmtValue(sentinel.ty, mod),
|
|
});
|
|
}
|
|
break;
|
|
},
|
|
.ptr_size => |size| {
|
|
try sema.errNote(block, src, msg, "a {s} pointer cannot cast into a {s} pointer", .{ pointerSizeString(size.actual), pointerSizeString(size.wanted) });
|
|
break;
|
|
},
|
|
.ptr_qualifiers => |qualifiers| {
|
|
const ok_const = !qualifiers.actual_const or qualifiers.wanted_const;
|
|
const ok_volatile = !qualifiers.actual_volatile or qualifiers.wanted_volatile;
|
|
if (!ok_const) {
|
|
try sema.errNote(block, src, msg, "cast discards const qualifier", .{});
|
|
} else if (!ok_volatile) {
|
|
try sema.errNote(block, src, msg, "cast discards volatile qualifier", .{});
|
|
}
|
|
break;
|
|
},
|
|
.ptr_allowzero => |pair| {
|
|
const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
|
|
const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
|
|
if (actual_allow_zero and !wanted_allow_zero) {
|
|
try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
} else {
|
|
try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
}
|
|
break;
|
|
},
|
|
.ptr_bit_range => |bit_range| {
|
|
if (bit_range.actual_host != bit_range.wanted_host) {
|
|
try sema.errNote(block, src, msg, "pointer host size '{}' cannot cast into pointer host size '{}'", .{
|
|
bit_range.actual_host, bit_range.wanted_host,
|
|
});
|
|
}
|
|
if (bit_range.actual_offset != bit_range.wanted_offset) {
|
|
try sema.errNote(block, src, msg, "pointer bit offset '{}' cannot cast into pointer bit offset '{}'", .{
|
|
bit_range.actual_offset, bit_range.wanted_offset,
|
|
});
|
|
}
|
|
break;
|
|
},
|
|
.ptr_alignment => |pair| {
|
|
try sema.errNote(block, src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{
|
|
pair.actual.toByteUnits(0), pair.wanted.toByteUnits(0),
|
|
});
|
|
break;
|
|
},
|
|
.double_ptr_to_anyopaque => |pair| {
|
|
try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
break;
|
|
},
|
|
.slice_to_anyopaque => |pair| {
|
|
try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{
|
|
pair.actual.fmt(mod), pair.wanted.fmt(mod),
|
|
});
|
|
try sema.errNote(block, src, msg, "consider using '.ptr'", .{});
|
|
break;
|
|
},
|
|
};
|
|
}
|
|
};
|
|
|
|
/// Returns the human-readable adjective for a pointer size, used when
/// composing in-memory-coercion error notes ("a single pointer cannot cast
/// into a many pointer"). Slices never reach this function; callers report
/// them through dedicated messages.
fn pointerSizeString(size: std.builtin.Type.Pointer.Size) []const u8 {
    switch (size) {
        .One => return "single",
        .Many => return "many",
        .C => return "C",
        .Slice => unreachable, // handled separately by callers
    }
}
|
|
|
|
/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
/// may be used for the coercion.
/// * `const` attribute can be gained
/// * `volatile` attribute can be gained
/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
/// * alignment can be decreased
/// * bit offset attributes must match exactly
/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
/// * sentinel-terminated pointers can coerce into `[*]`
///
/// Returns `.ok` when `src_ty` may coerce to `dest_ty` without changing the
/// in-memory representation; otherwise returns a diagnostic value describing
/// the first mismatch found, with nested `child` results duplicated into
/// `sema.arena` so they outlive the recursive calls that produced them.
fn coerceInMemoryAllowed(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
    const mod = sema.mod;

    // Identical types always coerce.
    if (dest_ty.eql(src_ty, mod))
        return .ok;

    const dest_tag = dest_ty.zigTypeTag(mod);
    const src_tag = src_ty.zigTypeTag(mod);

    // Differently-named integers with the same number of bits.
    if (dest_tag == .Int and src_tag == .Int) {
        const dest_info = dest_ty.intInfo(mod);
        const src_info = src_ty.intInfo(mod);

        if (dest_info.signedness == src_info.signedness and
            dest_info.bits == src_info.bits)
        {
            return .ok;
        }

        if ((src_info.signedness == dest_info.signedness and dest_info.bits < src_info.bits) or
            // small enough unsigned ints can get casted to large enough signed ints
            (dest_info.signedness == .signed and (src_info.signedness == .unsigned or dest_info.bits <= src_info.bits)) or
            (dest_info.signedness == .unsigned and src_info.signedness == .signed))
        {
            return InMemoryCoercionResult{ .int_not_coercible = .{
                .actual_signedness = src_info.signedness,
                .wanted_signedness = dest_info.signedness,
                .actual_bits = src_info.bits,
                .wanted_bits = dest_info.bits,
            } };
        }
    }

    // Differently-named floats with the same number of bits.
    if (dest_tag == .Float and src_tag == .Float) {
        const dest_bits = dest_ty.floatBits(target);
        const src_bits = src_ty.floatBits(target);
        if (dest_bits == src_bits) {
            return .ok;
        }
    }

    // Pointers / Pointer-like Optionals
    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
    if (maybe_dest_ptr_ty) |dest_ptr_ty| {
        if (maybe_src_ptr_ty) |src_ptr_ty| {
            return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
        }
    }

    // Slices
    if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) {
        return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
    }

    // Functions
    if (dest_tag == .Fn and src_tag == .Fn) {
        return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
    }

    // Error Unions: payload types must coerce, then error sets must coerce.
    if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
        const dest_payload = dest_ty.errorUnionPayload(mod);
        const src_payload = src_ty.errorUnionPayload(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .error_union_payload = .{
                .child = try child.dupe(sema.arena),
                .actual = src_payload,
                .wanted = dest_payload,
            } };
        }
        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src);
    }

    // Error Sets
    if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
        return try sema.coerceInMemoryAllowedErrorSets(block, dest_ty, src_ty, dest_src, src_src);
    }

    // Arrays: lengths must match, element types must coerce, and the
    // destination sentinel (if any) must be satisfied by the source.
    if (dest_tag == .Array and src_tag == .Array) {
        const dest_info = dest_ty.arrayInfo(mod);
        const src_info = src_ty.arrayInfo(mod);
        if (dest_info.len != src_info.len) {
            return InMemoryCoercionResult{ .array_len = .{
                .actual = src_info.len,
                .wanted = dest_info.len,
            } };
        }

        const child = try sema.coerceInMemoryAllowed(block, dest_info.elem_type, src_info.elem_type, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .array_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_info.elem_type,
                .wanted = dest_info.elem_type,
            } };
        }
        // Sentinel is ok if dest has none, or src has one that coerces equal.
        const ok_sent = dest_info.sentinel == null or
            (src_info.sentinel != null and
            dest_info.sentinel.?.eql(
                try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type),
                dest_info.elem_type,
                mod,
            ));
        if (!ok_sent) {
            return InMemoryCoercionResult{ .array_sentinel = .{
                .actual = src_info.sentinel orelse Value.@"unreachable",
                .wanted = dest_info.sentinel orelse Value.@"unreachable",
                .ty = dest_info.elem_type,
            } };
        }
        return .ok;
    }

    // Vectors
    if (dest_tag == .Vector and src_tag == .Vector) {
        const dest_len = dest_ty.vectorLen(mod);
        const src_len = src_ty.vectorLen(mod);
        if (dest_len != src_len) {
            return InMemoryCoercionResult{ .vector_len = .{
                .actual = src_len,
                .wanted = dest_len,
            } };
        }

        const dest_elem_ty = dest_ty.scalarType(mod);
        const src_elem_ty = src_ty.scalarType(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .vector_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_elem_ty,
                .wanted = dest_elem_ty,
            } };
        }

        return .ok;
    }

    // Arrays <-> Vectors
    if ((dest_tag == .Vector and src_tag == .Array) or
        (dest_tag == .Array and src_tag == .Vector))
    {
        const dest_len = dest_ty.arrayLen(mod);
        const src_len = src_ty.arrayLen(mod);
        if (dest_len != src_len) {
            return InMemoryCoercionResult{ .array_len = .{
                .actual = src_len,
                .wanted = dest_len,
            } };
        }

        const dest_elem_ty = dest_ty.childType(mod);
        const src_elem_ty = src_ty.childType(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .array_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_elem_ty,
                .wanted = dest_elem_ty,
            } };
        }

        // A vector cannot provide the sentinel an array destination requires.
        if (dest_tag == .Array) {
            const dest_info = dest_ty.arrayInfo(mod);
            if (dest_info.sentinel != null) {
                return InMemoryCoercionResult{ .array_sentinel = .{
                    .actual = Value.@"unreachable",
                    .wanted = dest_info.sentinel.?,
                    .ty = dest_info.elem_type,
                } };
            }
        }

        // The memory layout of @Vector(N, iM) is the same as the integer type i(N*M),
        // that is to say, the padding bits are not in the same place as the array [N]iM.
        // If there's no padding, the bitcast is possible.
        const elem_bit_size = dest_elem_ty.bitSize(mod);
        const elem_abi_byte_size = dest_elem_ty.abiSize(mod);
        if (elem_abi_byte_size * 8 == elem_bit_size)
            return .ok;
    }

    // Optionals: both must be pointer-like or both not, then children coerce.
    if (dest_tag == .Optional and src_tag == .Optional) {
        if ((maybe_dest_ptr_ty != null) != (maybe_src_ptr_ty != null)) {
            return InMemoryCoercionResult{ .optional_shape = .{
                .actual = src_ty,
                .wanted = dest_ty,
            } };
        }
        const dest_child_type = dest_ty.optionalChild(mod);
        const src_child_type = src_ty.optionalChild(mod);

        const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .optional_child = .{
                .child = try child.dupe(sema.arena),
                .actual = src_child_type,
                .wanted = dest_child_type,
            } };
        }

        return .ok;
    }

    // Tuples (with in-memory-coercible fields)
    if (dest_ty.isTuple(mod) and src_ty.isTuple(mod)) tuple: {
        if (dest_ty.containerLayout(mod) != src_ty.containerLayout(mod)) break :tuple;
        if (dest_ty.structFieldCount(mod) != src_ty.structFieldCount(mod)) break :tuple;
        const field_count = dest_ty.structFieldCount(mod);
        for (0..field_count) |field_idx| {
            if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple;
            if (dest_ty.structFieldAlign(field_idx, mod) != src_ty.structFieldAlign(field_idx, mod)) break :tuple;
            const dest_field_ty = dest_ty.structFieldType(field_idx, mod);
            const src_field_ty = src_ty.structFieldType(field_idx, mod);
            const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src);
            if (field != .ok) break :tuple;
        }
        return .ok;
    }

    // No rule above matched: the types are not in-memory coercible.
    return InMemoryCoercionResult{ .no_match = .{
        .actual = dest_ty,
        .wanted = src_ty,
    } };
}
|
|
|
|
/// Determines whether error set `src_ty` may coerce in memory to error set
/// `dest_ty`: every error in the source must be a member of the destination.
/// Side effects: when `dest_ty` is the current function's inferred error set,
/// the source set's errors are added to it; inferred error sets on either
/// side may be resolved via `resolveInferredErrorSet`.
/// Returns `.ok`, `.from_anyerror`, or `.missing_error` (the list of missing
/// error names, duplicated into `sema.arena`).
fn coerceInMemoryAllowedErrorSets(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // Coercion to `anyerror`. Note that this check can return false negatives
    // in case the error sets did not get resolved.
    if (dest_ty.isAnyError(mod)) {
        return .ok;
    }

    if (dest_ty.toIntern() == .adhoc_inferred_error_set_type) {
        // We are trying to coerce an error set to the current function's
        // inferred error set.
        const dst_ies = sema.fn_ret_ty_ies.?;
        try dst_ies.addErrorSet(src_ty, ip, sema.arena);
        return .ok;
    }

    if (ip.isInferredErrorSetType(dest_ty.toIntern())) {
        const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern());
        if (sema.fn_ret_ty_ies) |dst_ies| {
            if (dst_ies.func == dst_ies_func_index) {
                // We are trying to coerce an error set to the current function's
                // inferred error set.
                try dst_ies.addErrorSet(src_ty, ip, sema.arena);
                return .ok;
            }
        }
        switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .ok,
            else => {},
        }
    }

    // Accumulates source errors that are absent from the destination set.
    var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
    defer missing_error_buf.deinit();

    switch (src_ty.toIntern()) {
        .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) {
            .simple_type => unreachable, // filtered out above
            .error_set_type, .inferred_error_set_type => return .from_anyerror,
            else => unreachable,
        },

        else => switch (ip.indexToKey(src_ty.toIntern())) {
            .inferred_error_set_type => {
                const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
                // src anyerror status might have changed after the resolution.
                if (resolved_src_ty == .anyerror_type) {
                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
                    return .from_anyerror;
                }

                for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| {
                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
                        try missing_error_buf.append(key);
                    }
                }

                if (missing_error_buf.items.len != 0) {
                    return InMemoryCoercionResult{
                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                    };
                }

                return .ok;
            },
            .error_set_type => |error_set_type| {
                for (error_set_type.names.get(ip)) |name| {
                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
                        try missing_error_buf.append(name);
                    }
                }

                if (missing_error_buf.items.len != 0) {
                    return InMemoryCoercionResult{
                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                    };
                }

                return .ok;
            },
            else => unreachable,
        },
    }
}
|
|
|
|
/// Determines whether function type `src_ty` may coerce in memory to function
/// type `dest_ty`. Variadic-ness, generic-ness, calling convention, parameter
/// count, and noalias bits must all match exactly; return types and parameter
/// types are checked recursively via `coerceInMemoryAllowed`.
/// Both types must be function types (`mod.typeToFunc` must succeed).
fn coerceInMemoryAllowedFns(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    const dest_info = mod.typeToFunc(dest_ty).?;
    const src_info = mod.typeToFunc(src_ty).?;

    {
        if (dest_info.is_var_args != src_info.is_var_args) {
            return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
        }

        if (dest_info.is_generic != src_info.is_generic) {
            return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
        }

        if (dest_info.cc != src_info.cc) {
            return InMemoryCoercionResult{ .fn_cc = .{
                .actual = src_info.cc,
                .wanted = dest_info.cc,
            } };
        }

        // `noreturn` and generic-poison return types are exempt from the
        // return-type check.
        switch (src_info.return_type) {
            .noreturn_type, .generic_poison_type => {},
            else => {
                const dest_return_type = Type.fromInterned(dest_info.return_type);
                const src_return_type = Type.fromInterned(src_info.return_type);
                const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
                if (rt != .ok) {
                    return InMemoryCoercionResult{ .fn_return_type = .{
                        .child = try rt.dupe(sema.arena),
                        .actual = src_return_type,
                        .wanted = dest_return_type,
                    } };
                }
            },
        }
    }

    const params_len = params_len: {
        if (dest_info.param_types.len != src_info.param_types.len) {
            return InMemoryCoercionResult{ .fn_param_count = .{
                .actual = src_info.param_types.len,
                .wanted = dest_info.param_types.len,
            } };
        }

        if (dest_info.noalias_bits != src_info.noalias_bits) {
            return InMemoryCoercionResult{ .fn_param_noalias = .{
                .actual = src_info.noalias_bits,
                .wanted = dest_info.noalias_bits,
            } };
        }

        break :params_len dest_info.param_types.len;
    };

    for (0..params_len) |param_i| {
        const dest_param_ty = Type.fromInterned(dest_info.param_types.get(ip)[param_i]);
        const src_param_ty = Type.fromInterned(src_info.param_types.get(ip)[param_i]);

        const param_i_small: u5 = @intCast(param_i);
        if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
            return InMemoryCoercionResult{ .fn_param_comptime = .{
                .index = param_i,
                .wanted = dest_info.paramIsComptime(param_i_small),
            } };
        }

        switch (src_param_ty.toIntern()) {
            .generic_poison_type => {},
            else => {
                // Note: Cast direction is reversed here.
                // For parameters, the source parameter type must accept the
                // destination parameter type (src <- dest), hence the swap.
                const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
                if (param != .ok) {
                    return InMemoryCoercionResult{ .fn_param = .{
                        .child = try param.dupe(sema.arena),
                        .actual = src_param_ty,
                        .wanted = dest_param_ty,
                        .index = param_i,
                    } };
                }
            },
        }
    }

    return .ok;
}
|
|
|
|
/// Determines whether pointer type `src_ty` may coerce in memory to pointer
/// type `dest_ty`. `dest_ptr_ty`/`src_ptr_ty` are the actual pointer types
/// (they differ from `dest_ty`/`src_ty` when the originals are pointer-like
/// optionals). Checks size, const/volatile, address space, child type
/// (recursively), allowzero, packed bit range, sentinel, and alignment,
/// in that order, returning a diagnostic for the first mismatch.
fn coerceInMemoryAllowedPtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_ptr_ty: Type,
    src_ptr_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const dest_info = dest_ptr_ty.ptrInfo(mod);
    const src_info = src_ptr_ty.ptrInfo(mod);

    // `[*c]` pointers are size-compatible with either side.
    const ok_ptr_size = src_info.flags.size == dest_info.flags.size or
        src_info.flags.size == .C or dest_info.flags.size == .C;
    if (!ok_ptr_size) {
        return InMemoryCoercionResult{ .ptr_size = .{
            .actual = src_info.flags.size,
            .wanted = dest_info.flags.size,
        } };
    }

    // `const`/`volatile` may be gained but never dropped.
    const ok_cv_qualifiers =
        (!src_info.flags.is_const or dest_info.flags.is_const) and
        (!src_info.flags.is_volatile or dest_info.flags.is_volatile);

    if (!ok_cv_qualifiers) {
        return InMemoryCoercionResult{ .ptr_qualifiers = .{
            .actual_const = src_info.flags.is_const,
            .wanted_const = dest_info.flags.is_const,
            .actual_volatile = src_info.flags.is_volatile,
            .wanted_volatile = dest_info.flags.is_volatile,
        } };
    }

    if (dest_info.flags.address_space != src_info.flags.address_space) {
        return InMemoryCoercionResult{ .ptr_addrspace = .{
            .actual = src_info.flags.address_space,
            .wanted = dest_info.flags.address_space,
        } };
    }

    // Child types are checked with dest_is_mut derived from the destination's
    // constness: a mutable destination pointer forbids loosening the pointee.
    const child = try sema.coerceInMemoryAllowed(block, Type.fromInterned(dest_info.child), Type.fromInterned(src_info.child), !dest_info.flags.is_const, target, dest_src, src_src);
    if (child != .ok) {
        return InMemoryCoercionResult{ .ptr_child = .{
            .child = try child.dupe(sema.arena),
            .actual = Type.fromInterned(src_info.child),
            .wanted = Type.fromInterned(dest_info.child),
        } };
    }

    const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
    const src_allow_zero = src_ty.ptrAllowsZero(mod);

    // allowzero may be gained only when the destination is not mutable.
    const ok_allows_zero = (dest_allow_zero and
        (src_allow_zero or !dest_is_mut)) or
        (!dest_allow_zero and !src_allow_zero);
    if (!ok_allows_zero) {
        return InMemoryCoercionResult{ .ptr_allowzero = .{
            .actual = src_ty,
            .wanted = dest_ty,
        } };
    }

    // Packed-pointer host size and bit offset must match exactly.
    if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size or
        src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset)
    {
        return InMemoryCoercionResult{ .ptr_bit_range = .{
            .actual_host = src_info.packed_offset.host_size,
            .wanted_host = dest_info.packed_offset.host_size,
            .actual_offset = src_info.packed_offset.bit_offset,
            .wanted_offset = dest_info.packed_offset.bit_offset,
        } };
    }

    // Sentinel is ok if the destination has none, the source is a C pointer,
    // or the source sentinel coerces to exactly the destination's sentinel.
    const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or
        (src_info.sentinel != .none and
        dest_info.sentinel == try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child));
    if (!ok_sent) {
        return InMemoryCoercionResult{ .ptr_sentinel = .{
            .actual = switch (src_info.sentinel) {
                .none => Value.@"unreachable",
                else => Value.fromInterned(src_info.sentinel),
            },
            .wanted = switch (dest_info.sentinel) {
                .none => Value.@"unreachable",
                else => Value.fromInterned(dest_info.sentinel),
            },
            .ty = Type.fromInterned(dest_info.child),
        } };
    }

    // If both pointers have alignment 0, it means they both want ABI alignment.
    // In this case, if they share the same child type, no need to resolve
    // pointee type alignment. Otherwise both pointee types must have their alignment
    // resolved and we compare the alignment numerically.
    if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or
        dest_info.child != src_info.child)
    {
        const src_align = if (src_info.flags.alignment != .none)
            src_info.flags.alignment
        else
            try sema.typeAbiAlignment(Type.fromInterned(src_info.child));

        const dest_align = if (dest_info.flags.alignment != .none)
            dest_info.flags.alignment
        else
            try sema.typeAbiAlignment(Type.fromInterned(dest_info.child));

        // Alignment may only be decreased, never increased.
        if (dest_align.compare(.gt, src_align)) {
            return InMemoryCoercionResult{ .ptr_alignment = .{
                .actual = src_align,
                .wanted = dest_align,
            } };
        }
    }

    return .ok;
}
|
|
|
|
/// Coerces an argument passed to the variadic portion of a function call.
/// Comptime number literals and arrays are compile errors; function values
/// become pointers via `analyzeDeclRef`; floats narrower than C `double` are
/// widened to f32/f64 per the target's `double` width; ABI integers that fit
/// are widened to the smallest of c_int/c_long/c_longlong (or the unsigned
/// equivalents). The result must be a valid extern parameter type, otherwise
/// a compile error with explanatory notes is emitted.
fn coerceVarArgParam(
    sema: *Sema,
    block: *Block,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    // Inside a typeof block no coercion is needed; the value is not used.
    if (block.is_typeof) return inst;

    const mod = sema.mod;
    const uncasted_ty = sema.typeOf(inst);
    const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
        // TODO consider casting to c_int/f64 if they fit
        .ComptimeInt, .ComptimeFloat => return sema.fail(
            block,
            inst_src,
            "integer and float literals passed to variadic function must be casted to a fixed-size number type",
            .{},
        ),
        .Fn => fn_ptr: {
            const fn_val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
            const fn_decl = fn_val.pointerDecl(mod).?;
            break :fn_ptr try sema.analyzeDeclRef(fn_decl);
        },
        .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
        .Float => float: {
            const target = sema.mod.getTarget();
            const double_bits = target.c_type_bit_size(.double);
            const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
            // Floats at least as wide as `double` pass through unchanged.
            if (inst_bits >= double_bits) break :float inst;
            switch (double_bits) {
                32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
                64 => break :float try sema.coerce(block, Type.f64, inst, inst_src),
                else => unreachable,
            }
        },
        else => if (uncasted_ty.isAbiInt(mod)) int: {
            // Non-extern-compatible ints pass through; the validity check
            // below will reject them with a detailed error.
            if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst;
            const target = sema.mod.getTarget();
            const uncasted_info = uncasted_ty.intInfo(mod);
            // Widen to the smallest C integer type that fits, trying
            // int/uint, then long/ulong, then longlong/ulonglong.
            if (uncasted_info.bits <= target.c_type_bit_size(switch (uncasted_info.signedness) {
                .signed => .int,
                .unsigned => .uint,
            })) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
                .signed => Type.c_int,
                .unsigned => Type.c_uint,
            }, inst, inst_src);
            if (uncasted_info.bits <= target.c_type_bit_size(switch (uncasted_info.signedness) {
                .signed => .long,
                .unsigned => .ulong,
            })) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
                .signed => Type.c_long,
                .unsigned => Type.c_ulong,
            }, inst, inst_src);
            if (uncasted_info.bits <= target.c_type_bit_size(switch (uncasted_info.signedness) {
                .signed => .longlong,
                .unsigned => .ulonglong,
            })) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
                .signed => Type.c_longlong,
                .unsigned => Type.c_ulonglong,
            }, inst, inst_src);
            break :int inst;
        } else inst,
    };

    const coerced_ty = sema.typeOf(coerced);
    if (!try sema.validateExternType(coerced_ty, .param_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);

            const src_decl = sema.mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(inst_src, mod), coerced_ty, .param_ty);

            try sema.addDeclaredHereNote(msg, coerced_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    return coerced;
}
|
|
|
|
// TODO migrate callsites to use storePtr2 instead.
/// Convenience wrapper around `storePtr2` that reuses `src` for the pointer
/// and operand source locations, selecting the safety-checked store tag when
/// the block has runtime safety enabled.
fn storePtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    uncasted_operand: Air.Inst.Ref,
) CompileError!void {
    const tag: Air.Inst.Tag = if (block.wantSafety()) .store_safe else .store;
    return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, tag);
}
|
|
|
|
/// Analyzes a store of `uncasted_operand` through `ptr`, coercing the operand
/// to the pointer's element type. Handles: stores to constant pointers
/// (compile error), element-wise stores of tuple operands into arrays,
/// bitcasted vector pointers, fully comptime stores via `storePtrVal`,
/// runtime vector-element stores, and finally a plain store instruction with
/// the given `air_tag` (`.ret_ptr` stores are lowered as `.store`).
fn storePtr2(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
    uncasted_operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    air_tag: Air.Inst.Tag,
) CompileError!void {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(ptr);
    if (ptr_ty.isConstPtr(mod))
        return sema.fail(block, ptr_src, "cannot assign to constant", .{});

    const elem_ty = ptr_ty.childType(mod);

    // To generate better code for tuples, we detect a tuple operand here, and
    // analyze field loads and stores directly. This avoids an extra allocation + memcpy
    // which would occur if we used `coerce`.
    // However, we avoid this mechanism if the destination element type is a tuple,
    // because the regular store will be better for this case.
    // If the destination type is a struct we don't want this mechanism to trigger, because
    // this code does not handle tuple-to-struct coercion which requires dealing with missing
    // fields.
    const operand_ty = sema.typeOf(uncasted_operand);
    if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) {
        const field_count = operand_ty.structFieldCount(mod);
        var i: u32 = 0;
        while (i < field_count) : (i += 1) {
            const elem_src = operand_src; // TODO better source location
            const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
            const elem_index = try mod.intRef(Type.usize, i);
            const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
            try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
        }
        return;
    }

    // TODO do the same thing for anon structs as for tuples above.
    // However, beware of the need to handle missing/extra fields.

    const is_ret = air_tag == .ret_ptr;

    // Detect if we are storing an array operand to a bitcasted vector pointer.
    // If so, we instead reach through the bitcasted pointer to the vector pointer,
    // bitcast the array operand to a vector, and then lower this as a store of
    // a vector value to a vector pointer. This generally results in better code,
    // as well as working around an LLVM bug:
    // https://github.com/ziglang/zig/issues/11154
    if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
        const vector_ty = sema.typeOf(vector_ptr).childType(mod);
        const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
            error.NotCoercible => unreachable,
            else => |e| return e,
        };
        try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
        return;
    }

    const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };
    const maybe_operand_val = try sema.resolveValue(operand);

    // Determine which source location to blame if the store must be runtime.
    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const operand_val = maybe_operand_val orelse {
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr(mod)) {
            // Both pointer and operand are comptime-known: perform the store
            // at comptime and emit no AIR.
            try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
            return;
        } else break :rs ptr_src;
    } else ptr_src;

    // We do this after the possible comptime store above, for the case of field_ptr stores
    // to unions because we want the comptime tag to be set, even if the field type is void.
    if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
        return;
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);
    try sema.queueFullTypeResolution(elem_ty);

    // Stores through a pointer with a runtime vector index are lowered to a
    // dedicated `vector_store_elem` instruction.
    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
        const ptr_inst = ptr.toIndex().?;
        const air_tags = sema.air_instructions.items(.tag);
        if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
            const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
            _ = try block.addInst(.{
                .tag = .vector_store_elem,
                .data = .{ .vector_store_elem = .{
                    .vector_ptr = bin_op.lhs,
                    .payload = try block.sema.addExtra(Air.Bin{
                        .lhs = bin_op.rhs,
                        .rhs = operand,
                    }),
                } },
            });
            return;
        }
        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
            ptr_ty.fmt(sema.mod),
        });
    }

    const store_inst = if (is_ret)
        try block.addBinOp(.store, ptr, operand)
    else
        try block.addBinOp(air_tag, ptr, operand);

    try sema.checkComptimeKnownStore(block, store_inst);

    return;
}
|
|
|
|
/// Given an AIR store instruction, checks whether we are performing a
/// comptime-known store to a local alloc, and updates `maybe_comptime_allocs`
/// accordingly.
fn checkComptimeKnownStore(sema: *Sema, block: *Block, store_inst_ref: Air.Inst.Ref) !void {
    const store_inst = store_inst_ref.toIndex().?;
    const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
    const ptr_inst = bin_op.lhs.toIndex() orelse return;
    const stored_operand = bin_op.rhs;

    // Only stores through pointers derived from a tracked local alloc matter.
    const alloc_inst = sema.base_allocs.get(ptr_inst) orelse return;
    const tracked = sema.maybe_comptime_allocs.getPtr(alloc_inst) orelse return;

    // The store keeps the alloc comptime-eligible only when the stored value
    // is comptime-known and we have not crossed into a new runtime scope.
    const comptime_known = (null != try sema.resolveValue(stored_operand)) and
        tracked.runtime_index == block.runtime_index;

    if (comptime_known) {
        return tracked.stores.append(sema.arena, store_inst);
    }

    // Store is runtime-known
    _ = sema.maybe_comptime_allocs.remove(alloc_inst);
}
|
|
|
|
/// Given an AIR instruction transforming a pointer (struct_field_ptr,
/// ptr_elem_ptr, bitcast, etc), checks whether the base pointer refers to a
/// local alloc, and updates `base_allocs` accordingly.
fn checkKnownAllocPtr(sema: *Sema, base_ptr: Air.Inst.Ref, new_ptr: Air.Inst.Ref) !void {
    // Both refs must be real instructions, and the base pointer must already
    // be tracked as deriving from a local alloc.
    const base_inst = base_ptr.toIndex() orelse return;
    const derived_inst = new_ptr.toIndex() orelse return;
    const alloc_inst = sema.base_allocs.get(base_inst) orelse return;

    // The derived pointer refers to the same underlying alloc.
    try sema.base_allocs.put(sema.gpa, derived_inst, alloc_inst);

    switch (sema.air_instructions.items(.tag)[@intFromEnum(derived_inst)]) {
        .optional_payload_ptr_set, .errunion_payload_ptr_set => {
            // Record this pointer against the alloc so it is not elided.
            const tracked = sema.maybe_comptime_allocs.getPtr(alloc_inst) orelse return;
            try tracked.non_elideable_pointers.append(sema.arena, derived_inst);
        },
        .ptr_elem_ptr => {
            const tmp_air = sema.getTmpAir();
            const payload_index = tmp_air.instructions.items(.data)[@intFromEnum(derived_inst)].ty_pl.payload;
            const index_ref = tmp_air.extraData(Air.Bin, payload_index).data.rhs;

            // If the index value is runtime-known, this pointer is also runtime-known, so
            // we must in turn make the alloc value runtime-known.
            if (null == try sema.resolveValue(index_ref)) {
                _ = sema.maybe_comptime_allocs.remove(alloc_inst);
            }
        },
        else => {},
    }
}
|
|
|
|
/// Traverse an arbitrary number of bitcasted pointers and return the underlying vector
/// pointer. Only if the final element type matches the vector element type, and the
/// lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
    const mod = sema.mod;
    const array_ty = sema.typeOf(ptr).childType(mod);
    if (array_ty.zigTypeTag(mod) != .Array) return null;

    const air_datas = sema.air_instructions.items(.data);
    const air_tags = sema.air_instructions.items(.tag);

    // Peel bitcasts until we reach a pointer-to-vector (success) or anything
    // that is not a bitcast of a pointer (failure).
    var cur_ref = ptr;
    var cur_inst = cur_ref.toIndex() orelse return null;
    const vector_ty = while (air_tags[@intFromEnum(cur_inst)] == .bitcast) {
        cur_ref = air_datas[@intFromEnum(cur_inst)].ty_op.operand;
        if (!sema.isKnownZigType(cur_ref, .Pointer)) return null;
        const pointee_ty = sema.typeOf(cur_ref).childType(mod);
        if (pointee_ty.zigTypeTag(mod) == .Vector) break pointee_ty;
        cur_inst = cur_ref.toIndex() orelse return null;
    } else return null;

    // We have a pointer-to-array and a pointer-to-vector. If the elements and
    // lengths match, return the result.
    const elems_match = array_ty.childType(mod).eql(vector_ty.childType(mod), mod);
    const lens_match = array_ty.arrayLen(mod) == vector_ty.vectorLen(mod);
    return if (elems_match and lens_match) cur_ref else null;
}
|
|
|
|
/// Call when you have Value objects rather than Air instructions, and you want to
/// assert the store must be done at comptime.
/// Writes `operand_val` (of type `operand_ty`) through the comptime-mutable
/// pointer `ptr_val`, either directly or by reinterpreting the bytes of the
/// nearest parent value with a well-defined memory layout.
fn storePtrVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    operand_val: Value,
    operand_ty: Type,
) !void {
    const mod = sema.mod;
    var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
    // Verify that mutating this comptime var is legal from the current block.
    try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl);

    try sema.resolveTypeLayout(operand_ty);
    switch (mut_kit.pointee) {
        // The pointee type has only one possible value; the store is a no-op.
        .opv => {},
        .direct => |val_ptr| {
            if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) {
                // Storing to a comptime field is only permitted when the stored
                // value equals the field's default value.
                val_ptr.* = Value.fromInterned((try val_ptr.intern(operand_ty, mod)));
                if (!operand_val.eql(val_ptr.*, operand_ty, mod)) {
                    // TODO use failWithInvalidComptimeFieldStore
                    return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
                }
                return;
            }
            val_ptr.* = Value.fromInterned((try operand_val.intern(operand_ty, mod)));
        },
        .reinterpret => |reinterpret| {
            // No direct in-memory match: serialize the parent value into a
            // byte buffer, overwrite the operand's bytes at the recorded
            // offset, then read the whole parent value back out.
            try sema.resolveTypeLayout(mut_kit.ty);
            const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
            const buffer = try sema.gpa.alloc(u8, abi_size);
            defer sema.gpa.free(buffer);
            reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.ReinterpretDeclRef => unreachable,
                error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
            };
            if (reinterpret.write_packed) {
                operand_val.writeToPackedMemory(operand_ty, mod, buffer[reinterpret.byte_offset..], 0) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.ReinterpretDeclRef => unreachable,
                };
            } else {
                operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.ReinterpretDeclRef => unreachable,
                    error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                    error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{operand_ty.fmt(mod)}),
                };
            }
            const val = Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.IllDefinedMemoryLayout => unreachable,
                error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
            };
            reinterpret.val_ptr.* = Value.fromInterned((try val.intern(mut_kit.ty, mod)));
        },
        .bad_decl_ty, .bad_ptr_ty => {
            // TODO show the decl declaration site in a note and explain whether the decl
            // or the pointer is the problematic type
            return sema.fail(
                block,
                src,
                "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout",
                .{mut_kit.ty.fmt(mod)},
            );
        },
    }
}
|
|
|
|
/// Result of `beginComptimePtrMutation`: describes how a comptime store
/// through a pointer can be carried out.
const ComptimePtrMutationKit = struct {
    /// The comptime-mutable decl that ultimately owns the memory being
    /// mutated; passed to `checkComptimeVarStore` to validate the store.
    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
    pointee: union(enum) {
        /// The pointee type has only one possible value, so the store is a no-op.
        opv,
        /// The pointer type matches the actual comptime Value so a direct
        /// modification is possible.
        direct: *Value,
        /// The largest parent Value containing pointee and having a well-defined memory layout.
        /// This is used for bitcasting, if direct dereferencing failed.
        reinterpret: struct {
            val_ptr: *Value,
            /// Offset of the pointee within `val_ptr`'s in-memory representation.
            byte_offset: usize,
            /// If set, write the operand to packed memory
            write_packed: bool = false,
        },
        /// If the root decl could not be used as parent, this means `ty` is the type that
        /// caused that by not having a well-defined layout.
        /// This one means the Decl that owns the value trying to be modified does not
        /// have a well defined memory layout.
        bad_decl_ty,
        /// If the root decl could not be used as parent, this means `ty` is the type that
        /// caused that by not having a well-defined layout.
        /// This one means the pointer type that is being stored through does not
        /// have a well defined memory layout.
        bad_ptr_ty,
    },
    /// For `direct`: the type of the pointed-at value. For `reinterpret`: the
    /// type of `val_ptr`'s value. For `bad_decl_ty`/`bad_ptr_ty`: the type
    /// lacking a well-defined layout.
    ty: Type,
};
|
|
|
|
/// Recursively descends through a comptime-known pointer `ptr_val` to obtain a
/// mutable `*Value` for the pointee (expected to have type `ptr_elem_ty`), so
/// a comptime store can be performed. On the way down, values held in
/// memory-optimized representations (`bytes`, `repeated`, `undef`, ...) are
/// expanded into mutable `aggregate`/payload form. When no directly-matching
/// representation exists, falls back to a `reinterpret` result (byte-level
/// mutation of the nearest parent with a well-defined layout), or one of the
/// `bad_*` results when no such parent exists.
fn beginComptimePtrMutation(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
    const mod = sema.mod;
    const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
    switch (ptr.addr) {
        .decl, .anon_decl, .int => unreachable, // isComptimeMutablePtr has been checked already
        .mut_decl => |mut_decl| {
            // Base case: the pointer addresses a comptime-mutable decl directly.
            const decl = mod.declPtr(mut_decl.decl);
            return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl);
        },
        .comptime_field => |comptime_field| {
            // Mutation happens on an arena copy; `storePtrVal` checks the
            // stored value against the field default via `.comptime_field_ptr`.
            const duped = try sema.arena.create(Value);
            duped.* = Value.fromInterned(comptime_field);
            return sema.beginComptimePtrMutationInner(block, src, Type.fromInterned(mod.intern_pool.typeOf(comptime_field)), duped, ptr_elem_ty, .{
                .decl = undefined,
                .runtime_index = .comptime_field_ptr,
            });
        },
        .eu_payload => |eu_ptr| {
            const eu_ty = Type.fromInterned(mod.intern_pool.typeOf(eu_ptr)).childType(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(eu_ptr), eu_ty);
            switch (parent.pointee) {
                .opv => unreachable,
                .direct => |val_ptr| {
                    const payload_ty = parent.ty.errorUnionPayload(mod);
                    if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
                        // Already in mutable payload representation.
                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
                            .ty = payload_ty,
                        };
                    } else {
                        // An error union has been initialized to undefined at comptime and now we
                        // are for the first time setting the payload. We must change the
                        // representation of the error union from `undef` to `opt_payload`.

                        const payload = try sema.arena.create(Value.Payload.SubValue);
                        payload.* = .{
                            .base = .{ .tag = .eu_payload },
                            .data = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
                        };

                        val_ptr.* = Value.initPayload(&payload.base);

                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &payload.data },
                            .ty = payload_ty,
                        };
                    }
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
                // Even though the parent value type has well-defined memory layout, our
                // pointer type does not.
                .reinterpret => return ComptimePtrMutationKit{
                    .mut_decl = parent.mut_decl,
                    .pointee = .bad_ptr_ty,
                    .ty = eu_ty,
                },
            }
        },
        .opt_payload => |opt_ptr| {
            const opt_ty = Type.fromInterned(mod.intern_pool.typeOf(opt_ptr)).childType(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(opt_ptr), opt_ty);
            switch (parent.pointee) {
                .opv => unreachable,
                .direct => |val_ptr| {
                    const payload_ty = parent.ty.optionalChild(mod);
                    switch (val_ptr.ip_index) {
                        // Already in mutable payload representation.
                        .none => return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
                            .ty = payload_ty,
                        },
                        else => {
                            // Extract (or synthesize as undef) the current payload value.
                            const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) {
                                .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }),
                                .opt => |opt| switch (opt.val) {
                                    .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
                                    else => |payload| payload,
                                },
                                else => unreachable,
                            };

                            // An optional has been initialized to undefined at comptime and now we
                            // are for the first time setting the payload. We must change the
                            // representation of the optional from `undef` to `opt_payload`.

                            const payload = try sema.arena.create(Value.Payload.SubValue);
                            payload.* = .{
                                .base = .{ .tag = .opt_payload },
                                .data = Value.fromInterned(payload_val),
                            };

                            val_ptr.* = Value.initPayload(&payload.base);

                            return ComptimePtrMutationKit{
                                .mut_decl = parent.mut_decl,
                                .pointee = .{ .direct = &payload.data },
                                .ty = payload_ty,
                            };
                        },
                    }
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
                // Even though the parent value type has well-defined memory layout, our
                // pointer type does not.
                .reinterpret => return ComptimePtrMutationKit{
                    .mut_decl = parent.mut_decl,
                    .pointee = .bad_ptr_ty,
                    .ty = opt_ty,
                },
            }
        },
        .elem => |elem_ptr| {
            const base_elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)).elemType2(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(elem_ptr.base), base_elem_ty);

            switch (parent.pointee) {
                .opv => unreachable,
                .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
                    .Array, .Vector => {
                        const elem_ty = parent.ty.childType(mod);
                        const check_len = parent.ty.arrayLenIncludingSentinel(mod);
                        if ((try sema.typeHasOnePossibleValue(ptr_elem_ty)) != null) {
                            if (elem_ptr.index > check_len) {
                                // TODO have the parent include the decl so we can say "declared here"
                                return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
                                    elem_ptr.index, check_len,
                                });
                            }
                            return .{
                                .mut_decl = parent.mut_decl,
                                .pointee = .opv,
                                .ty = elem_ty,
                            };
                        }
                        if (elem_ptr.index >= check_len) {
                            // TODO have the parent include the decl so we can say "declared here"
                            return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
                                elem_ptr.index, check_len,
                            });
                        }

                        // We might have a pointer to multiple elements of the array (e.g. a pointer
                        // to a sub-array). In this case, we just have to reinterpret the relevant
                        // bytes of the whole array rather than any single element.
                        reinterp_multi_elem: {
                            if (try sema.typeRequiresComptime(base_elem_ty)) break :reinterp_multi_elem;
                            if (try sema.typeRequiresComptime(ptr_elem_ty)) break :reinterp_multi_elem;

                            const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
                            if (elem_abi_size_u64 >= try sema.typeAbiSize(ptr_elem_ty)) break :reinterp_multi_elem;

                            const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
                            const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
                            return .{
                                .mut_decl = parent.mut_decl,
                                .pointee = .{ .reinterpret = .{
                                    .val_ptr = val_ptr,
                                    .byte_offset = elem_abi_size * elem_idx,
                                } },
                                .ty = parent.ty,
                            };
                        }

                        switch (val_ptr.ip_index) {
                            .none => switch (val_ptr.tag()) {
                                .bytes => {
                                    // An array is memory-optimized to store a slice of bytes, but we are about
                                    // to modify an individual field and the representation has to change.
                                    // If we wanted to avoid this, there would need to be special detection
                                    // elsewhere to identify when writing a value to an array element that is stored
                                    // using the `bytes` tag, and handle it without making a call to this function.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const bytes = val_ptr.castTag(.bytes).?.data;
                                    const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
                                    // bytes.len may be one greater than dest_len because of the case when
                                    // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
                                    assert(bytes.len >= dest_len);
                                    const elems = try arena.alloc(Value, @intCast(dest_len));
                                    for (elems, 0..) |*elem, i| {
                                        elem.* = try mod.intValue(elem_ty, bytes[i]);
                                    }

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@intCast(elem_ptr.index)],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .repeated => {
                                    // An array is memory-optimized to store only a single element value, and
                                    // that value is understood to be the same for the entire length of the array.
                                    // However, now we want to modify an individual field and so the
                                    // representation has to change. If we wanted to avoid this, there would
                                    // need to be special detection elsewhere to identify when writing a value to an
                                    // array element that is stored using the `repeated` tag, and handle it
                                    // without making a call to this function.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const repeated_val = try val_ptr.castTag(.repeated).?.data.intern(parent.ty.childType(mod), mod);
                                    const array_len_including_sentinel =
                                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                                    @memset(elems, Value.fromInterned(repeated_val));

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@intCast(elem_ptr.index)],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },

                                // Already expanded; point straight at the element.
                                .aggregate => return beginComptimePtrMutationInner(
                                    sema,
                                    block,
                                    src,
                                    elem_ty,
                                    &val_ptr.castTag(.aggregate).?.data[@intCast(elem_ptr.index)],
                                    ptr_elem_ty,
                                    parent.mut_decl,
                                ),

                                else => unreachable,
                            },
                            else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
                                .undef => {
                                    // An array has been initialized to undefined at comptime and now we
                                    // are for the first time setting an element. We must change the representation
                                    // of the array from `undef` to `array`.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const array_len_including_sentinel =
                                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                                    @memset(elems, Value.fromInterned((try mod.intern(.{ .undef = elem_ty.toIntern() }))));

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@intCast(elem_ptr.index)],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                else => unreachable,
                            },
                        }
                    },
                    else => {
                        // Scalar parent: only index 0 is addressable.
                        if (elem_ptr.index != 0) {
                            // TODO include a "declared here" note for the decl
                            return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
                                elem_ptr.index,
                            });
                        }
                        return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty,
                            val_ptr,
                            ptr_elem_ty,
                            parent.mut_decl,
                        );
                    },
                },
                .reinterpret => |reinterpret| {
                    if (!base_elem_ty.hasWellDefinedLayout(mod)) {
                        // Even though the parent value type has well-defined memory layout, our
                        // pointer type does not.
                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .bad_ptr_ty,
                            .ty = base_elem_ty,
                        };
                    }

                    // Accumulate the element offset onto the parent's byte offset.
                    const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
                    const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
                    const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
                    return ComptimePtrMutationKit{
                        .mut_decl = parent.mut_decl,
                        .pointee = .{ .reinterpret = .{
                            .val_ptr = reinterpret.val_ptr,
                            .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx,
                        } },
                        .ty = parent.ty,
                    };
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
            }
        },
        .field => |field_ptr| {
            const base_child_ty = Type.fromInterned(mod.intern_pool.typeOf(field_ptr.base)).childType(mod);
            const field_index: u32 = @intCast(field_ptr.index);

            var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(field_ptr.base), base_child_ty);
            switch (parent.pointee) {
                .opv => unreachable,
                .direct => |val_ptr| switch (val_ptr.ip_index) {
                    .empty_struct => {
                        // Mutate an arena copy of the empty struct value.
                        const duped = try sema.arena.create(Value);
                        duped.* = val_ptr.*;
                        return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty.structFieldType(field_index, mod),
                            duped,
                            ptr_elem_ty,
                            parent.mut_decl,
                        );
                    },
                    .none => switch (val_ptr.tag()) {
                        // Already expanded; point straight at the field.
                        .aggregate => return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty.structFieldType(field_index, mod),
                            &val_ptr.castTag(.aggregate).?.data[field_index],
                            ptr_elem_ty,
                            parent.mut_decl,
                        ),
                        .repeated => {
                            // Expand the `repeated` representation to a full aggregate
                            // before taking a field pointer.
                            const arena = mod.tmp_hack_arena.allocator();

                            const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
                            @memset(elems, val_ptr.castTag(.repeated).?.data);
                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                            return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                parent.ty.structFieldType(field_index, mod),
                                &elems[field_index],
                                ptr_elem_ty,
                                parent.mut_decl,
                            );
                        },
                        .@"union" => {
                            const payload = &val_ptr.castTag(.@"union").?.data;
                            const layout = base_child_ty.containerLayout(mod);

                            const tag_type = base_child_ty.unionTagTypeHypothetical(mod);
                            const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index);
                            if (layout == .Auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
                                // We need to set the active field of the union.
                                payload.tag = hypothetical_tag;

                                const field_ty = parent.ty.structFieldType(field_index, mod);
                                return beginComptimePtrMutationInner(
                                    sema,
                                    block,
                                    src,
                                    field_ty,
                                    &payload.val,
                                    ptr_elem_ty,
                                    parent.mut_decl,
                                );
                            } else {
                                // Writing to a different field (a different or unknown tag is active) requires reinterpreting
                                // memory of the entire union, which requires knowing its abiSize.
                                try sema.resolveTypeLayout(parent.ty);

                                // This union value no longer has a well-defined tag type.
                                // The reinterpretation will read it back out as .none.
                                payload.val = try payload.val.unintern(sema.arena, mod);
                                return ComptimePtrMutationKit{
                                    .mut_decl = parent.mut_decl,
                                    .pointee = .{ .reinterpret = .{
                                        .val_ptr = val_ptr,
                                        .byte_offset = 0,
                                        .write_packed = layout == .Packed,
                                    } },
                                    .ty = parent.ty,
                                };
                            }
                        },
                        .slice => switch (field_index) {
                            Value.slice_ptr_index => return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                parent.ty.slicePtrFieldType(mod),
                                &val_ptr.castTag(.slice).?.data.ptr,
                                ptr_elem_ty,
                                parent.mut_decl,
                            ),

                            Value.slice_len_index => return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                Type.usize,
                                &val_ptr.castTag(.slice).?.data.len,
                                ptr_elem_ty,
                                parent.mut_decl,
                            ),

                            else => unreachable,
                        },
                        else => unreachable,
                    },
                    else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
                        .undef => {
                            // A struct or union has been initialized to undefined at comptime and now we
                            // are for the first time setting a field. We must change the representation
                            // of the struct/union from `undef` to `struct`/`union`.
                            const arena = mod.tmp_hack_arena.allocator();

                            switch (parent.ty.zigTypeTag(mod)) {
                                .Struct => {
                                    const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
                                    for (fields, 0..) |*field, i| field.* = Value.fromInterned((try mod.intern(.{
                                        .undef = parent.ty.structFieldType(i, mod).toIntern(),
                                    })));

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, fields);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        parent.ty.structFieldType(field_index, mod),
                                        &fields[field_index],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .Union => {
                                    const payload = try arena.create(Value.Payload.Union);
                                    const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
                                    const payload_ty = parent.ty.structFieldType(field_index, mod);
                                    payload.* = .{ .data = .{
                                        .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
                                        .val = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
                                    } };

                                    val_ptr.* = Value.initPayload(&payload.base);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        payload_ty,
                                        &payload.data.val,
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .Pointer => {
                                    assert(parent.ty.isSlice(mod));
                                    const ptr_ty = parent.ty.slicePtrFieldType(mod);
                                    val_ptr.* = try Value.Tag.slice.create(arena, .{
                                        .ptr = Value.fromInterned((try mod.intern(.{ .undef = ptr_ty.toIntern() }))),
                                        .len = Value.fromInterned((try mod.intern(.{ .undef = .usize_type }))),
                                    });

                                    switch (field_index) {
                                        Value.slice_ptr_index => return beginComptimePtrMutationInner(
                                            sema,
                                            block,
                                            src,
                                            ptr_ty,
                                            &val_ptr.castTag(.slice).?.data.ptr,
                                            ptr_elem_ty,
                                            parent.mut_decl,
                                        ),
                                        Value.slice_len_index => return beginComptimePtrMutationInner(
                                            sema,
                                            block,
                                            src,
                                            Type.usize,
                                            &val_ptr.castTag(.slice).?.data.len,
                                            ptr_elem_ty,
                                            parent.mut_decl,
                                        ),

                                        else => unreachable,
                                    }
                                },
                                else => unreachable,
                            }
                        },
                        else => unreachable,
                    },
                },
                .reinterpret => |reinterpret| {
                    // Accumulate the field offset onto the parent's byte offset.
                    const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod);
                    const field_offset = try sema.usizeCast(block, src, field_offset_u64);
                    return ComptimePtrMutationKit{
                        .mut_decl = parent.mut_decl,
                        .pointee = .{ .reinterpret = .{
                            .val_ptr = reinterpret.val_ptr,
                            .byte_offset = reinterpret.byte_offset + field_offset,
                        } },
                        .ty = parent.ty,
                    };
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
            }
        },
    }
}
|
|
|
|
/// Base-case helper for `beginComptimePtrMutation`: given a mutable `*Value`
/// (`decl_val` of type `decl_ty`), decide whether the store of a value of type
/// `ptr_elem_ty` can be done directly, must reinterpret bytes, or is
/// impossible due to an ill-defined layout.
/// Note: `decl_val` is unconditionally uninterned so it becomes mutable.
fn beginComptimePtrMutationInner(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_ty: Type,
    decl_val: *Value,
    ptr_elem_ty: Type,
    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
) CompileError!ComptimePtrMutationKit {
    const mod = sema.mod;
    const target = mod.getTarget();
    // Check coercion before uninterning; the check needs the interned form's type.
    const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;

    decl_val.* = try decl_val.unintern(sema.arena, mod);

    if (coerce_ok) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .{ .direct = decl_val },
            .ty = decl_ty,
        };
    }

    // Handle the case that the decl is an array and we're actually trying to point to an element.
    if (decl_ty.isArrayOrVector(mod)) {
        const decl_elem_ty = decl_ty.childType(mod);
        if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
            return ComptimePtrMutationKit{
                .mut_decl = mut_decl,
                .pointee = .{ .direct = decl_val },
                .ty = decl_ty,
            };
        }
    }

    // No direct match; fall back to byte-level reinterpretation, which
    // requires both sides to have a well-defined memory layout.
    if (!decl_ty.hasWellDefinedLayout(mod)) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .bad_decl_ty,
            .ty = decl_ty,
        };
    }
    if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .bad_ptr_ty,
            .ty = ptr_elem_ty,
        };
    }
    return ComptimePtrMutationKit{
        .mut_decl = mut_decl,
        .pointee = .{ .reinterpret = .{
            .val_ptr = decl_val,
            .byte_offset = 0,
        } },
        .ty = decl_ty,
    };
}
|
|
|
|
/// A typed value paired with a byte offset into its in-memory representation.
/// Used as the `parent` of a `ComptimePtrLoadKit` for bitcast-style loads.
const TypedValueAndOffset = struct {
    tv: TypedValue,
    byte_offset: usize,
};
|
|
|
|
/// Result of `beginComptimePtrLoad`: describes how the pointee of a
/// comptime-known pointer can be read.
const ComptimePtrLoadKit = struct {
    /// The Value and Type corresponding to the pointee of the provided pointer.
    /// If a direct dereference is not possible, this is null.
    pointee: ?TypedValue,
    /// The largest parent Value containing `pointee` and having a well-defined memory layout.
    /// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null).
    parent: ?TypedValueAndOffset,
    /// Whether the `pointee` could be mutated by further
    /// semantic analysis and a copy must be performed.
    is_mutable: bool,
    /// If the root decl could not be used as `parent`, this is the type that
    /// caused that by not having a well-defined layout
    ty_without_well_defined_layout: ?Type,
};
|
|
|
|
/// Error set for comptime pointer loads. `RuntimeLoad` means the pointee is
/// not comptime-known (e.g. the pointer addresses a runtime `var` or a raw
/// integer address), so the load must be performed at runtime.
const ComptimePtrLoadError = CompileError || error{
    RuntimeLoad,
};
|
|
|
|
/// If `maybe_array_ty` is provided, it will be used to directly dereference an
|
|
/// .elem_ptr of type T to a value of [N]T, if necessary.
|
|
fn beginComptimePtrLoad(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
ptr_val: Value,
|
|
maybe_array_ty: ?Type,
|
|
) ComptimePtrLoadError!ComptimePtrLoadKit {
|
|
const mod = sema.mod;
|
|
const ip = &mod.intern_pool;
|
|
const target = mod.getTarget();
|
|
|
|
var deref: ComptimePtrLoadKit = switch (ip.indexToKey(ptr_val.toIntern())) {
|
|
.ptr => |ptr| switch (ptr.addr) {
|
|
.decl, .mut_decl => blk: {
|
|
const decl_index = switch (ptr.addr) {
|
|
.decl => |decl| decl,
|
|
.mut_decl => |mut_decl| mut_decl.decl,
|
|
else => unreachable,
|
|
};
|
|
const is_mutable = ptr.addr == .mut_decl;
|
|
const decl = mod.declPtr(decl_index);
|
|
const decl_tv = try decl.typedValue();
|
|
try sema.declareDependency(.{ .decl_val = decl_index });
|
|
if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;
|
|
|
|
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
|
|
break :blk ComptimePtrLoadKit{
|
|
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
|
|
.pointee = decl_tv,
|
|
.is_mutable = is_mutable,
|
|
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
|
|
};
|
|
},
|
|
.anon_decl => |anon_decl| blk: {
|
|
const decl_val = anon_decl.val;
|
|
if (Value.fromInterned(decl_val).getVariable(mod) != null) return error.RuntimeLoad;
|
|
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
|
|
const decl_tv: TypedValue = .{ .ty = decl_ty, .val = Value.fromInterned(decl_val) };
|
|
const layout_defined = decl_ty.hasWellDefinedLayout(mod);
|
|
break :blk ComptimePtrLoadKit{
|
|
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
|
|
.pointee = decl_tv,
|
|
.is_mutable = false,
|
|
.ty_without_well_defined_layout = if (!layout_defined) decl_ty else null,
|
|
};
|
|
},
|
|
.int => return error.RuntimeLoad,
|
|
.eu_payload, .opt_payload => |container_ptr| blk: {
|
|
const container_ty = Type.fromInterned(ip.typeOf(container_ptr)).childType(mod);
|
|
const payload_ty = switch (ptr.addr) {
|
|
.eu_payload => container_ty.errorUnionPayload(mod),
|
|
.opt_payload => container_ty.optionalChild(mod),
|
|
else => unreachable,
|
|
};
|
|
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(container_ptr), container_ty);
|
|
|
|
// eu_payload and opt_payload never have a well-defined layout
|
|
if (deref.parent != null) {
|
|
deref.parent = null;
|
|
deref.ty_without_well_defined_layout = container_ty;
|
|
}
|
|
|
|
if (deref.pointee) |*tv| {
|
|
const coerce_in_mem_ok =
|
|
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
|
|
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
|
|
if (coerce_in_mem_ok) {
|
|
const payload_val = switch (tv.val.ip_index) {
|
|
.none => tv.val.cast(Value.Payload.SubValue).?.data,
|
|
.null_value => return sema.fail(block, src, "attempt to use null value", .{}),
|
|
else => Value.fromInterned(switch (ip.indexToKey(tv.val.toIntern())) {
|
|
.error_union => |error_union| switch (error_union.val) {
|
|
.err_name => |err_name| return sema.fail(
|
|
block,
|
|
src,
|
|
"attempt to unwrap error: {}",
|
|
.{err_name.fmt(ip)},
|
|
),
|
|
.payload => |payload| payload,
|
|
},
|
|
.opt => |opt| switch (opt.val) {
|
|
.none => return sema.fail(block, src, "attempt to use null value", .{}),
|
|
else => |payload| payload,
|
|
},
|
|
else => unreachable,
|
|
}),
|
|
};
|
|
tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
|
|
break :blk deref;
|
|
}
|
|
}
|
|
deref.pointee = null;
|
|
break :blk deref;
|
|
},
|
|
.comptime_field => |comptime_field| blk: {
|
|
const field_ty = Type.fromInterned(ip.typeOf(comptime_field));
|
|
break :blk ComptimePtrLoadKit{
|
|
.parent = null,
|
|
.pointee = .{ .ty = field_ty, .val = Value.fromInterned(comptime_field) },
|
|
.is_mutable = false,
|
|
.ty_without_well_defined_layout = field_ty,
|
|
};
|
|
},
|
|
.elem => |elem_ptr| blk: {
|
|
const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
|
|
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(elem_ptr.base), null);
|
|
|
|
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
|
|
// to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
|
|
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
|
|
switch (ip.indexToKey(elem_ptr.base)) {
|
|
.ptr => |base_ptr| switch (base_ptr.addr) {
|
|
.elem => |base_elem| assert(!Type.fromInterned(ip.typeOf(base_elem.base)).elemType2(mod).eql(elem_ty, mod)),
|
|
else => {},
|
|
},
|
|
else => {},
|
|
}
|
|
|
|
if (elem_ptr.index != 0) {
|
|
if (elem_ty.hasWellDefinedLayout(mod)) {
|
|
if (deref.parent) |*parent| {
|
|
// Update the byte offset (in-place)
|
|
const elem_size = try sema.typeAbiSize(elem_ty);
|
|
const offset = parent.byte_offset + elem_size * elem_ptr.index;
|
|
parent.byte_offset = try sema.usizeCast(block, src, offset);
|
|
}
|
|
} else {
|
|
deref.parent = null;
|
|
deref.ty_without_well_defined_layout = elem_ty;
|
|
}
|
|
}
|
|
|
|
// If we're loading an elem that was derived from a different type
|
|
// than the true type of the underlying decl, we cannot deref directly
|
|
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
|
|
const deref_elem_ty = deref.pointee.?.ty.childType(mod);
|
|
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
|
|
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
|
|
} else false;
|
|
if (!ty_matches) {
|
|
deref.pointee = null;
|
|
break :blk deref;
|
|
}
|
|
|
|
var array_tv = deref.pointee.?;
|
|
const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
|
|
if (maybe_array_ty) |load_ty| {
|
|
// It's possible that we're loading a [N]T, in which case we'd like to slice
|
|
// the pointee array directly from our parent array.
|
|
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
|
|
const len = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
|
|
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
|
|
deref.pointee = if (elem_ptr.index + len <= check_len) TypedValue{
|
|
.ty = try mod.arrayType(.{
|
|
.len = len,
|
|
.child = elem_ty.toIntern(),
|
|
}),
|
|
.val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + len),
|
|
} else null;
|
|
break :blk deref;
|
|
}
|
|
}
|
|
|
|
if (elem_ptr.index >= check_len) {
|
|
deref.pointee = null;
|
|
break :blk deref;
|
|
}
|
|
if (elem_ptr.index == check_len - 1) {
|
|
if (array_tv.ty.sentinel(mod)) |sent| {
|
|
deref.pointee = TypedValue{
|
|
.ty = elem_ty,
|
|
.val = sent,
|
|
};
|
|
break :blk deref;
|
|
}
|
|
}
|
|
deref.pointee = TypedValue{
|
|
.ty = elem_ty,
|
|
.val = try array_tv.val.elemValue(mod, @intCast(elem_ptr.index)),
|
|
};
|
|
break :blk deref;
|
|
},
|
|
.field => |field_ptr| blk: {
|
|
const field_index: u32 = @intCast(field_ptr.index);
|
|
const container_ty = Type.fromInterned(ip.typeOf(field_ptr.base)).childType(mod);
|
|
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(field_ptr.base), container_ty);
|
|
|
|
if (container_ty.hasWellDefinedLayout(mod)) {
|
|
const struct_obj = mod.typeToStruct(container_ty);
|
|
if (struct_obj != null and struct_obj.?.layout == .Packed) {
|
|
// packed structs are not byte addressable
|
|
deref.parent = null;
|
|
} else if (deref.parent) |*parent| {
|
|
// Update the byte offset (in-place)
|
|
try sema.resolveTypeLayout(container_ty);
|
|
const field_offset = container_ty.structFieldOffset(field_index, mod);
|
|
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
|
|
}
|
|
} else {
|
|
deref.parent = null;
|
|
deref.ty_without_well_defined_layout = container_ty;
|
|
}
|
|
|
|
const tv = deref.pointee orelse {
|
|
deref.pointee = null;
|
|
break :blk deref;
|
|
};
|
|
const coerce_in_mem_ok =
|
|
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
|
|
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
|
|
if (!coerce_in_mem_ok) {
|
|
deref.pointee = null;
|
|
break :blk deref;
|
|
}
|
|
|
|
if (container_ty.isSlice(mod)) {
|
|
deref.pointee = switch (field_index) {
|
|
Value.slice_ptr_index => TypedValue{
|
|
.ty = container_ty.slicePtrFieldType(mod),
|
|
.val = tv.val.slicePtr(mod),
|
|
},
|
|
Value.slice_len_index => TypedValue{
|
|
.ty = Type.usize,
|
|
.val = Value.fromInterned(ip.indexToKey(try tv.val.intern(tv.ty, mod)).slice.len),
|
|
},
|
|
else => unreachable,
|
|
};
|
|
} else {
|
|
const field_ty = container_ty.structFieldType(field_index, mod);
|
|
deref.pointee = TypedValue{
|
|
.ty = field_ty,
|
|
.val = try tv.val.fieldValue(mod, field_index),
|
|
};
|
|
}
|
|
break :blk deref;
|
|
},
|
|
},
|
|
.opt => |opt| switch (opt.val) {
|
|
.none => return sema.fail(block, src, "attempt to use null value", .{}),
|
|
else => |payload| try sema.beginComptimePtrLoad(block, src, Value.fromInterned(payload), null),
|
|
},
|
|
else => unreachable,
|
|
};
|
|
|
|
if (deref.pointee) |tv| {
|
|
if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) {
|
|
deref.parent = .{ .tv = tv, .byte_offset = 0 };
|
|
}
|
|
}
|
|
return deref;
|
|
}
|
|
|
|
/// Analyze a `@bitCast` from the type of `inst` to `dest_ty`.
/// Emits a compile error if the two types differ in bit size.
/// Comptime-known operands are reinterpreted at comptime when possible;
/// otherwise a runtime `bitcast` AIR instruction is produced.
fn bitCast(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
    operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    // Bit sizes are only meaningful after both layouts are resolved.
    try sema.resolveTypeLayout(dest_ty);
    const operand_ty = sema.typeOf(inst);
    try sema.resolveTypeLayout(operand_ty);

    const dest_bit_count = dest_ty.bitSize(mod);
    const operand_bit_count = operand_ty.bitSize(mod);
    if (dest_bit_count != operand_bit_count) {
        return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
            dest_ty.fmt(mod),
            dest_bit_count,
            operand_ty.fmt(mod),
            operand_bit_count,
        });
    }

    // Comptime path: undef maps to undef of the destination type; other
    // values are reinterpreted through their byte representation. A null
    // result from bitCastVal means the reinterpretation must happen at
    // runtime (e.g. the value references a decl address).
    if (try sema.resolveValue(inst)) |operand_val| {
        if (operand_val.isUndef(mod)) return mod.undefRef(dest_ty);
        if (try sema.bitCastVal(block, inst_src, operand_val, operand_ty, dest_ty, 0)) |new_val| {
            return Air.internedToRef(new_val.toIntern());
        }
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, inst_src, operand_src);
    return block.addBitCast(dest_ty, inst);
}
|
|
|
|
/// Reinterpret the comptime value `val` (of type `old_ty`) as `new_ty`,
/// reading starting at `buffer_offset` bytes into `old_ty`'s representation.
/// Returns null when the cast cannot be performed at comptime (e.g. the
/// value embeds a decl reference whose runtime address is unknown).
/// Both types must have a well-defined memory layout.
fn bitCastVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    old_ty: Type,
    new_ty: Type,
    buffer_offset: usize,
) !?Value {
    const mod = sema.mod;
    // Identical types need no reinterpretation.
    if (old_ty.eql(new_ty, mod)) return val;

    // Serialize the value to a byte buffer using the old type's layout,
    // then deserialize those bytes as the new type.
    const byte_count = try sema.usizeCast(block, src, old_ty.abiSize(mod));
    const bytes = try sema.gpa.alloc(u8, byte_count);
    defer sema.gpa.free(bytes);

    val.writeToMemory(old_ty, mod, bytes) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.ReinterpretDeclRef => return null,
        error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
        error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
    };

    return Value.readFromMemory(new_ty, mod, bytes[buffer_offset..], sema.arena) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.IllDefinedMemoryLayout => unreachable,
        error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{new_ty.fmt(mod)}),
    };
}
|
|
|
|
/// Reinterprets the backing memory of union value `val` (of type `old_ty`)
/// as one of its fields of type `field_ty`, at comptime.
/// Returns null if the reinterpretation must instead be performed at runtime
/// (the value embeds a decl reference whose address is unknown).
/// `layout` is the union's container layout; `.Auto` unions have no defined
/// layout and must not reach this function.
fn bitCastUnionFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    old_ty: Type,
    field_ty: Type,
    layout: std.builtin.Type.ContainerLayout,
) !?Value {
    const mod = sema.mod;
    // Same type: no reinterpretation needed.
    if (old_ty.eql(field_ty, mod)) return val;

    // Bitcasting a union field value requires that that field's layout be known
    try sema.resolveTypeLayout(field_ty);

    const old_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
    const field_size = try sema.usizeCast(block, src, field_ty.abiSize(mod));
    const endian = mod.getTarget().cpu.arch.endian();

    // Buffer must hold whichever of the two types is larger.
    const buffer = try sema.gpa.alloc(u8, @max(old_size, field_size));
    defer sema.gpa.free(buffer);

    // Reading a larger value means we need to reinterpret from undefined bytes.
    // 0xaa is the byte pattern used for `undefined` memory.
    const offset = switch (layout) {
        .Extern => offset: {
            if (field_size > old_size) @memset(buffer[old_size..], 0xaa);
            val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.ReinterpretDeclRef => return null,
                error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
            };
            break :offset 0;
        },
        .Packed => offset: {
            if (field_size > old_size) {
                // Which end of the buffer holds the written value depends on
                // target endianness; fill the remainder with the undef pattern.
                const min_size = @max(old_size, 1);
                switch (endian) {
                    .little => @memset(buffer[min_size - 1 ..], 0xaa),
                    .big => @memset(buffer[0 .. buffer.len - min_size + 1], 0xaa),
                }
            }

            val.writeToPackedMemory(old_ty, mod, buffer, 0) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.ReinterpretDeclRef => return null,
            };

            // On big-endian targets the field's bytes sit at the end of the buffer.
            break :offset if (endian == .big) buffer.len - field_size else 0;
        },
        .Auto => unreachable,
    };

    return Value.readFromMemory(field_ty, mod, buffer[offset..], sema.arena) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.IllDefinedMemoryLayout => unreachable,
        error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{field_ty.fmt(mod)}),
    };
}
|
|
|
|
/// Coerces a pointer-to-array (`*[N]T`) to a slice type.
/// For a comptime-known pointer, builds an interned slice value reusing the
/// array pointer's address with the array length as the slice length.
/// Otherwise emits a runtime `array_to_slice` AIR instruction.
fn coerceArrayPtrToSlice(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    if (try sema.resolveValue(inst)) |val| {
        const ptr_array_ty = sema.typeOf(inst);
        const array_ty = ptr_array_ty.childType(mod);
        const slice_val = try mod.intern(.{ .slice = .{
            .ty = dest_ty.toIntern(),
            .ptr = try mod.intern(.{ .ptr = .{
                .ty = dest_ty.slicePtrFieldType(mod).toIntern(),
                .addr = switch (mod.intern_pool.indexToKey(val.toIntern())) {
                    // An undef array pointer becomes an undef address.
                    .undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) },
                    .ptr => |ptr| ptr.addr,
                    else => unreachable,
                },
            } }),
            .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
        } });
        return Air.internedToRef(slice_val);
    }
    try sema.requireRuntimeBlock(block, inst_src, null);
    return block.addTyOp(.array_to_slice, dest_ty, inst);
}
|
|
|
|
/// Checks whether the pointer attributes of `inst_ty` allow in-memory
/// coercion to `dest_ty`: const/volatile qualifiers, address space, and
/// alignment. Returns true when coercion is allowed; otherwise writes the
/// specific mismatch into `in_memory_result` and returns false.
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
    const mod = sema.mod;
    const dest_info = dest_ty.ptrInfo(mod);
    const inst_info = inst_ty.ptrInfo(mod);
    const inst_child = Type.fromInterned(inst_info.child);

    // A pointer to an empty aggregate carries no data, which relaxes the
    // const-qualifier and alignment requirements below.
    const len0 = (inst_child.zigTypeTag(mod) == .Array and (inst_child.arrayLenIncludingSentinel(mod) == 0 or
        (inst_child.arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
        (inst_child.isTuple(mod) and inst_child.structFieldCount(mod) == 0);

    // It is OK to add const/volatile, never to drop them (except const on
    // an empty pointee).
    const const_ok = len0 or dest_info.flags.is_const or !inst_info.flags.is_const;
    const volatile_ok = dest_info.flags.is_volatile or !inst_info.flags.is_volatile;
    if (!(const_ok and volatile_ok)) {
        in_memory_result.* = .{ .ptr_qualifiers = .{
            .actual_const = inst_info.flags.is_const,
            .wanted_const = dest_info.flags.is_const,
            .actual_volatile = inst_info.flags.is_volatile,
            .wanted_volatile = dest_info.flags.is_volatile,
        } };
        return false;
    }

    if (dest_info.flags.address_space != inst_info.flags.address_space) {
        in_memory_result.* = .{ .ptr_addrspace = .{
            .actual = inst_info.flags.address_space,
            .wanted = dest_info.flags.address_space,
        } };
        return false;
    }

    // Both natural alignment: nothing further to verify. Empty pointees
    // also skip the alignment check.
    if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true;
    if (len0) return true;

    // An explicit alignment of .none means the type's ABI alignment.
    const actual_align = if (inst_info.flags.alignment == .none)
        inst_child.abiAlignment(mod)
    else
        inst_info.flags.alignment;

    const wanted_align = if (dest_info.flags.alignment == .none)
        Type.fromInterned(dest_info.child).abiAlignment(mod)
    else
        dest_info.flags.alignment;

    // The destination may require less alignment, never more.
    if (wanted_align.compare(.gt, actual_align)) {
        in_memory_result.* = .{ .ptr_alignment = .{
            .actual = actual_align,
            .wanted = wanted_align,
        } };
        return false;
    }
    return true;
}
|
|
|
|
/// Coerces between two pointer types already known to be in-memory
/// compatible. Comptime-known pointers are re-interned at the destination
/// type; runtime pointers get a null-pointer safety check (when enabled)
/// followed by a bitcast.
fn coerceCompatiblePtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    if (try sema.resolveValue(inst)) |val| {
        // A known-null pointer may only coerce to an allowzero destination.
        if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
            return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
        }
        // The comptime Value representation is compatible with both types.
        return Air.internedToRef(
            (try mod.getCoerced(Value.fromInterned((try val.intern(inst_ty, mod))), dest_ty)).toIntern(),
        );
    }
    try sema.requireRuntimeBlock(block, inst_src, null);
    // Emit a runtime null check when going from a maybe-zero pointer to a
    // non-allowzero pointer whose pointee actually occupies memory.
    const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
    if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and
        (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
    {
        // For a slice, the check applies to its pointer field.
        const actual_ptr = if (inst_ty.isSlice(mod))
            try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
        else
            inst;
        const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr);
        const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
        // An empty slice is allowed to have a null pointer.
        const ok = if (inst_ty.isSlice(mod)) ok: {
            const len = try sema.analyzeSliceLen(block, inst_src, inst);
            const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
            break :ok try block.addBinOp(.bool_or, len_zero, is_non_zero);
        } else is_non_zero;
        try sema.addSafetyCheck(block, inst_src, ok, .cast_to_null);
    }
    const new_ptr = try sema.bitCast(block, dest_ty, inst, inst_src, null);
    try sema.checkKnownAllocPtr(inst, new_ptr);
    return new_ptr;
}
|
|
|
|
/// Coerces an enum value to a tagged union whose tag type matches.
/// Succeeds at comptime when the selected field has exactly one possible
/// value (e.g. `void`); at runtime only when every union field is zero-bit.
/// All other cases produce compile errors with explanatory notes.
fn coerceEnumToUnion(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_ty = sema.typeOf(inst);

    // Untagged unions can never be the target of an enum coercion.
    const tag_ty = union_ty.unionTagType(mod) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    };

    const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
    // Comptime-known tag: build the union value directly, provided the
    // selected field has exactly one possible value.
    if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
        const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
            return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{
                union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
            });
        };

        const union_obj = mod.typeToUnion(union_ty).?;
        const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
        try sema.resolveTypeFields(field_ty);
        // 'noreturn' fields can never be initialized.
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
                errdefer msg.destroy(sema.gpa);

                const field_name = union_obj.field_names.get(ip)[field_index];
                try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                    field_name.fmt(ip),
                });
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        // If the field type has more than one possible value, the payload
        // cannot be inferred from the tag alone.
        const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse {
            const msg = msg: {
                const field_name = union_obj.field_names.get(ip)[field_index];
                const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{
                    inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod),
                    field_ty.fmt(sema.mod), field_name.fmt(ip),
                });
                errdefer msg.destroy(sema.gpa);

                try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                    field_name.fmt(ip),
                });
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        };

        return Air.internedToRef((try mod.unionValue(union_ty, val, opv)).toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_src, null);

    // A runtime tag from a non-exhaustive enum could name no field at all.
    if (tag_ty.isNonexhaustiveEnum(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{
                union_ty.fmt(sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.addDeclaredHereNote(msg, tag_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    const union_obj = mod.typeToUnion(union_ty).?;
    {
        // Collect a note per 'noreturn' field; a runtime tag could select one.
        var msg: ?*Module.ErrorMsg = null;
        errdefer if (msg) |some| some.destroy(sema.gpa);

        for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
            if (Type.fromInterned(field_ty).zigTypeTag(mod) == .NoReturn) {
                const err_msg = msg orelse try sema.errMsg(
                    block,
                    inst_src,
                    "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
                    .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
                );
                msg = err_msg;

                try sema.addFieldErrNote(union_ty, field_index, err_msg, "'noreturn' field here", .{});
            }
        }
        if (msg) |some| {
            // Ownership of the message transfers to failWithOwnedErrorMsg.
            msg = null;
            try sema.addDeclaredHereNote(some, union_ty);
            return sema.failWithOwnedErrorMsg(block, some);
        }
    }

    // If the union has all fields 0 bits, the union value is just the enum value.
    if (union_ty.unionHasAllZeroBitFieldTypes(mod)) {
        return block.addBitCast(union_ty, enum_tag);
    }

    // Otherwise the payload cannot be produced at runtime; list the fields
    // with runtime bits to explain why.
    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            inst_src,
            "runtime coercion from enum '{}' to union '{}' which has non-void fields",
            .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
        );
        errdefer msg.destroy(sema.gpa);

        for (0..union_obj.field_names.len) |field_index| {
            const field_name = union_obj.field_names.get(ip)[field_index];
            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
            if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
            try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
                field_name.fmt(ip),
                field_ty.fmt(sema.mod),
            });
        }
        try sema.addDeclaredHereNote(msg, union_ty);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Coerces an anonymous struct literal (or tuple) with exactly one field to
/// a union, initializing the union field of the same name. Any other field
/// count is a compile error, since a union has exactly one active field.
fn coerceAnonStructToUnion(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_ty = sema.typeOf(inst);
    // Either the single field's name, or the offending field count.
    const field_info: union(enum) {
        name: InternPool.NullTerminatedString,
        count: usize,
    } = switch (ip.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1)
            .{ .name = anon_struct_type.names.get(ip)[0] }
        else
            .{ .count = anon_struct_type.names.len },
        .struct_type => |struct_type| name: {
            const field_names = struct_type.field_names.get(ip);
            break :name if (field_names.len == 1)
                .{ .name = field_names[0] }
            else
                .{ .count = field_names.len };
        },
        else => unreachable,
    };
    switch (field_info) {
        .name => |field_name| {
            // Extract the single field's value and use normal union init.
            const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty);
            return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src);
        },
        .count => |field_count| {
            // A count of 1 would have taken the .name branch above.
            assert(field_count != 1);
            const msg = msg: {
                const msg = if (field_count > 1) try sema.errMsg(
                    block,
                    inst_src,
                    "cannot initialize multiple union fields at once; unions can only have one active field",
                    .{},
                ) else try sema.errMsg(
                    block,
                    inst_src,
                    "union initializer must initialize one field",
                    .{},
                );
                errdefer msg.destroy(sema.gpa);

                // TODO add notes for where the anon struct was created to point out
                // the extra fields.

                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
    }
}
|
|
|
|
/// Coerces a pointer-to-anon-struct to a pointer-to-union by loading the
/// struct, coercing the loaded value to the union type, and taking a
/// reference to the result.
fn coerceAnonStructToUnionPtrs(
    sema: *Sema,
    block: *Block,
    ptr_union_ty: Type,
    union_ty_src: LazySrcLoc,
    ptr_anon_struct: Air.Inst.Ref,
    anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const pointee_union_ty = ptr_union_ty.childType(mod);
    // Dereference, coerce by value, then re-reference.
    const loaded_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
    const coerced_union = try sema.coerceAnonStructToUnion(block, pointee_union_ty, union_ty_src, loaded_struct, anon_struct_src);
    return sema.analyzeRef(block, union_ty_src, coerced_union);
}
|
|
|
|
/// Coerces a pointer-to-anon-struct to a pointer-to-struct by loading the
/// anon struct, coercing the loaded tuple/struct value field-wise, and
/// taking a reference to the result.
fn coerceAnonStructToStructPtrs(
    sema: *Sema,
    block: *Block,
    ptr_struct_ty: Type,
    struct_ty_src: LazySrcLoc,
    ptr_anon_struct: Air.Inst.Ref,
    anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const pointee_struct_ty = ptr_struct_ty.childType(mod);
    // Dereference, coerce by value, then re-reference.
    const loaded_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
    const coerced_struct = try sema.coerceTupleToStruct(block, pointee_struct_ty, loaded_struct, anon_struct_src);
    return sema.analyzeRef(block, struct_ty_src, coerced_struct);
}
|
|
|
|
/// If the lengths match, coerces element-wise.
/// Coerces between array-like types (arrays and vectors). First tries an
/// in-memory coercion of the whole aggregate; then, for runtime vectors,
/// widening casts (intcast/fpext); finally falls back to coercing each
/// element individually, producing an aggregate init.
fn coerceArrayLike(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    const target = mod.getTarget();

    // try coercion of the whole array
    const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result == .ok) {
        if (try sema.resolveValue(inst)) |inst_val| {
            // These types share the same comptime value representation.
            return sema.coerceInMemory(inst_val, dest_ty);
        }
        try sema.requireRuntimeBlock(block, inst_src, null);
        return block.addBitCast(dest_ty, inst);
    }

    // otherwise, try element by element
    const inst_len = inst_ty.arrayLen(mod);
    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
    if (dest_len != inst_len) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                dest_ty.fmt(mod), inst_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
            try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    const dest_elem_ty = dest_ty.childType(mod);
    // Runtime vector-to-vector coercion: a single widening instruction is
    // preferable to element-wise extraction.
    if (dest_ty.isVector(mod) and inst_ty.isVector(mod) and (try sema.resolveValue(inst)) == null) {
        const inst_elem_ty = inst_ty.childType(mod);
        switch (dest_elem_ty.zigTypeTag(mod)) {
            .Int => if (inst_elem_ty.isInt(mod)) {
                // integer widening
                const dst_info = dest_elem_ty.intInfo(mod);
                const src_info = inst_elem_ty.intInfo(mod);
                if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                    // small enough unsigned ints can get casted to large enough signed ints
                    (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
                {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.intcast, dest_ty, inst);
                }
            },
            .Float => if (inst_elem_ty.isRuntimeFloat()) {
                // float widening
                const src_bits = inst_elem_ty.floatBits(target);
                const dst_bits = dest_elem_ty.floatBits(target);
                if (dst_bits >= src_bits) {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.fpext, dest_ty, inst);
                }
            },
            else => {},
        }
    }

    // Element-wise fallback: coerce each element, tracking whether any
    // element is runtime-known.
    const element_vals = try sema.arena.alloc(InternPool.Index, dest_len);
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
    var runtime_src: ?LazySrcLoc = null;

    for (element_vals, element_refs, 0..) |*val, *ref, i| {
        const index_ref = Air.internedToRef((try mod.intValue(Type.usize, i)).toIntern());
        const src = inst_src; // TODO better source location
        const elem_src = inst_src; // TODO better source location
        const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
        const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
        ref.* = coerced;
        if (runtime_src == null) {
            if (try sema.resolveValue(coerced)) |elem_val| {
                val.* = try elem_val.intern(dest_elem_ty, mod);
            } else {
                runtime_src = elem_src;
            }
        }
    }

    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(dest_ty, element_refs);
    }

    // Fully comptime-known: intern the result aggregate.
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = dest_ty.toIntern(),
        .storage = .{ .elems = element_vals },
    } })));
}
|
|
|
|
/// If the lengths match, coerces element-wise.
/// Coerces a tuple to an array type by coercing each tuple field to the
/// array's element type. When the destination has a sentinel, the element
/// one past the tuple's length is filled with the sentinel value.
fn coerceTupleToArray(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    const inst_len = inst_ty.arrayLen(mod);
    const dest_len = dest_ty.arrayLen(mod);

    if (dest_len != inst_len) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
            try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len);
    const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems);
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
    const dest_elem_ty = dest_ty.childType(mod);

    // Track the first runtime-known element, if any.
    var runtime_src: ?LazySrcLoc = null;
    for (element_vals, element_refs, 0..) |*val, *ref, i_usize| {
        const i: u32 = @intCast(i_usize);
        // Reaching inst_len here means dest_len includes a sentinel slot
        // (arrayLen counts it); fill it with the sentinel value.
        if (i_usize == inst_len) {
            const sentinel_val = dest_ty.sentinel(mod).?;
            val.* = sentinel_val.toIntern();
            ref.* = Air.internedToRef(sentinel_val.toIntern());
            break;
        }
        const elem_src = inst_src; // TODO better source location
        const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i);
        const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
        ref.* = coerced;
        if (runtime_src == null) {
            if (try sema.resolveValue(coerced)) |elem_val| {
                val.* = try elem_val.intern(dest_elem_ty, mod);
            } else {
                runtime_src = elem_src;
            }
        }
    }

    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(dest_ty, element_refs);
    }

    // Fully comptime-known: intern the result aggregate.
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = dest_ty.toIntern(),
        .storage = .{ .elems = element_vals },
    } })));
}
|
|
|
|
/// If the lengths match, coerces element-wise.
/// Coerces a pointer-to-tuple to a slice type: loads the tuple, coerces it
/// to a temporary array matching the slice's element/sentinel, and then
/// coerces a reference to that array into the slice.
fn coerceTupleToSlicePtrs(
    sema: *Sema,
    block: *Block,
    slice_ty: Type,
    slice_ty_src: LazySrcLoc,
    ptr_tuple: Air.Inst.Ref,
    tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
    const tuple_val = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
    const slice_info = slice_ty.ptrInfo(mod);
    // Intermediate array type with one element per tuple field, matching
    // the slice's child type and sentinel.
    const intermediate_array_ty = try mod.arrayType(.{
        .len = tuple_ty.structFieldCount(mod),
        .sentinel = slice_info.sentinel,
        .child = slice_info.child,
    });
    const array_inst = try sema.coerceTupleToArray(block, intermediate_array_ty, slice_ty_src, tuple_val, tuple_src);
    if (slice_info.flags.alignment != .none) {
        return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
    }
    const array_ref = try sema.analyzeRef(block, slice_ty_src, array_inst);
    return sema.coerceArrayPtrToSlice(block, slice_ty, array_ref, slice_ty_src);
}
|
|
|
|
/// If the lengths match, coerces element-wise.
/// Coerces a pointer-to-tuple to a pointer-to-array: loads the tuple,
/// coerces the value to the destination's array type, and returns a
/// reference to the coerced array.
fn coerceTupleToArrayPtrs(
    sema: *Sema,
    block: *Block,
    ptr_array_ty: Type,
    array_ty_src: LazySrcLoc,
    ptr_tuple: Air.Inst.Ref,
    tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_val = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
    const dest_ptr_info = ptr_array_ty.ptrInfo(mod);
    const dest_array_ty = Type.fromInterned(dest_ptr_info.child);
    const coerced_array = try sema.coerceTupleToArray(block, dest_array_ty, array_ty_src, tuple_val, tuple_src);
    if (dest_ptr_info.flags.alignment != .none) {
        return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
    }
    return sema.analyzeRef(block, array_ty_src, coerced_array);
}
|
|
|
|
/// Handles both tuples and anon struct literals. Coerces field-wise. Reports
/// errors for both extra fields and missing fields.
/// Returns a comptime-interned aggregate when every field value is
/// comptime-known, otherwise emits a runtime `aggregate_init` instruction.
fn coerceTupleToStruct(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // Field types and default inits must be resolved before coercing into them.
    try sema.resolveTypeFields(struct_ty);
    try sema.resolveStructFieldInits(struct_ty);

    // Destination is itself a tuple/anon struct: handled by a separate path.
    if (struct_ty.isTupleOrAnonStruct(mod)) {
        return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
    }

    const struct_type = mod.typeToStruct(struct_ty).?;
    // `field_vals` holds interned values for the fully-comptime result path;
    // `field_refs` holds AIR refs for the runtime `aggregate_init` path.
    // `.none` in `field_refs` marks a field not yet provided by the literal.
    const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len);
    const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
    @memset(field_refs, .none);

    const inst_ty = sema.typeOf(inst);
    var runtime_src: ?LazySrcLoc = null;
    const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |s| s.field_types.len,
        else => unreachable,
    };
    for (0..field_count) |field_index_usize| {
        const field_i: u32 = @intCast(field_index_usize);
        const field_src = inst_src; // TODO better source location
        // https://github.com/ziglang/zig/issues/15709
        // Unnamed tuple fields are looked up by their index formatted as a name.
        const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
                anon_struct_type.names.get(ip)[field_i]
            else
                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
            .struct_type => |s| s.field_names.get(ip)[field_i],
            else => unreachable,
        };
        // Errors here if the destination struct has no field with this name.
        const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
        const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
        const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
        field_refs[field_index] = coerced;
        // A comptime field may only be "stored" its declared init value, and the
        // stored value must itself be comptime-known.
        if (struct_type.fieldIsComptime(ip, field_index)) {
            const init_val = (try sema.resolveValue(coerced)) orelse {
                return sema.failWithNeededComptime(block, field_src, .{
                    .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                });
            };

            const field_init = Value.fromInterned(struct_type.field_inits.get(ip)[field_index]);
            if (!init_val.eql(field_init, field_ty, sema.mod)) {
                return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
            }
        }
        // Record the first runtime-known field; after that, stop collecting
        // interned values since the aggregate cannot be fully comptime.
        if (runtime_src == null) {
            if (try sema.resolveValue(coerced)) |field_val| {
                field_vals[field_index] = field_val.toIntern();
            } else {
                runtime_src = field_src;
            }
        }
    }

    // Populate default field values and report errors for missing fields.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    for (field_refs, 0..) |*field_ref, i| {
        if (field_ref.* != .none) continue;

        const field_name = struct_type.field_names.get(ip)[i];
        const field_default_val = struct_type.fieldInit(ip, i);
        const field_src = inst_src; // TODO better source location
        if (field_default_val == .none) {
            // No default value: accumulate all missing fields into one error.
            const template = "missing struct field: {}";
            const args = .{field_name.fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, field_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, field_src, template, args);
            }
            continue;
        }
        // Defaults feed whichever representation (comptime vals / runtime refs)
        // the result will use.
        if (runtime_src == null) {
            field_vals[i] = field_default_val;
        } else {
            field_ref.* = Air.internedToRef(field_default_val);
        }
    }

    if (root_msg) |msg| {
        try sema.addDeclaredHereNote(msg, struct_ty);
        root_msg = null;
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Runtime path: any field was runtime-known, so emit an aggregate init.
    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(struct_ty, field_refs);
    }

    // Fully comptime-known: intern the whole aggregate value.
    const struct_val = try mod.intern(.{ .aggregate = .{
        .ty = struct_ty.toIntern(),
        .storage = .{ .elems = field_vals },
    } });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer ip.remove(struct_val);

    return Air.internedToRef(struct_val);
}
|
|
|
|
/// Coerces a tuple or anon struct literal to another tuple type, field-wise.
/// Returns `error.NotCoercible` if the source has more fields than the
/// destination. Missing destination fields are filled from default values
/// when present; otherwise a "missing field" error is accumulated.
fn coerceTupleToTuple(
    sema: *Sema,
    block: *Block,
    tuple_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |struct_type| struct_type.field_types.len,
        else => unreachable,
    };
    // `field_vals` backs the comptime result; `field_refs` backs the runtime
    // `aggregate_init` result. `.none` marks a not-yet-provided field.
    const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
    const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
    @memset(field_refs, .none);

    const inst_ty = sema.typeOf(inst);
    const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |struct_type| struct_type.field_types.len,
        else => unreachable,
    };
    // Extra source fields can never be placed; signal the caller to try
    // another coercion or report an error.
    if (src_field_count > dest_field_count) return error.NotCoercible;

    var runtime_src: ?LazySrcLoc = null;
    for (0..dest_field_count) |field_index_usize| {
        const field_i: u32 = @intCast(field_index_usize);
        const field_src = inst_src; // TODO better source location
        // https://github.com/ziglang/zig/issues/15709
        // Unnamed fields on either side are addressed by index-as-name.
        const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
                anon_struct_type.names.get(ip)[field_i]
            else
                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
            .struct_type => |struct_type| if (struct_type.field_names.len > 0)
                struct_type.field_names.get(ip)[field_i]
            else
                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
            else => unreachable,
        };

        // `len` is a synthetic member of tuples, not an assignable field.
        if (ip.stringEqlSlice(field_name, "len"))
            return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});

        const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize],
            .struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize],
            else => unreachable,
        };
        const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
            .struct_type => |struct_type| struct_type.fieldInit(ip, field_index_usize),
            else => unreachable,
        };

        const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);

        const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
        const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src);
        field_refs[field_index] = coerced;
        // A destination field with a (comptime) value only accepts an equal,
        // comptime-known value.
        if (default_val != .none) {
            const init_val = (try sema.resolveValue(coerced)) orelse {
                return sema.failWithNeededComptime(block, field_src, .{
                    .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                });
            };

            if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), sema.mod)) {
                return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
            }
        }
        // Track the first runtime-known field; that decides result strategy.
        if (runtime_src == null) {
            if (try sema.resolveValue(coerced)) |field_val| {
                field_vals[field_index] = field_val.toIntern();
            } else {
                runtime_src = field_src;
            }
        }
    }

    // Populate default field values and report errors for missing fields.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    for (field_refs, 0..) |*field_ref, i_usize| {
        const i: u32 = @intCast(i_usize);
        if (field_ref.* != .none) continue;

        const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
            .struct_type => |struct_type| struct_type.fieldInit(ip, i),
            else => unreachable,
        };

        const field_src = inst_src; // TODO better source location
        if (default_val == .none) {
            // Unnamed fields produce an index-based message; named ones use
            // the field name. All missing fields accumulate into one error.
            const field_name = tuple_ty.structFieldName(i, mod).unwrap() orelse {
                const template = "missing tuple field: {d}";
                if (root_msg) |msg| {
                    try sema.errNote(block, field_src, msg, template, .{i});
                } else {
                    root_msg = try sema.errMsg(block, field_src, template, .{i});
                }
                continue;
            };
            const template = "missing struct field: {}";
            const args = .{field_name.fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, field_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, field_src, template, args);
            }
            continue;
        }
        if (runtime_src == null) {
            field_vals[i] = default_val;
        } else {
            field_ref.* = Air.internedToRef(default_val);
        }
    }

    if (root_msg) |msg| {
        try sema.addDeclaredHereNote(msg, tuple_ty);
        root_msg = null;
        return sema.failWithOwnedErrorMsg(block, msg);
    }

    // Runtime path: emit an aggregate init from the collected refs.
    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(tuple_ty, field_refs);
    }

    // Fully comptime-known: intern the aggregate value.
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = tuple_ty.toIntern(),
        .storage = .{ .elems = field_vals },
    } })));
}
|
|
|
|
/// Analyzes a use of a decl's *value* (as opposed to its address): takes a
/// reference to the decl and loads through it. Comptime-known results are
/// memoized in `decl_val_table` so repeated uses are cheap.
fn analyzeDeclVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_index: InternPool.DeclIndex,
) CompileError!Air.Inst.Ref {
    try sema.addReferencedBy(block, src, decl_index);
    // Fast path: this decl's value was already computed and cached.
    if (sema.decl_val_table.get(decl_index)) |cached| return cached;
    const decl_ref = try sema.analyzeDeclRefInner(decl_index, false);
    const loaded = try sema.analyzeLoad(block, src, decl_ref, src);
    // Only comptime-known (interned) results are cached, and never while
    // inside a `@TypeOf` block.
    if (loaded.toInterned() != null and !block.is_typeof) {
        try sema.decl_val_table.put(sema.gpa, decl_index, loaded);
    }
    return loaded;
}
|
|
|
|
/// Records that `block.src_decl` references `decl_index`, for use by the
/// "referenced by" trace in compile errors. A no-op when the user disabled
/// reference traces (`--reference-trace 0`).
fn addReferencedBy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_index: InternPool.DeclIndex,
) !void {
    if (sema.mod.comp.reference_trace == 0) return;
    if (src == .unneeded) {
        // We can't use NeededSourceLocation, since sites handling that assume it means a compile
        // error. Our long-term strategy here is to gradually transition from NeededSourceLocation
        // into having more LazySrcLoc tags. In the meantime, let release compilers just ignore this
        // reference (a slightly-incomplete error is better than a crash!), but trigger a panic in
        // debug so we can fix this case.
        if (std.debug.runtime_safety) unreachable else return;
    }
    try sema.mod.reference_table.put(sema.gpa, decl_index, .{
        .referencer = block.src_decl,
        .src = src,
    });
}
|
|
|
|
/// Ensures the given decl has been semantically analyzed, detecting
/// dependency loops. If analysis fails, the current analysis unit (owner
/// function or owner decl) is marked as a dependency failure and the error
/// is propagated.
fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void {
    const mod = sema.mod;
    const decl = mod.declPtr(decl_index);

    // A decl that is currently mid-analysis means we looped back to it.
    if (decl.analysis == .in_progress) {
        const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{});
        return sema.failWithOwnedErrorMsg(null, msg);
    }

    mod.ensureDeclAnalyzed(decl_index) catch |err| {
        // Propagate the failure to whatever we are currently analyzing.
        if (sema.owner_func_index == .none) {
            sema.owner_decl.analysis = .dependency_failure;
        } else {
            mod.intern_pool.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
        }
        return err;
    };
}
|
|
|
|
/// Ensures the given function's body has been analyzed. On failure, marks
/// the current analysis unit (owner function or owner decl) as a dependency
/// failure before propagating the error.
fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void {
    const mod = sema.mod;
    mod.ensureFuncBodyAnalyzed(func) catch |err| {
        // Propagate the failure to whatever we are currently analyzing.
        if (sema.owner_func_index == .none) {
            sema.owner_decl.analysis = .dependency_failure;
        } else {
            mod.intern_pool.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
        }
        return err;
    };
}
|
|
|
|
/// Builds a `?*const anyopaque` value: `null` when `opt_val` is null,
/// otherwise a reference to `opt_val` coerced to `*const anyopaque`.
fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
    const mod = sema.mod;
    const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque);
    const result_ty = try mod.optionalType(ptr_anyopaque_ty.toIntern());
    // Payload: take a reference to the value and erase its pointee type.
    const payload: InternPool.Index = if (opt_val) |val| payload: {
        const ref_val = Value.fromInterned(try sema.refValue(val.toIntern()));
        break :payload (try mod.getCoerced(ref_val, ptr_anyopaque_ty)).toIntern();
    } else .none;
    return Value.fromInterned(try mod.intern(.{ .opt = .{
        .ty = result_ty.toIntern(),
        .val = payload,
    } }));
}
|
|
|
|
/// Analyzes a reference (pointer) to the decl at the given index, triggering
/// function body analysis when the decl is a function. See
/// `analyzeDeclRefInner` for the variant that skips body analysis.
fn analyzeDeclRef(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref {
    return sema.analyzeDeclRefInner(decl_index, true);
}
|
|
|
|
/// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but
/// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a
/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps
/// this function with `analyze_fn_body` set to true.
fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    try sema.ensureDeclAnalyzed(decl_index);

    const decl = mod.declPtr(decl_index);
    const decl_tv = try decl.typedValue();
    // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type
    try sema.declareDependency(.{ .decl_val = decl_index });
    // Pointer type mirrors the decl's alignment/address space; it is const
    // unless the decl is a mutable (non-const) variable.
    const ptr_ty = try sema.ptrType(.{
        .child = decl_tv.ty.toIntern(),
        .flags = .{
            .alignment = decl.alignment,
            .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true,
            .address_space = decl.@"addrspace",
        },
    });
    if (analyze_fn_body) {
        try sema.maybeQueueFuncBodyAnalysis(decl_index);
    }
    // The reference itself is always comptime-known: a pointer to the decl.
    return Air.internedToRef((try mod.intern(.{ .ptr = .{
        .ty = ptr_ty.toIntern(),
        .addr = .{ .decl = decl_index },
    } })));
}
|
|
|
|
/// If the decl is a function with runtime bits and an actual body (not
/// undef/extern), queues its body for analysis.
fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: InternPool.DeclIndex) !void {
    const mod = sema.mod;
    const decl_tv = try mod.declPtr(decl_index).typedValue();

    // Only function decls whose code can end up in the binary are relevant.
    if (decl_tv.ty.zigTypeTag(mod) != .Fn) return;
    if (!try sema.fnHasRuntimeBits(decl_tv.ty)) return;

    const func_index = decl_tv.val.toIntern();
    if (!mod.intern_pool.isFuncBody(func_index)) return; // undef or extern function

    try mod.ensureFuncBodyAnalysisQueued(func_index);
}
|
|
|
|
/// Analyzes taking the address of `operand` (`&operand`). Comptime-known
/// function values become decl refs; other comptime values become anonymous
/// decl refs. Runtime values are spilled to a stack allocation, and the
/// resulting pointer is returned as a *const* pointer.
fn analyzeRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);

    if (try sema.resolveValue(operand)) |val| {
        switch (mod.intern_pool.indexToKey(val.toIntern())) {
            // Referencing a function refers to its owning decl, not a copy.
            .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
            .func => |func| return sema.analyzeDeclRef(func.owner_decl),
            else => return anonDeclRef(sema, val.toIntern()),
        }
    }

    // Runtime value: allocate a local, store into it, and hand out a const
    // view of that allocation.
    try sema.requireRuntimeBlock(block, src, null);
    const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
    const ptr_type = try sema.ptrType(.{
        .child = operand_ty.toIntern(),
        .flags = .{
            .is_const = true,
            .address_space = address_space,
        },
    });
    // The alloc itself must be mutable so we can store the operand into it.
    const mut_ptr_type = try sema.ptrType(.{
        .child = operand_ty.toIntern(),
        .flags = .{ .address_space = address_space },
    });
    const alloc = try block.addTy(.alloc, mut_ptr_type);
    try sema.storePtr(block, src, alloc, operand);

    // TODO: Replace with sema.coerce when that supports adding pointer constness.
    return sema.bitCast(block, ptr_type, alloc, src, null);
}
|
|
|
|
/// Analyzes a load through `ptr`. Produces a comptime value when the pointee
/// is comptime-known (including one-possible-value types), otherwise emits a
/// runtime `load` — or a `ptr_elem_val` for runtime-indexed vector element
/// pointers.
fn analyzeLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(ptr);
    const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
        .Pointer => ptr_ty.childType(mod),
        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
    };
    // Opaque types have unknown size and cannot be loaded by value.
    if (elem_ty.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(mod)});
    }

    // Types with exactly one possible value need no actual load.
    if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| {
        return Air.internedToRef(opv.toIntern());
    }

    // Comptime-known pointer: try to dereference at comptime.
    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| {
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    // A pointer to a vector element at a runtime index cannot be loaded with
    // a plain `load`; recover the base pointer and index from the
    // `ptr_elem_ptr` that produced it and emit `ptr_elem_val` instead.
    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
        const ptr_inst = ptr.toIndex().?;
        const air_tags = sema.air_instructions.items(.tag);
        if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
            const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
            return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
        }
        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
            ptr_ty.fmt(sema.mod),
        });
    }

    return block.addTyOp(.load, elem_ty, ptr);
}
|
|
|
|
/// Extracts the pointer part of a slice value, at comptime when possible,
/// otherwise via a runtime `slice_ptr` instruction. Undefined slices yield
/// an undefined pointer.
fn analyzeSlicePtr(
    sema: *Sema,
    block: *Block,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    slice_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const result_ty = slice_ty.slicePtrFieldType(mod);

    // Comptime path: pull the pointer straight out of the slice value.
    if (try sema.resolveValue(slice)) |slice_val| {
        if (slice_val.isUndef(mod)) return mod.undefRef(result_ty);
        return Air.internedToRef(slice_val.slicePtr(mod).toIntern());
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, slice_src, null);
    return block.addTyOp(.slice_ptr, result_ty, slice);
}
|
|
|
|
/// Extracts the length of a slice as a `usize`, at comptime when the slice
/// value is known, otherwise via a runtime `slice_len` instruction.
/// Undefined slices yield an undefined length.
fn analyzeSliceLen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    // Comptime path: read the length from the slice value.
    if (try sema.resolveValue(slice_inst)) |slice_val| {
        if (slice_val.isUndef(mod)) {
            return mod.undefRef(Type.usize);
        }
        return mod.intRef(Type.usize, slice_val.sliceLen(mod));
    }

    // Runtime path.
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.slice_len, Type.usize, slice_inst);
}
|
|
|
|
/// Analyzes `x == null` (or `x != null` when `invert_logic` is true).
/// Resolves at comptime when the operand value is known or when the type
/// guarantees the answer; otherwise emits `is_null`/`is_non_null`.
fn analyzeIsNull(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    invert_logic: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const result_ty = Type.bool;
    // Comptime path: evaluate directly against the known value.
    if (try sema.resolveValue(operand)) |opt_val| {
        if (opt_val.isUndef(mod)) {
            return mod.undefRef(result_ty);
        }
        const is_null = opt_val.isNull(mod);
        const bool_value = if (invert_logic) !is_null else is_null;
        return if (bool_value) .bool_true else .bool_false;
    }

    // The comptime answer to give when the operand can never be null.
    const inverted_non_null_res: Air.Inst.Ref = if (invert_logic) .bool_true else .bool_false;
    const operand_ty = sema.typeOf(operand);
    // `?noreturn` can only ever hold null... but a runtime value of it being
    // observed means this code path treats it as non-null here.
    if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
        return inverted_non_null_res;
    }
    // Non-optional, non-pointer-like-optional operands are never null.
    if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
        return inverted_non_null_res;
    }
    // Runtime path.
    try sema.requireRuntimeBlock(block, src, null);
    const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null;
    return block.addUnOp(air_tag, operand);
}
|
|
|
|
/// Comptime-only analysis of "pointee is not an error" for a pointer
/// operand. Returns `.bool_true` when the pointee type can never be an
/// error, `.bool_false` when it is always an error set, and `.none` when a
/// runtime check is required (error union pointee).
fn analyzePtrIsNonErrComptimeOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(operand);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer);
    const child_ty = ptr_ty.childType(mod);

    // These are unused on every path; kept for interface symmetry with
    // `analyzeIsNonErrComptimeOnly`.
    _ = block;
    _ = src;

    return switch (child_ty.zigTypeTag(mod)) {
        // An error set value is always an error.
        .ErrorSet => .bool_false,
        // Error union: no comptime answer; caller emits a runtime check.
        .ErrorUnion => .none,
        // Any other type can never hold an error.
        else => .bool_true,
    };
}
|
|
|
|
/// Comptime-only analysis of "operand is not an error". Returns
/// `.bool_true`/`.bool_false` when the answer is comptime-known, or `.none`
/// when a runtime `is_non_err` check is needed. Goes to some length to avoid
/// resolving inferred error sets unnecessarily, since resolution is only
/// legal/meaningful once the function body is fully analyzed.
fn analyzeIsNonErrComptimeOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const operand_ty = sema.typeOf(operand);
    const ot = operand_ty.zigTypeTag(mod);
    // Non-error types are trivially never errors; bare error sets always are.
    if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
    if (ot == .ErrorSet) return .bool_false;
    assert(ot == .ErrorUnion);

    // An error union with a `noreturn` payload can only hold an error.
    const payload_ty = operand_ty.errorUnionPayload(mod);
    if (payload_ty.zigTypeTag(mod) == .NoReturn) {
        return .bool_false;
    }

    // Syntactic shortcut: a freshly wrapped payload/error tells us the answer.
    if (operand.toIndex()) |operand_inst| {
        switch (sema.air_instructions.items(.tag)[@intFromEnum(operand_inst)]) {
            .wrap_errunion_payload => return .bool_true,
            .wrap_errunion_err => return .bool_false,
            else => {},
        }
    } else if (operand == .undef) {
        return mod.undefRef(Type.bool);
    } else if (@intFromEnum(operand) < InternPool.static_len) {
        // None of the ref tags can be errors.
        return .bool_true;
    }

    const maybe_operand_val = try sema.resolveValue(operand);

    // exception if the error union error set is known to be empty,
    // we allow the comparison but always make it comptime-known.
    const set_ty = ip.errorUnionSet(operand_ty.toIntern());
    switch (set_ty) {
        .anyerror_type => {},
        // The ad-hoc inferred error set of the function currently being
        // analyzed: inspect `sema.fn_ret_ty_ies` without resolving it.
        .adhoc_inferred_error_set_type => if (sema.fn_ret_ty_ies) |ies| blk: {
            // If the error set is empty, we must return a comptime true or false.
            // However we want to avoid unnecessarily resolving an inferred error set
            // in case it is already non-empty.
            switch (ies.resolved) {
                .anyerror_type => break :blk,
                .none => {},
                else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
            }

            if (maybe_operand_val != null) break :blk;

            // Try to avoid resolving inferred error set if possible.
            if (ies.errors.count() != 0) return .none;
            switch (ies.resolved) {
                .anyerror_type => return .none,
                .none => {},
                else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
                    0 => return .bool_true,
                    else => return .none,
                },
            }
            // We do not have a comptime answer because this inferred error
            // set is not resolved, and an instruction later in this function
            // body may or may not cause an error to be added to this set.
            return .none;
        },
        else => switch (ip.indexToKey(set_ty)) {
            // Explicit error set: empty set means the value cannot be an error.
            .error_set_type => |error_set_type| {
                if (error_set_type.names.len == 0) return .bool_true;
            },
            // Inferred error set of some (possibly other) function.
            .inferred_error_set_type => |func_index| blk: {
                // If the error set is empty, we must return a comptime true or false.
                // However we want to avoid unnecessarily resolving an inferred error set
                // in case it is already non-empty.
                switch (ip.funcIesResolved(func_index).*) {
                    .anyerror_type => break :blk,
                    .none => {},
                    else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
                }
                if (maybe_operand_val != null) break :blk;
                // If it is the error set of the function currently being
                // analyzed, use its in-progress state instead of resolving.
                if (sema.fn_ret_ty_ies) |ies| {
                    if (ies.func == func_index) {
                        // Try to avoid resolving inferred error set if possible.
                        if (ies.errors.count() != 0) return .none;
                        switch (ies.resolved) {
                            .anyerror_type => return .none,
                            .none => {},
                            else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
                                0 => return .bool_true,
                                else => return .none,
                            },
                        }
                        // We do not have a comptime answer because this inferred error
                        // set is not resolved, and an instruction later in this function
                        // body may or may not cause an error to be added to this set.
                        return .none;
                    }
                }
                const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty);
                if (resolved_ty == .anyerror_type)
                    break :blk;
                if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0)
                    return .bool_true;
            },
            else => unreachable,
        },
    }

    // Value-based path: a comptime-known error union answers directly.
    if (maybe_operand_val) |err_union| {
        if (err_union.isUndef(mod)) {
            return mod.undefRef(Type.bool);
        }
        if (err_union.getErrorName(mod) == .none) {
            return .bool_true;
        } else {
            return .bool_false;
        }
    }
    return .none;
}
|
|
|
|
/// Analyzes "operand is not an error". Answers at comptime when possible,
/// otherwise emits a runtime `is_non_err` instruction.
fn analyzeIsNonErr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const comptime_result = try sema.analyzeIsNonErrComptimeOnly(block, src, operand);
    // `.none` signals that no comptime answer exists.
    if (comptime_result != .none) return comptime_result;
    try sema.requireRuntimeBlock(block, src, null);
    return block.addUnOp(.is_non_err, operand);
}
|
|
|
|
/// Analyzes "pointee is not an error" for a pointer operand. Answers at
/// comptime when possible, otherwise emits a runtime `is_non_err_ptr`
/// instruction.
fn analyzePtrIsNonErr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const comptime_result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
    // `.none` signals that no comptime answer exists.
    if (comptime_result != .none) return comptime_result;
    try sema.requireRuntimeBlock(block, src, null);
    return block.addUnOp(.is_non_err_ptr, operand);
}
|
|
|
|
fn analyzeSlice(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
ptr_ptr: Air.Inst.Ref,
|
|
uncasted_start: Air.Inst.Ref,
|
|
uncasted_end_opt: Air.Inst.Ref,
|
|
sentinel_opt: Air.Inst.Ref,
|
|
sentinel_src: LazySrcLoc,
|
|
ptr_src: LazySrcLoc,
|
|
start_src: LazySrcLoc,
|
|
end_src: LazySrcLoc,
|
|
by_length: bool,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
// Slice expressions can operate on a variable whose type is an array. This requires
|
|
// the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
|
|
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
|
|
const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
|
|
.Pointer => ptr_ptr_ty.childType(mod),
|
|
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}),
|
|
};
|
|
|
|
var array_ty = ptr_ptr_child_ty;
|
|
var slice_ty = ptr_ptr_ty;
|
|
var ptr_or_slice = ptr_ptr;
|
|
var elem_ty: Type = undefined;
|
|
var ptr_sentinel: ?Value = null;
|
|
switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
|
|
.Array => {
|
|
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
|
|
elem_ty = ptr_ptr_child_ty.childType(mod);
|
|
},
|
|
.Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
|
|
.One => {
|
|
const double_child_ty = ptr_ptr_child_ty.childType(mod);
|
|
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
|
|
if (double_child_ty.zigTypeTag(mod) == .Array) {
|
|
ptr_sentinel = double_child_ty.sentinel(mod);
|
|
slice_ty = ptr_ptr_child_ty;
|
|
array_ty = double_child_ty;
|
|
elem_ty = double_child_ty.childType(mod);
|
|
} else {
|
|
const bounds_error_message = "slice of single-item pointer must have comptime-known bounds [0..0], [0..1], or [1..1]";
|
|
if (uncasted_end_opt == .none) {
|
|
return sema.fail(block, src, bounds_error_message, .{});
|
|
}
|
|
const start_value = try sema.resolveConstDefinedValue(
|
|
block,
|
|
start_src,
|
|
uncasted_start,
|
|
.{ .needed_comptime_reason = bounds_error_message },
|
|
);
|
|
|
|
const end_value = try sema.resolveConstDefinedValue(
|
|
block,
|
|
end_src,
|
|
uncasted_end_opt,
|
|
.{ .needed_comptime_reason = bounds_error_message },
|
|
);
|
|
|
|
if (try sema.compareScalar(start_value, .neq, end_value, Type.comptime_int)) {
|
|
if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, Type.comptime_int)) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, start_src, bounds_error_message, .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(
|
|
block,
|
|
start_src,
|
|
msg,
|
|
"expected '{}', found '{}'",
|
|
.{
|
|
Value.zero_comptime_int.fmtValue(Type.comptime_int, mod),
|
|
start_value.fmtValue(Type.comptime_int, mod),
|
|
},
|
|
);
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
} else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, Type.comptime_int)) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, end_src, bounds_error_message, .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(
|
|
block,
|
|
end_src,
|
|
msg,
|
|
"expected '{}', found '{}'",
|
|
.{
|
|
Value.one_comptime_int.fmtValue(Type.comptime_int, mod),
|
|
end_value.fmtValue(Type.comptime_int, mod),
|
|
},
|
|
);
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
} else {
|
|
if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, Type.comptime_int)) {
|
|
return sema.fail(
|
|
block,
|
|
end_src,
|
|
"end index {} out of bounds for slice of single-item pointer",
|
|
.{end_value.fmtValue(Type.comptime_int, mod)},
|
|
);
|
|
}
|
|
}
|
|
|
|
array_ty = try mod.arrayType(.{
|
|
.len = 1,
|
|
.child = double_child_ty.toIntern(),
|
|
});
|
|
const ptr_info = ptr_ptr_child_ty.ptrInfo(mod);
|
|
slice_ty = try mod.ptrType(.{
|
|
.child = array_ty.toIntern(),
|
|
.flags = .{
|
|
.alignment = ptr_info.flags.alignment,
|
|
.is_const = ptr_info.flags.is_const,
|
|
.is_allowzero = ptr_info.flags.is_allowzero,
|
|
.is_volatile = ptr_info.flags.is_volatile,
|
|
.address_space = ptr_info.flags.address_space,
|
|
},
|
|
});
|
|
elem_ty = double_child_ty;
|
|
}
|
|
},
|
|
.Many, .C => {
|
|
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
|
|
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
|
|
slice_ty = ptr_ptr_child_ty;
|
|
array_ty = ptr_ptr_child_ty;
|
|
elem_ty = ptr_ptr_child_ty.childType(mod);
|
|
|
|
if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
|
|
if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
|
|
if (ptr_val.isNull(mod)) {
|
|
return sema.fail(block, src, "slice of null pointer", .{});
|
|
}
|
|
}
|
|
}
|
|
},
|
|
.Slice => {
|
|
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
|
|
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
|
|
slice_ty = ptr_ptr_child_ty;
|
|
array_ty = ptr_ptr_child_ty;
|
|
elem_ty = ptr_ptr_child_ty.childType(mod);
|
|
},
|
|
},
|
|
else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
|
|
}
|
|
|
|
const ptr = if (slice_ty.isSlice(mod))
|
|
try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
|
|
else if (array_ty.zigTypeTag(mod) == .Array) ptr: {
|
|
var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
|
|
assert(manyptr_ty_key.child == array_ty.toIntern());
|
|
assert(manyptr_ty_key.flags.size == .One);
|
|
manyptr_ty_key.child = elem_ty.toIntern();
|
|
manyptr_ty_key.flags.size = .Many;
|
|
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
|
|
} else ptr_or_slice;
|
|
|
|
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
|
|
const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
|
|
const new_ptr_ty = sema.typeOf(new_ptr);
|
|
|
|
// true if and only if the end index of the slice, implicitly or explicitly, equals
|
|
// the length of the underlying object being sliced. we might learn the length of the
|
|
// underlying object because it is an array (which has the length in the type), or
|
|
// we might learn of the length because it is a comptime-known slice value.
|
|
var end_is_len = uncasted_end_opt == .none;
|
|
const end = e: {
|
|
if (array_ty.zigTypeTag(mod) == .Array) {
|
|
const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
|
|
|
|
if (!end_is_len) {
|
|
const end = if (by_length) end: {
|
|
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
|
|
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
|
|
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
if (try sema.resolveValue(end)) |end_val| {
|
|
const len_s_val = try mod.intValue(
|
|
Type.usize,
|
|
array_ty.arrayLenIncludingSentinel(mod),
|
|
);
|
|
if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
|
|
const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
|
|
" +1 (sentinel)"
|
|
else
|
|
"";
|
|
|
|
return sema.fail(
|
|
block,
|
|
end_src,
|
|
"end index {} out of bounds for array of length {}{s}",
|
|
.{
|
|
end_val.fmtValue(Type.usize, mod),
|
|
len_val.fmtValue(Type.usize, mod),
|
|
sentinel_label,
|
|
},
|
|
);
|
|
}
|
|
|
|
// end_is_len is only true if we are NOT using the sentinel
|
|
// length. For sentinel-length, we don't want the type to
|
|
// contain the sentinel.
|
|
if (end_val.eql(len_val, Type.usize, mod)) {
|
|
end_is_len = true;
|
|
}
|
|
}
|
|
break :e end;
|
|
}
|
|
|
|
break :e Air.internedToRef(len_val.toIntern());
|
|
} else if (slice_ty.isSlice(mod)) {
|
|
if (!end_is_len) {
|
|
const end = if (by_length) end: {
|
|
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
|
|
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
|
|
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
|
|
if (try sema.resolveValue(ptr_or_slice)) |slice_val| {
|
|
if (slice_val.isUndef(mod)) {
|
|
return sema.fail(block, src, "slice of undefined", .{});
|
|
}
|
|
const has_sentinel = slice_ty.sentinel(mod) != null;
|
|
const slice_len = slice_val.sliceLen(mod);
|
|
const len_plus_sent = slice_len + @intFromBool(has_sentinel);
|
|
const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
|
|
if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
|
|
const sentinel_label: []const u8 = if (has_sentinel)
|
|
" +1 (sentinel)"
|
|
else
|
|
"";
|
|
|
|
return sema.fail(
|
|
block,
|
|
end_src,
|
|
"end index {} out of bounds for slice of length {d}{s}",
|
|
.{
|
|
end_val.fmtValue(Type.usize, mod),
|
|
slice_val.sliceLen(mod),
|
|
sentinel_label,
|
|
},
|
|
);
|
|
}
|
|
|
|
// If the slice has a sentinel, we consider end_is_len
|
|
// is only true if it equals the length WITHOUT the
|
|
// sentinel, so we don't add a sentinel type.
|
|
const slice_len_val = try mod.intValue(Type.usize, slice_len);
|
|
if (end_val.eql(slice_len_val, Type.usize, mod)) {
|
|
end_is_len = true;
|
|
}
|
|
}
|
|
}
|
|
break :e end;
|
|
}
|
|
break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
|
|
}
|
|
if (!end_is_len) {
|
|
if (by_length) {
|
|
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
|
|
break :e try sema.coerce(block, Type.usize, uncasted_end, end_src);
|
|
} else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
|
|
}
|
|
return sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
|
|
};
|
|
|
|
const sentinel = s: {
|
|
if (sentinel_opt != .none) {
|
|
const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
|
|
break :s try sema.resolveConstDefinedValue(block, sentinel_src, casted, .{
|
|
.needed_comptime_reason = "slice sentinel must be comptime-known",
|
|
});
|
|
}
|
|
// If we are slicing to the end of something that is sentinel-terminated
|
|
// then the resulting slice type is also sentinel-terminated.
|
|
if (end_is_len) {
|
|
if (ptr_sentinel) |sent| {
|
|
break :s sent;
|
|
}
|
|
}
|
|
break :s null;
|
|
};
|
|
const slice_sentinel = if (sentinel_opt != .none) sentinel else null;
|
|
|
|
var checked_start_lte_end = by_length;
|
|
var runtime_src: ?LazySrcLoc = null;
|
|
|
|
// requirement: start <= end
|
|
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
|
|
if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| {
|
|
if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) {
|
|
return sema.fail(
|
|
block,
|
|
start_src,
|
|
"start index {} is larger than end index {}",
|
|
.{
|
|
start_val.fmtValue(Type.usize, mod),
|
|
end_val.fmtValue(Type.usize, mod),
|
|
},
|
|
);
|
|
}
|
|
checked_start_lte_end = true;
|
|
if (try sema.resolveValue(new_ptr)) |ptr_val| sentinel_check: {
|
|
const expected_sentinel = sentinel orelse break :sentinel_check;
|
|
const start_int = start_val.getUnsignedInt(mod).?;
|
|
const end_int = end_val.getUnsignedInt(mod).?;
|
|
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
|
|
|
|
const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
|
|
const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty);
|
|
const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
|
|
const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod);
|
|
const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty);
|
|
const actual_sentinel = switch (res) {
|
|
.runtime_load => break :sentinel_check,
|
|
.val => |v| v,
|
|
.needed_well_defined => |ty| return sema.fail(
|
|
block,
|
|
src,
|
|
"comptime dereference requires '{}' to have a well-defined layout, but it does not.",
|
|
.{ty.fmt(mod)},
|
|
),
|
|
.out_of_bounds => |ty| return sema.fail(
|
|
block,
|
|
end_src,
|
|
"slice end index {d} exceeds bounds of containing decl of type '{}'",
|
|
.{ end_int, ty.fmt(mod) },
|
|
),
|
|
};
|
|
|
|
if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{
|
|
expected_sentinel.fmtValue(elem_ty, mod),
|
|
actual_sentinel.fmtValue(elem_ty, mod),
|
|
});
|
|
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(block, msg);
|
|
}
|
|
} else {
|
|
runtime_src = ptr_src;
|
|
}
|
|
} else {
|
|
runtime_src = start_src;
|
|
}
|
|
} else {
|
|
runtime_src = end_src;
|
|
}
|
|
|
|
if (!checked_start_lte_end and block.wantSafety() and !block.is_comptime) {
|
|
// requirement: start <= end
|
|
assert(!block.is_comptime);
|
|
try sema.requireRuntimeBlock(block, src, runtime_src.?);
|
|
const ok = try block.addBinOp(.cmp_lte, start, end);
|
|
if (!sema.mod.comp.formatted_panics) {
|
|
try sema.addSafetyCheck(block, src, ok, .start_index_greater_than_end);
|
|
} else {
|
|
try sema.safetyCheckFormatted(block, src, ok, "panicStartGreaterThanEnd", &.{ start, end });
|
|
}
|
|
}
|
|
const new_len = if (by_length)
|
|
try sema.coerce(block, Type.usize, uncasted_end_opt, end_src)
|
|
else
|
|
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
|
|
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
|
|
|
|
const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod);
|
|
const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C;
|
|
|
|
if (opt_new_len_val) |new_len_val| {
|
|
const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema);
|
|
|
|
const return_ty = try sema.ptrType(.{
|
|
.child = (try mod.arrayType(.{
|
|
.len = new_len_int,
|
|
.sentinel = if (sentinel) |s| s.toIntern() else .none,
|
|
.child = elem_ty.toIntern(),
|
|
})).toIntern(),
|
|
.flags = .{
|
|
.alignment = new_ptr_ty_info.flags.alignment,
|
|
.is_const = new_ptr_ty_info.flags.is_const,
|
|
.is_allowzero = new_allowzero,
|
|
.is_volatile = new_ptr_ty_info.flags.is_volatile,
|
|
.address_space = new_ptr_ty_info.flags.address_space,
|
|
},
|
|
});
|
|
|
|
const opt_new_ptr_val = try sema.resolveValue(new_ptr);
|
|
const new_ptr_val = opt_new_ptr_val orelse {
|
|
const result = try block.addBitCast(return_ty, new_ptr);
|
|
if (block.wantSafety()) {
|
|
// requirement: slicing C ptr is non-null
|
|
if (ptr_ptr_child_ty.isCPtr(mod)) {
|
|
const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
|
|
try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
|
|
}
|
|
|
|
if (slice_ty.isSlice(mod)) {
|
|
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
|
|
const actual_len = if (slice_ty.sentinel(mod) == null)
|
|
slice_len_inst
|
|
else
|
|
try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
|
|
|
|
const actual_end = if (slice_sentinel != null)
|
|
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
|
|
else
|
|
end;
|
|
|
|
try sema.panicIndexOutOfBounds(block, src, actual_end, actual_len, .cmp_lte);
|
|
}
|
|
|
|
// requirement: result[new_len] == slice_sentinel
|
|
try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
|
|
}
|
|
return result;
|
|
};
|
|
|
|
if (!new_ptr_val.isUndef(mod)) {
|
|
return Air.internedToRef((try mod.getCoerced(
|
|
Value.fromInterned((try new_ptr_val.intern(new_ptr_ty, mod))),
|
|
return_ty,
|
|
)).toIntern());
|
|
}
|
|
|
|
// Special case: @as([]i32, undefined)[x..x]
|
|
if (new_len_int == 0) {
|
|
return mod.undefRef(return_ty);
|
|
}
|
|
|
|
return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
|
|
}
|
|
|
|
const return_ty = try sema.ptrType(.{
|
|
.child = elem_ty.toIntern(),
|
|
.sentinel = if (sentinel) |s| s.toIntern() else .none,
|
|
.flags = .{
|
|
.size = .Slice,
|
|
.alignment = new_ptr_ty_info.flags.alignment,
|
|
.is_const = new_ptr_ty_info.flags.is_const,
|
|
.is_volatile = new_ptr_ty_info.flags.is_volatile,
|
|
.is_allowzero = new_allowzero,
|
|
.address_space = new_ptr_ty_info.flags.address_space,
|
|
},
|
|
});
|
|
|
|
try sema.requireRuntimeBlock(block, src, runtime_src.?);
|
|
if (block.wantSafety()) {
|
|
// requirement: slicing C ptr is non-null
|
|
if (ptr_ptr_child_ty.isCPtr(mod)) {
|
|
const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
|
|
try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
|
|
}
|
|
|
|
// requirement: end <= len
|
|
const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
|
|
try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
|
|
else if (slice_ty.isSlice(mod)) blk: {
|
|
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
|
|
// we don't need to add one for sentinels because the
|
|
// underlying value data includes the sentinel
|
|
break :blk try mod.intRef(Type.usize, slice_val.sliceLen(mod));
|
|
}
|
|
|
|
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
|
|
if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst;
|
|
|
|
// we have to add one because slice lengths don't include the sentinel
|
|
break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
|
|
} else null;
|
|
if (opt_len_inst) |len_inst| {
|
|
const actual_end = if (slice_sentinel != null)
|
|
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
|
|
else
|
|
end;
|
|
try sema.panicIndexOutOfBounds(block, src, actual_end, len_inst, .cmp_lte);
|
|
}
|
|
|
|
// requirement: start <= end
|
|
try sema.panicIndexOutOfBounds(block, src, start, end, .cmp_lte);
|
|
}
|
|
const result = try block.addInst(.{
|
|
.tag = .slice,
|
|
.data = .{ .ty_pl = .{
|
|
.ty = Air.internedToRef(return_ty.toIntern()),
|
|
.payload = try sema.addExtra(Air.Bin{
|
|
.lhs = new_ptr,
|
|
.rhs = new_len,
|
|
}),
|
|
} },
|
|
});
|
|
if (block.wantSafety()) {
|
|
// requirement: result[new_len] == slice_sentinel
|
|
try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
/// Asserts that lhs and rhs types are both numeric.
/// Analyzes a (possibly heterogeneous) numeric comparison. Resolves the result
/// at comptime whenever possible (including against `undef`, NaN, and infinity),
/// and otherwise coerces both operands to a common type wide enough to hold
/// both before emitting a single compare instruction.
fn cmpNumeric(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    uncasted_lhs: Air.Inst.Ref,
    uncasted_rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);

    assert(lhs_ty.isNumeric(mod));
    assert(rhs_ty.isNumeric(mod));

    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
    const target = mod.getTarget();

    // One exception to heterogeneous comparison: comptime_float needs to
    // coerce to fixed-width float.

    const lhs = if (lhs_ty_tag == .ComptimeFloat and rhs_ty_tag == .Float)
        try sema.coerce(block, rhs_ty, uncasted_lhs, lhs_src)
    else
        uncasted_lhs;

    const rhs = if (lhs_ty_tag == .Float and rhs_ty_tag == .ComptimeFloat)
        try sema.coerce(block, lhs_ty, uncasted_rhs, rhs_src)
    else
        uncasted_rhs;

    // Try to fold the comparison at comptime. If only one side is known, we may
    // still be able to decide the result from the known side's value and the
    // other side's type (see `compareIntsOnlyPossibleResult`). Otherwise this
    // block yields the source location of the runtime-known operand.
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveValue(lhs)) |lhs_val| {
            if (try sema.resolveValue(rhs)) |rhs_val| {
                // Compare ints: const vs. undefined (or vice versa)
                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) {
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                        return if (res) .bool_true else .bool_false;
                    }
                } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) {
                    // Same check with the operands swapped, so the operator must
                    // be reversed.
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                        return if (res) .bool_true else .bool_false;
                    }
                }

                if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                    return mod.undefRef(Type.bool);
                }
                // NaN compares unequal to everything, including itself.
                if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
                    return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false;
                }
                return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema))
                    .bool_true
                else
                    .bool_false;
            } else {
                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
                    // Compare ints: const vs. var
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                        return if (res) .bool_true else .bool_false;
                    }
                }
                break :src rhs_src;
            }
        } else {
            // NOTE(review): this branch uses `resolveValueResolveLazy` while the
            // LHS branch above uses `resolveValue` + `resolveLazyValue`;
            // presumably equivalent here — confirm intent.
            if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
                if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
                    // Compare ints: var vs. const
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                        return if (res) .bool_true else .bool_false;
                    }
                }
            }
            break :src lhs_src;
        }
    };

    // TODO handle comparisons against lazy zero values
    // Some values can be compared against zero without being runtime-known or without forcing
    // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
    // of this function if we don't need to.
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // For floats, emit a float comparison instruction.
    const lhs_is_float = switch (lhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const rhs_is_float = switch (rhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };

    if (lhs_is_float and rhs_is_float) {
        // Smaller fixed-width floats coerce to larger fixed-width floats.
        // comptime_float coerces to fixed-width float.
        const dest_ty = x: {
            if (lhs_ty_tag == .ComptimeFloat) {
                break :x rhs_ty;
            } else if (rhs_ty_tag == .ComptimeFloat) {
                break :x lhs_ty;
            }
            if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) {
                break :x lhs_ty;
            } else {
                break :x rhs_ty;
            }
        };
        const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
        const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
        return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
    }
    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
        !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))
    else
        (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
    const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
        !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))
    else
        (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
    const dest_int_is_signed = lhs_is_signed or rhs_is_signed;

    // Set when one runtime-known operand is a float: the comparison is then done
    // in that float type rather than in a computed integer type.
    var dest_float_type: ?Type = null;

    // Number of two's-complement bits required to represent the LHS in the
    // common destination integer type.
    var lhs_bits: usize = undefined;
    if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| {
        if (lhs_val.isUndef(mod))
            return mod.undefRef(Type.bool);
        if (lhs_val.isNan(mod)) switch (op) {
            .neq => return .bool_true,
            else => return .bool_false,
        };
        // Comptime-known infinity decides the comparison by sign alone.
        if (lhs_val.isInf(mod)) switch (op) {
            .neq => return .bool_true,
            .eq => return .bool_false,
            .gt, .gte => return if (lhs_val.isNegativeInf(mod)) .bool_false else .bool_true,
            .lt, .lte => return if (lhs_val.isNegativeInf(mod)) .bool_true else .bool_false,
        };
        if (!rhs_is_signed) {
            switch (lhs_val.orderAgainstZero(mod)) {
                .gt => {},
                .eq => switch (op) { // LHS = 0, RHS is unsigned
                    .lte => return .bool_true,
                    .gt => return .bool_false,
                    else => {},
                },
                .lt => switch (op) { // LHS < 0, RHS is unsigned
                    .neq, .lt, .lte => return .bool_true,
                    .eq, .gt, .gte => return .bool_false,
                },
            }
        }
        if (lhs_is_float) {
            // A float with a fractional part can never equal an integer.
            if (lhs_val.floatHasFraction(mod)) {
                switch (op) {
                    .eq => return .bool_false,
                    .neq => return .bool_true,
                    else => {},
                }
            }

            var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod));
            defer bigint.deinit();
            if (lhs_val.floatHasFraction(mod)) {
                // Round toward the RHS so strict inequalities remain correct
                // after dropping the fractional part.
                if (lhs_is_signed) {
                    try bigint.addScalar(&bigint, -1);
                } else {
                    try bigint.addScalar(&bigint, 1);
                }
            }
            lhs_bits = bigint.toConst().bitCountTwosComp();
        } else {
            lhs_bits = lhs_val.intBitCountTwosComp(mod);
        }
        // An unsigned value needs one extra bit to fit in a signed destination.
        lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed);
    } else if (lhs_is_float) {
        dest_float_type = lhs_ty;
    } else {
        const int_info = lhs_ty.intInfo(mod);
        lhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    // Mirror of the LHS logic above, for the RHS operand.
    var rhs_bits: usize = undefined;
    if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
        if (rhs_val.isUndef(mod))
            return mod.undefRef(Type.bool);
        if (rhs_val.isNan(mod)) switch (op) {
            .neq => return .bool_true,
            else => return .bool_false,
        };
        if (rhs_val.isInf(mod)) switch (op) {
            .neq => return .bool_true,
            .eq => return .bool_false,
            .gt, .gte => return if (rhs_val.isNegativeInf(mod)) .bool_true else .bool_false,
            .lt, .lte => return if (rhs_val.isNegativeInf(mod)) .bool_false else .bool_true,
        };
        if (!lhs_is_signed) {
            switch (rhs_val.orderAgainstZero(mod)) {
                .gt => {},
                .eq => switch (op) { // RHS = 0, LHS is unsigned
                    .gte => return .bool_true,
                    .lt => return .bool_false,
                    else => {},
                },
                .lt => switch (op) { // RHS < 0, LHS is unsigned
                    .neq, .gt, .gte => return .bool_true,
                    .eq, .lt, .lte => return .bool_false,
                },
            }
        }
        if (rhs_is_float) {
            if (rhs_val.floatHasFraction(mod)) {
                switch (op) {
                    .eq => return .bool_false,
                    .neq => return .bool_true,
                    else => {},
                }
            }

            var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod));
            defer bigint.deinit();
            if (rhs_val.floatHasFraction(mod)) {
                if (rhs_is_signed) {
                    try bigint.addScalar(&bigint, -1);
                } else {
                    try bigint.addScalar(&bigint, 1);
                }
            }
            rhs_bits = bigint.toConst().bitCountTwosComp();
        } else {
            rhs_bits = rhs_val.intBitCountTwosComp(mod);
        }
        rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed);
    } else if (rhs_is_float) {
        dest_float_type = rhs_ty;
    } else {
        const int_info = rhs_ty.intInfo(mod);
        rhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    // Pick the common comparison type: a runtime float type if one was seen,
    // otherwise the smallest integer type that can hold both operands.
    const dest_ty = if (dest_float_type) |ft| ft else blk: {
        const max_bits = @max(lhs_bits, rhs_bits);
        const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
        const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
        break :blk try mod.intType(signedness, casted_bits);
    };
    const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);

    return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
|
|
|
|
/// Asserts that LHS value is an int or comptime int and not undefined, and
/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to
/// determine whether `op` has a guaranteed result.
/// If it cannot be determined, returns null.
/// Otherwise returns a bool for the guaranteed comparison operation.
fn compareIntsOnlyPossibleResult(
    sema: *Sema,
    lhs_val: Value,
    op: std.math.CompareOperator,
    rhs_ty: Type,
) Allocator.Error!?bool {
    const mod = sema.mod;
    const rhs_info = rhs_ty.intInfo(mod);
    // LHS is asserted non-lazy/defined, so ordering against zero cannot fail.
    const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable;
    const is_zero = vs_zero == .eq;
    const is_negative = vs_zero == .lt;
    const is_positive = vs_zero == .gt;

    // Anything vs. zero-sized type has guaranteed outcome.
    if (rhs_info.bits == 0) return switch (op) {
        .eq, .lte, .gte => is_zero,
        .neq, .lt, .gt => !is_zero,
    };

    // Special case for i1, which can only be 0 or -1.
    // Zero and positive ints have guaranteed outcome.
    if (rhs_info.bits == 1 and rhs_info.signedness == .signed) {
        if (is_positive) return switch (op) {
            .gt, .gte, .neq => true,
            .lt, .lte, .eq => false,
        };
        if (is_zero) return switch (op) {
            .gte => true,
            .lt => false,
            .gt, .lte, .eq, .neq => null,
        };
    }

    // Negative vs. unsigned has guaranteed outcome.
    if (rhs_info.signedness == .unsigned and is_negative) return switch (op) {
        .eq, .gt, .gte => false,
        .neq, .lt, .lte => true,
    };

    // A non-negative LHS needs one extra bit to be representable in a signed RHS.
    const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed);
    const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj;

    // No sized type can have more than 65535 bits.
    // The RHS type operand is either a runtime value or sized (but undefined) constant.
    if (req_bits > 65535) return switch (op) {
        .lt, .lte => is_negative,
        .gt, .gte => is_positive,
        .eq => false,
        .neq => true,
    };
    // Whether the LHS value is representable at all in the RHS type.
    const fits = req_bits <= rhs_info.bits;

    // Oversized int has guaranteed outcome.
    switch (op) {
        .eq => return if (!fits) false else null,
        .neq => return if (!fits) true else null,
        .lt, .lte => if (!fits) return is_negative,
        .gt, .gte => if (!fits) return !is_negative,
    }

    // For any other comparison, we need to know if the LHS value is
    // equal to the maximum or minimum possible value of the RHS type.
    const is_min, const is_max = edge: {
        if (is_zero and rhs_info.signedness == .unsigned) break :edge .{ true, false };

        // If fewer bits are required than the RHS has, LHS is strictly interior.
        if (req_bits != rhs_info.bits) break :edge .{ false, false };

        const ty = try mod.intType(
            if (is_negative) .signed else .unsigned,
            @intCast(req_bits),
        );
        const pop_count = lhs_val.popCount(ty, mod);

        // minInt of a signed type has exactly one set bit (the sign bit);
        // maxInt has all value bits set.
        if (is_negative) {
            break :edge .{ pop_count == 1, false };
        } else {
            break :edge .{ false, pop_count == req_bits - sign_adj };
        }
    };

    assert(fits);
    return switch (op) {
        .lt => if (is_max) false else null,
        .lte => if (is_min) true else null,
        .gt => if (is_min) false else null,
        .gte => if (is_max) true else null,
        .eq, .neq => unreachable,
    };
}
|
|
|
|
/// Asserts that lhs and rhs types are both vectors.
/// Emits an element-wise comparison, producing a vector of bool. The result is
/// folded at comptime when both operands are comptime-known.
fn cmpVector(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    assert(lhs_ty.zigTypeTag(mod) == .Vector);
    assert(rhs_ty.zigTypeTag(mod) == .Vector);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    // Coerce both operands to their peer-resolved common type.
    const peer_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
    const lhs_coerced = try sema.coerce(block, peer_ty, lhs, lhs_src);
    const rhs_coerced = try sema.coerce(block, peer_ty, rhs, rhs_src);

    // The comparison yields a bool vector of the same length.
    const bool_vec_ty = try mod.vectorType(.{
        .len = lhs_ty.vectorLen(mod),
        .child = .bool_type,
    });

    // Fold at comptime when both sides are known; otherwise remember which
    // operand forced a runtime comparison.
    const runtime_src: LazySrcLoc = src: {
        const lhs_val = (try sema.resolveValue(lhs_coerced)) orelse break :src lhs_src;
        const rhs_val = (try sema.resolveValue(rhs_coerced)) orelse break :src rhs_src;
        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
            return mod.undefRef(bool_vec_ty);
        }
        const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, peer_ty);
        return Air.internedToRef(cmp_val.toIntern());
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addCmpVector(lhs_coerced, rhs_coerced, op);
}
|
|
|
|
/// Wraps `inst` into the optional type `dest_ty`.
/// Comptime-known operands are interned directly; runtime operands produce a
/// `wrap_optional` instruction.
fn wrapOptional(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    if (try sema.resolveValue(inst)) |val| {
        // Comptime-known payload: build the optional value in the intern pool.
        const wrapped = try sema.mod.intern(.{ .opt = .{
            .ty = dest_ty.toIntern(),
            .val = val.toIntern(),
        } });
        return Air.internedToRef(wrapped);
    }

    try sema.requireRuntimeBlock(block, inst_src, null);
    return block.addTyOp(.wrap_optional, dest_ty, inst);
}
|
|
|
|
/// Wraps `inst` as the payload of the error union type `dest_ty`, coercing it
/// to the payload type first. Comptime-known payloads are interned directly;
/// runtime payloads produce a `wrap_errunion_payload` instruction.
fn wrapErrorUnionPayload(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const zcu = sema.mod;
    const payload_ty = dest_ty.errorUnionPayload(zcu);
    // Note: coercion errors are not reported at this call site
    // (`.report_err = false`).
    const coerced = try sema.coerceExtra(block, payload_ty, inst, inst_src, .{ .report_err = false });

    if (try sema.resolveValue(coerced)) |val| {
        const payload_val = try val.intern(payload_ty, zcu);
        const wrapped = try zcu.intern(.{ .error_union = .{
            .ty = dest_ty.toIntern(),
            .val = .{ .payload = payload_val },
        } });
        return Air.internedToRef(wrapped);
    }

    try sema.requireRuntimeBlock(block, inst_src, null);
    try sema.queueFullTypeResolution(payload_ty);
    return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced);
}
|
|
|
|
/// Wraps the error value `inst` into the error union type `dest_ty`, verifying
/// that the error is a member of the destination's error set. Comptime-known
/// errors are interned directly; runtime errors are coerced to the destination
/// error set and produce a `wrap_errunion_err` instruction.
fn wrapErrorUnionSet(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_ty = sema.typeOf(inst);
    const dest_err_set_ty = dest_ty.errorUnionSet(mod);
    if (try sema.resolveValue(inst)) |val| {
        // The concrete error name that must be present in the destination set.
        const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
        switch (dest_err_set_ty.toIntern()) {
            // anyerror admits every error; nothing to check.
            .anyerror_type => {},
            .adhoc_inferred_error_set_type => ok: {
                // Check against the current function's inferred error set.
                const ies = sema.fn_ret_ty_ies.?;
                switch (ies.resolved) {
                    .anyerror_type => break :ok,
                    // Not yet resolved: fall back to in-memory error set coercion.
                    .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                        break :ok;
                    },
                    // Already resolved to a concrete set: look the name up directly.
                    else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
                        break :ok;
                    },
                }
                return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
            },
            else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| ok: {
                    if (error_set_type.nameIndex(ip, expected_name) != null) break :ok;
                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                },
                .inferred_error_set_type => |func_index| ok: {
                    // We carefully do this in an order that avoids unnecessarily
                    // resolving the destination error set type.
                    switch (ip.funcIesResolved(func_index).*) {
                        .anyerror_type => break :ok,
                        .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                            break :ok;
                        },
                        else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
                            break :ok;
                        },
                    }

                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                },
                else => unreachable,
            },
        }
        // Membership verified: intern the wrapped error value.
        return Air.internedToRef((try mod.intern(.{ .error_union = .{
            .ty = dest_ty.toIntern(),
            .val = .{ .err_name = expected_name },
        } })));
    }

    try sema.requireRuntimeBlock(block, inst_src, null);
    const coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src);
    return block.addTyOp(.wrap_errunion_err, dest_ty, coerced);
}
|
|
|
|
/// Extracts the tag of union operand `un` as a value of `enum_ty`.
/// Resolves at comptime when possible; otherwise emits a `get_union_tag`
/// instruction.
fn unionToTag(
    sema: *Sema,
    block: *Block,
    enum_ty: Type,
    un: Air.Inst.Ref,
    un_src: LazySrcLoc,
) !Air.Inst.Ref {
    const zcu = sema.mod;

    // A tag type with a single possible value needs no instruction at all.
    if (try sema.typeHasOnePossibleValue(enum_ty)) |opv| {
        return Air.internedToRef(opv.toIntern());
    }

    // Comptime-known union: read its tag directly.
    if (try sema.resolveValue(un)) |union_val| {
        const tag = union_val.unionTag(zcu).?;
        return if (tag.isUndef(zcu))
            try zcu.undefRef(enum_ty)
        else
            Air.internedToRef(tag.toIntern());
    }

    try sema.requireRuntimeBlock(block, un_src, null);
    return block.addTyOp(.get_union_tag, enum_ty, un);
}
|
|
|
|
/// Strategy used by peer type resolution to decide what kind of common type a set of
/// peers should resolve to. Each peer's type selects an initial strategy (`select`),
/// and pairwise `merge` folds them into a single strategy for the whole set.
/// NOTE(review): the declaration order of these tags appears significant — `merge`
/// canonicalizes pairs via `@intFromEnum` ordering — so do not reorder tags casually.
const PeerResolveStrategy = enum {
    /// The type is not known.
    /// If refined no further, this is equivalent to `exact`.
    unknown,
    /// The type may be an error set or error union.
    /// If refined no further, it is an error set.
    error_set,
    /// The type must be some error union.
    error_union,
    /// The type may be @TypeOf(null), an optional or a C pointer.
    /// If refined no further, it is @TypeOf(null).
    nullable,
    /// The type must be some optional or a C pointer.
    /// If refined no further, it is an optional.
    optional,
    /// The type must be either an array or a vector.
    /// If refined no further, it is an array.
    array,
    /// The type must be a vector.
    vector,
    /// The type must be a C pointer.
    c_ptr,
    /// The type must be a pointer (C or not).
    /// If refined no further, it is a non-C pointer.
    ptr,
    /// The type must be a function or a pointer to a function.
    /// If refined no further, it is a function.
    func,
    /// The type must be an enum literal, or some specific enum or union. Which one is decided
    /// afterwards based on the types in question.
    enum_or_union,
    /// The type must be some integer or float type.
    /// If refined no further, it is `comptime_int`.
    comptime_int,
    /// The type must be some float type.
    /// If refined no further, it is `comptime_float`.
    comptime_float,
    /// The type must be some float or fixed-width integer type.
    /// If refined no further, it is some fixed-width integer type.
    fixed_int,
    /// The type must be some fixed-width float type.
    fixed_float,
    /// The type must be a struct literal or tuple type.
    coercible_struct,
    /// The peers must all be of the same type.
    exact,

    /// Given two strategies, find a strategy that satisfies both, if one exists. If no such
    /// strategy exists, any strategy may be returned; an error will be emitted when the caller
    /// attempts to use the strategy to resolve the type.
    /// Strategy `a` comes from the peer in `reason_peer`, while strategy `b` comes from the peer at
    /// index `b_peer_idx`. `reason_peer` is updated to reflect the reason for the new strategy.
    fn merge(a: PeerResolveStrategy, b: PeerResolveStrategy, reason_peer: *usize, b_peer_idx: usize) PeerResolveStrategy {
        // Our merging should be order-independent. Thus, even though the union order is arbitrary,
        // by sorting the tags and switching first on the smaller, we have half as many cases to
        // worry about (since we avoid the duplicates).
        const s0_is_a = @intFromEnum(a) <= @intFromEnum(b);
        const s0 = if (s0_is_a) a else b;
        const s1 = if (s0_is_a) b else a;

        // Records which of the (sorted) inputs determined the merged strategy, so that
        // `reason_peer` can be pointed at the responsible peer afterwards.
        const ReasonMethod = enum {
            all_s0,
            all_s1,
            either,
        };

        const reason_method: ReasonMethod, const strat: PeerResolveStrategy = switch (s0) {
            .unknown => .{ .all_s1, s1 },
            .error_set => switch (s1) {
                .error_set => .{ .either, .error_set },
                else => .{ .all_s0, .error_union },
            },
            .error_union => switch (s1) {
                .error_union => .{ .either, .error_union },
                else => .{ .all_s0, .error_union },
            },
            .nullable => switch (s1) {
                .nullable => .{ .either, .nullable },
                .c_ptr => .{ .all_s1, .c_ptr },
                else => .{ .all_s0, .optional },
            },
            .optional => switch (s1) {
                .optional => .{ .either, .optional },
                .c_ptr => .{ .all_s1, .c_ptr },
                else => .{ .all_s0, .optional },
            },
            .array => switch (s1) {
                .array => .{ .either, .array },
                .vector => .{ .all_s1, .vector },
                else => .{ .all_s0, .array },
            },
            .vector => switch (s1) {
                .vector => .{ .either, .vector },
                else => .{ .all_s0, .vector },
            },
            .c_ptr => switch (s1) {
                .c_ptr => .{ .either, .c_ptr },
                else => .{ .all_s0, .c_ptr },
            },
            .ptr => switch (s1) {
                .ptr => .{ .either, .ptr },
                else => .{ .all_s0, .ptr },
            },
            .func => switch (s1) {
                .func => .{ .either, .func },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .enum_or_union => switch (s1) {
                .enum_or_union => .{ .either, .enum_or_union },
                else => .{ .all_s0, .enum_or_union },
            },
            .comptime_int => switch (s1) {
                .comptime_int => .{ .either, .comptime_int },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .comptime_float => switch (s1) {
                .comptime_float => .{ .either, .comptime_float },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .fixed_int => switch (s1) {
                .fixed_int => .{ .either, .fixed_int },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .fixed_float => switch (s1) {
                .fixed_float => .{ .either, .fixed_float },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .coercible_struct => switch (s1) {
                .exact => .{ .all_s1, .exact },
                else => .{ .all_s0, .coercible_struct },
            },
            .exact => .{ .all_s0, .exact },
        };

        // Update `reason_peer` to point at whichever peer is responsible for the merged strategy.
        switch (reason_method) {
            .all_s0 => {
                if (!s0_is_a) {
                    reason_peer.* = b_peer_idx;
                }
            },
            .all_s1 => {
                if (s0_is_a) {
                    reason_peer.* = b_peer_idx;
                }
            },
            .either => {
                // Prefer the earliest peer
                reason_peer.* = @min(reason_peer.*, b_peer_idx);
            },
        }

        return strat;
    }

    /// Map a single peer's type to the initial resolution strategy it implies.
    fn select(ty: Type, mod: *Module) PeerResolveStrategy {
        return switch (ty.zigTypeTag(mod)) {
            .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact,
            .NoReturn, .Undefined => .unknown,
            .Null => .nullable,
            .ComptimeInt => .comptime_int,
            .Int => .fixed_int,
            .ComptimeFloat => .comptime_float,
            .Float => .fixed_float,
            .Pointer => if (ty.ptrInfo(mod).flags.size == .C) .c_ptr else .ptr,
            .Array => .array,
            .Vector => .vector,
            .Optional => .optional,
            .ErrorSet => .error_set,
            .ErrorUnion => .error_union,
            .EnumLiteral, .Enum, .Union => .enum_or_union,
            .Struct => if (ty.isTupleOrAnonStruct(mod)) .coercible_struct else .exact,
            .Fn => .func,
        };
    }
};
|
|
|
|
/// Outcome of peer type resolution: either the resolved type, or enough information
/// to produce a useful error message pointing at the conflicting peers.
const PeerResolveResult = union(enum) {
    /// The peer type resolution was successful, and resulted in the given type.
    success: Type,
    /// There was some generic conflict between two peers.
    conflict: struct {
        peer_idx_a: usize,
        peer_idx_b: usize,
    },
    /// There was an error when resolving the type of a struct or tuple field.
    field_error: struct {
        /// The name of the field which caused the failure.
        field_name: []const u8,
        /// The type of this field in each peer.
        field_types: []Type,
        /// The error from resolving the field type. Guaranteed not to be `success`.
        sub_result: *PeerResolveResult,
    },

    /// Build an `ErrorMsg` describing this failed resolution. Must not be called on
    /// `success`. Walks the chain of `field_error`s (emitting one note per nesting
    /// level) until a two-peer `conflict` is reached, which terminates the loop.
    /// Caller owns the returned message (it is destroyed on error via `errdefer`).
    fn report(
        result: PeerResolveResult,
        sema: *Sema,
        block: *Block,
        src: LazySrcLoc,
        instructions: []const Air.Inst.Ref,
        candidate_srcs: Module.PeerTypeCandidateSrc,
    ) !*Module.ErrorMsg {
        const mod = sema.mod;
        const decl_ptr = mod.declPtr(block.src_decl);

        var opt_msg: ?*Module.ErrorMsg = null;
        errdefer if (opt_msg) |msg| msg.destroy(sema.gpa);

        // If we mention fields we'll want to include field types, so put peer types in a buffer
        var peer_tys = try sema.arena.alloc(Type, instructions.len);
        for (peer_tys, instructions) |*ty, inst| {
            ty.* = sema.typeOf(inst);
        }

        var cur = result;
        while (true) {
            // Set by the `.conflict` arm below; consumed by the two-peer conflict
            // handling after the switch.
            var conflict_idx: [2]usize = undefined;

            switch (cur) {
                .success => unreachable,
                .conflict => |conflict| {
                    // Fall through to two-peer conflict handling below
                    conflict_idx = .{
                        conflict.peer_idx_a,
                        conflict.peer_idx_b,
                    };
                },
                .field_error => |field_error| {
                    const fmt = "struct field '{s}' has conflicting types";
                    const args = .{field_error.field_name};
                    // First failure becomes the root error; deeper failures become notes on it.
                    if (opt_msg) |msg| {
                        try sema.errNote(block, src, msg, fmt, args);
                    } else {
                        opt_msg = try sema.errMsg(block, src, fmt, args);
                    }

                    // Continue on to child error
                    cur = field_error.sub_result.*;
                    // From here on, peer types refer to the conflicting field, not the whole peer.
                    peer_tys = field_error.field_types;
                    continue;
                },
            }

            // This is the path for reporting a generic conflict between two peers.

            if (conflict_idx[1] < conflict_idx[0]) {
                // b comes first in source, so it's better if it comes first in the error
                std.mem.swap(usize, &conflict_idx[0], &conflict_idx[1]);
            }

            const conflict_tys: [2]Type = .{
                peer_tys[conflict_idx[0]],
                peer_tys[conflict_idx[1]],
            };
            const conflict_srcs: [2]?LazySrcLoc = .{
                candidate_srcs.resolve(mod, decl_ptr, conflict_idx[0]),
                candidate_srcs.resolve(mod, decl_ptr, conflict_idx[1]),
            };

            const fmt = "incompatible types: '{}' and '{}'";
            const args = .{
                conflict_tys[0].fmt(mod),
                conflict_tys[1].fmt(mod),
            };
            const msg = if (opt_msg) |msg| msg: {
                try sema.errNote(block, src, msg, fmt, args);
                break :msg msg;
            } else msg: {
                const msg = try sema.errMsg(block, src, fmt, args);
                opt_msg = msg;
                break :msg msg;
            };

            // Attach "type '...' here" notes when we know where each conflicting peer came from.
            if (conflict_srcs[0]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(mod)});
            if (conflict_srcs[1]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(mod)});

            // No child error
            break;
        }

        return opt_msg.?;
    }
};
|
|
|
|
/// Resolve the common type of a set of peer instructions (e.g. the arms of a
/// `switch` or the operands of `break`s targeting one block).
/// Zero peers resolve to `noreturn`; a single peer resolves to its own type.
/// On an unresolvable conflict, emits a compile error built from
/// `candidate_srcs` and fails the current block.
fn resolvePeerTypes(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    instructions: []const Air.Inst.Ref,
    candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
    // Trivial cases need no analysis.
    if (instructions.len == 0) return Type.noreturn;
    if (instructions.len == 1) return sema.typeOf(instructions[0]);

    // Gather each peer's type and (if comptime-known) value. The inner resolver
    // mutates these slices as it refines, so they are freshly allocated here.
    const types = try sema.arena.alloc(?Type, instructions.len);
    const values = try sema.arena.alloc(?Value, instructions.len);
    for (instructions, types, values) |inst, *ty_slot, *val_slot| {
        ty_slot.* = sema.typeOf(inst);
        val_slot.* = try sema.resolveValue(inst);
    }

    const result = try sema.resolvePeerTypesInner(block, src, types, values);
    switch (result) {
        .success => |ty| return ty,
        else => {
            // Any non-success variant is turned into a detailed error message.
            const msg = try result.report(sema, block, src, instructions, candidate_srcs);
            return sema.failWithOwnedErrorMsg(block, msg);
        },
    }
}
|
|
|
|
fn resolvePeerTypesInner(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
peer_tys: []?Type,
|
|
peer_vals: []?Value,
|
|
) !PeerResolveResult {
|
|
const mod = sema.mod;
|
|
const ip = &mod.intern_pool;
|
|
|
|
var strat_reason: usize = 0;
|
|
var s: PeerResolveStrategy = .unknown;
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
s = s.merge(PeerResolveStrategy.select(ty, mod), &strat_reason, i);
|
|
}
|
|
|
|
if (s == .unknown) {
|
|
// The whole thing was noreturn or undefined - try to do an exact match
|
|
s = .exact;
|
|
} else {
|
|
// There was something other than noreturn and undefined, so we can ignore those peers
|
|
for (peer_tys) |*ty_ptr| {
|
|
const ty = ty_ptr.* orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.NoReturn, .Undefined => ty_ptr.* = null,
|
|
else => {},
|
|
}
|
|
}
|
|
}
|
|
|
|
const target = mod.getTarget();
|
|
|
|
switch (s) {
|
|
.unknown => unreachable,
|
|
|
|
.error_set => {
|
|
var final_set: ?Type = null;
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
if (ty.zigTypeTag(mod) != .ErrorSet) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
if (final_set) |cur_set| {
|
|
final_set = try sema.maybeMergeErrorSets(block, src, cur_set, ty);
|
|
} else {
|
|
final_set = ty;
|
|
}
|
|
}
|
|
return .{ .success = final_set.? };
|
|
},
|
|
|
|
.error_union => {
|
|
var final_set: ?Type = null;
|
|
for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
|
|
const ty = ty_ptr.* orelse continue;
|
|
const set_ty = switch (ty.zigTypeTag(mod)) {
|
|
.ErrorSet => blk: {
|
|
ty_ptr.* = null; // no payload to decide on
|
|
val_ptr.* = null;
|
|
break :blk ty;
|
|
},
|
|
.ErrorUnion => blk: {
|
|
const set_ty = ty.errorUnionSet(mod);
|
|
ty_ptr.* = ty.errorUnionPayload(mod);
|
|
if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) {
|
|
.error_union => |eu| switch (eu.val) {
|
|
.payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip),
|
|
.err_name => val_ptr.* = null,
|
|
},
|
|
.undef => val_ptr.* = Value.fromInterned((try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() }))),
|
|
else => unreachable,
|
|
};
|
|
break :blk set_ty;
|
|
},
|
|
else => continue, // whole type is the payload
|
|
};
|
|
if (final_set) |cur_set| {
|
|
final_set = try sema.maybeMergeErrorSets(block, src, cur_set, set_ty);
|
|
} else {
|
|
final_set = set_ty;
|
|
}
|
|
}
|
|
assert(final_set != null);
|
|
const final_payload = switch (try sema.resolvePeerTypesInner(
|
|
block,
|
|
src,
|
|
peer_tys,
|
|
peer_vals,
|
|
)) {
|
|
.success => |ty| ty,
|
|
else => |result| return result,
|
|
};
|
|
return .{ .success = try mod.errorUnionType(final_set.?, final_payload) };
|
|
},
|
|
|
|
.nullable => {
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
if (!ty.eql(Type.null, mod)) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
return .{ .success = Type.null };
|
|
},
|
|
|
|
.optional => {
|
|
for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
|
|
const ty = ty_ptr.* orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Null => {
|
|
ty_ptr.* = null;
|
|
val_ptr.* = null;
|
|
},
|
|
.Optional => {
|
|
ty_ptr.* = ty.optionalChild(mod);
|
|
if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(mod)) opt_val.optionalValue(mod) else null;
|
|
},
|
|
else => {},
|
|
}
|
|
}
|
|
const child_ty = switch (try sema.resolvePeerTypesInner(
|
|
block,
|
|
src,
|
|
peer_tys,
|
|
peer_vals,
|
|
)) {
|
|
.success => |ty| ty,
|
|
else => |result| return result,
|
|
};
|
|
return .{ .success = try mod.optionalType(child_ty.toIntern()) };
|
|
},
|
|
|
|
.array => {
|
|
// Index of the first non-null peer
|
|
var opt_first_idx: ?usize = null;
|
|
// Index of the first array or vector peer (i.e. not a tuple)
|
|
var opt_first_arr_idx: ?usize = null;
|
|
// Set to non-null once we see any peer, even a tuple
|
|
var len: u64 = undefined;
|
|
var sentinel: ?Value = undefined;
|
|
// Only set once we see a non-tuple peer
|
|
var elem_ty: Type = undefined;
|
|
|
|
for (peer_tys, 0..) |*ty_ptr, i| {
|
|
const ty = ty_ptr.* orelse continue;
|
|
|
|
if (!ty.isArrayOrVector(mod)) {
|
|
// We allow tuples of the correct length. We won't validate their elem type, since the elements can be coerced.
|
|
const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
if (opt_first_idx) |first_idx| {
|
|
if (arr_like.len != len) return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
} else {
|
|
opt_first_idx = i;
|
|
len = arr_like.len;
|
|
}
|
|
|
|
sentinel = null;
|
|
|
|
continue;
|
|
}
|
|
|
|
const first_arr_idx = opt_first_arr_idx orelse {
|
|
if (opt_first_idx == null) {
|
|
opt_first_idx = i;
|
|
len = ty.arrayLen(mod);
|
|
sentinel = ty.sentinel(mod);
|
|
}
|
|
opt_first_arr_idx = i;
|
|
elem_ty = ty.childType(mod);
|
|
continue;
|
|
};
|
|
|
|
if (ty.arrayLen(mod) != len) return .{ .conflict = .{
|
|
.peer_idx_a = first_arr_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
const peer_elem_ty = ty.childType(mod);
|
|
if (!peer_elem_ty.eql(elem_ty, mod)) coerce: {
|
|
const peer_elem_coerces_to_elem =
|
|
try sema.coerceInMemoryAllowed(block, elem_ty, peer_elem_ty, false, mod.getTarget(), src, src);
|
|
if (peer_elem_coerces_to_elem == .ok) {
|
|
break :coerce;
|
|
}
|
|
|
|
const elem_coerces_to_peer_elem =
|
|
try sema.coerceInMemoryAllowed(block, peer_elem_ty, elem_ty, false, mod.getTarget(), src, src);
|
|
if (elem_coerces_to_peer_elem == .ok) {
|
|
elem_ty = peer_elem_ty;
|
|
break :coerce;
|
|
}
|
|
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_arr_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
|
|
if (sentinel) |cur_sent| {
|
|
if (ty.sentinel(mod)) |peer_sent| {
|
|
if (!peer_sent.eql(cur_sent, elem_ty, mod)) sentinel = null;
|
|
} else {
|
|
sentinel = null;
|
|
}
|
|
}
|
|
}
|
|
|
|
// There should always be at least one array or vector peer
|
|
assert(opt_first_arr_idx != null);
|
|
|
|
return .{ .success = try mod.arrayType(.{
|
|
.len = len,
|
|
.child = elem_ty.toIntern(),
|
|
.sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none,
|
|
}) };
|
|
},
|
|
|
|
.vector => {
|
|
var len: ?u64 = null;
|
|
var first_idx: usize = undefined;
|
|
for (peer_tys, peer_vals, 0..) |*ty_ptr, *val_ptr, i| {
|
|
const ty = ty_ptr.* orelse continue;
|
|
|
|
if (!ty.isArrayOrVector(mod)) {
|
|
// Allow tuples of the correct length
|
|
const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
if (len) |expect_len| {
|
|
if (arr_like.len != expect_len) return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
} else {
|
|
len = arr_like.len;
|
|
first_idx = i;
|
|
}
|
|
|
|
// Tuples won't participate in the child type resolution. We'll resolve without
|
|
// them, and if the tuples have a bad type, we'll get a coercion error later.
|
|
ty_ptr.* = null;
|
|
val_ptr.* = null;
|
|
|
|
continue;
|
|
}
|
|
|
|
if (len) |expect_len| {
|
|
if (ty.arrayLen(mod) != expect_len) return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
} else {
|
|
len = ty.arrayLen(mod);
|
|
first_idx = i;
|
|
}
|
|
|
|
ty_ptr.* = ty.childType(mod);
|
|
val_ptr.* = null; // multiple child vals, so we can't easily use them in PTR
|
|
}
|
|
|
|
const child_ty = switch (try sema.resolvePeerTypesInner(
|
|
block,
|
|
src,
|
|
peer_tys,
|
|
peer_vals,
|
|
)) {
|
|
.success => |ty| ty,
|
|
else => |result| return result,
|
|
};
|
|
|
|
return .{ .success = try mod.vectorType(.{
|
|
.len = @intCast(len.?),
|
|
.child = child_ty.toIntern(),
|
|
}) };
|
|
},
|
|
|
|
.c_ptr => {
|
|
var opt_ptr_info: ?InternPool.Key.PtrType = null;
|
|
var first_idx: usize = undefined;
|
|
for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
|
|
const ty = opt_ty orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.ComptimeInt => continue, // comptime-known integers can always coerce to C pointers
|
|
.Int => {
|
|
if (opt_val != null) {
|
|
// Always allow the coercion for comptime-known ints
|
|
continue;
|
|
} else {
|
|
// Runtime-known, so check if the type is no bigger than a usize
|
|
const ptr_bits = target.ptrBitWidth();
|
|
const bits = ty.intInfo(mod).bits;
|
|
if (bits <= ptr_bits) continue;
|
|
}
|
|
},
|
|
.Null => continue,
|
|
else => {},
|
|
}
|
|
|
|
if (!ty.isPtrAtRuntime(mod)) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
// Goes through optionals
|
|
const peer_info = ty.ptrInfo(mod);
|
|
|
|
var ptr_info = opt_ptr_info orelse {
|
|
opt_ptr_info = peer_info;
|
|
opt_ptr_info.?.flags.size = .C;
|
|
first_idx = i;
|
|
continue;
|
|
};
|
|
|
|
// Try peer -> cur, then cur -> peer
|
|
ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse {
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}).toIntern();
|
|
|
|
if (ptr_info.sentinel != .none and peer_info.sentinel != .none) {
|
|
const peer_sent = try ip.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child);
|
|
const ptr_sent = try ip.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child);
|
|
if (ptr_sent == peer_sent) {
|
|
ptr_info.sentinel = ptr_sent;
|
|
} else {
|
|
ptr_info.sentinel = .none;
|
|
}
|
|
} else {
|
|
ptr_info.sentinel = .none;
|
|
}
|
|
|
|
// Note that the align can be always non-zero; Module.ptrType will canonicalize it
|
|
ptr_info.flags.alignment = InternPool.Alignment.min(
|
|
if (ptr_info.flags.alignment != .none)
|
|
ptr_info.flags.alignment
|
|
else
|
|
Type.fromInterned(ptr_info.child).abiAlignment(mod),
|
|
|
|
if (peer_info.flags.alignment != .none)
|
|
peer_info.flags.alignment
|
|
else
|
|
Type.fromInterned(peer_info.child).abiAlignment(mod),
|
|
);
|
|
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
|
|
if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or
|
|
ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size)
|
|
{
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
|
|
ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const;
|
|
ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;
|
|
|
|
opt_ptr_info = ptr_info;
|
|
}
|
|
return .{ .success = try sema.ptrType(opt_ptr_info.?) };
|
|
},
|
|
|
|
.ptr => {
|
|
// If we've resolved to a `[]T` but then see a `[*]T`, we can resolve to a `[*]T` only
|
|
// if there were no actual slices. Else, we want the slice index to report a conflict.
|
|
var opt_slice_idx: ?usize = null;
|
|
|
|
var opt_ptr_info: ?InternPool.Key.PtrType = null;
|
|
var first_idx: usize = undefined;
|
|
var other_idx: usize = undefined; // We sometimes need a second peer index to report a generic error
|
|
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(mod)) {
|
|
.Pointer => ty.ptrInfo(mod),
|
|
.Fn => .{
|
|
.child = ty.toIntern(),
|
|
.flags = .{
|
|
.address_space = target_util.defaultAddressSpace(target, .global_constant),
|
|
},
|
|
},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
};
|
|
|
|
switch (peer_info.flags.size) {
|
|
.One, .Many => {},
|
|
.Slice => opt_slice_idx = i,
|
|
.C => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
|
|
var ptr_info = opt_ptr_info orelse {
|
|
opt_ptr_info = peer_info;
|
|
first_idx = i;
|
|
continue;
|
|
};
|
|
|
|
other_idx = i;
|
|
|
|
// We want to return this in a lot of cases, so alias it here for convenience
|
|
const generic_err: PeerResolveResult = .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
// Note that the align can be always non-zero; Type.ptr will canonicalize it
|
|
ptr_info.flags.alignment = Alignment.min(
|
|
if (ptr_info.flags.alignment != .none)
|
|
ptr_info.flags.alignment
|
|
else
|
|
try sema.typeAbiAlignment(Type.fromInterned(ptr_info.child)),
|
|
|
|
if (peer_info.flags.alignment != .none)
|
|
peer_info.flags.alignment
|
|
else
|
|
try sema.typeAbiAlignment(Type.fromInterned(peer_info.child)),
|
|
);
|
|
|
|
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
|
|
return generic_err;
|
|
}
|
|
|
|
if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or
|
|
ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size)
|
|
{
|
|
return generic_err;
|
|
}
|
|
|
|
ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const;
|
|
ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;
|
|
|
|
const peer_sentinel: InternPool.Index = switch (peer_info.flags.size) {
|
|
.One => switch (ip.indexToKey(peer_info.child)) {
|
|
.array_type => |array_type| array_type.sentinel,
|
|
else => .none,
|
|
},
|
|
.Many, .Slice => peer_info.sentinel,
|
|
.C => unreachable,
|
|
};
|
|
|
|
const cur_sentinel: InternPool.Index = switch (ptr_info.flags.size) {
|
|
.One => switch (ip.indexToKey(ptr_info.child)) {
|
|
.array_type => |array_type| array_type.sentinel,
|
|
else => .none,
|
|
},
|
|
.Many, .Slice => ptr_info.sentinel,
|
|
.C => unreachable,
|
|
};
|
|
|
|
// We abstract array handling slightly so that tuple pointers can work like array pointers
|
|
const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child));
|
|
const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child));
|
|
|
|
// This switch is just responsible for deciding the size and pointee (not including
|
|
// single-pointer array sentinel).
|
|
good: {
|
|
switch (peer_info.flags.size) {
|
|
.One => switch (ptr_info.flags.size) {
|
|
.One => {
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
|
|
const cur_arr = cur_pointee_array orelse return generic_err;
|
|
const peer_arr = peer_pointee_array orelse return generic_err;
|
|
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| {
|
|
// *[n:x]T + *[n:y]T = *[n]T
|
|
if (cur_arr.len == peer_arr.len) {
|
|
ptr_info.child = (try mod.arrayType(.{
|
|
.len = cur_arr.len,
|
|
.child = elem_ty.toIntern(),
|
|
})).toIntern();
|
|
break :good;
|
|
}
|
|
// *[a]T + *[b]T = []T
|
|
ptr_info.flags.size = .Slice;
|
|
ptr_info.child = elem_ty.toIntern();
|
|
break :good;
|
|
}
|
|
|
|
if (peer_arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// *struct{} + *[a]T = []T
|
|
ptr_info.flags.size = .Slice;
|
|
ptr_info.child = cur_arr.elem_ty.toIntern();
|
|
break :good;
|
|
}
|
|
|
|
if (cur_arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// *[a]T + *struct{} = []T
|
|
ptr_info.flags.size = .Slice;
|
|
ptr_info.child = peer_arr.elem_ty.toIntern();
|
|
break :good;
|
|
}
|
|
|
|
return generic_err;
|
|
},
|
|
.Many => {
|
|
// Only works for *[n]T + [*]T -> [*]T
|
|
const arr = peer_pointee_array orelse return generic_err;
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
if (arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// *struct{} + [*]T -> [*]T
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.Slice => {
|
|
// Only works for *[n]T + []T -> []T
|
|
const arr = peer_pointee_array orelse return generic_err;
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
if (arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// *struct{} + []T -> []T
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.C => unreachable,
|
|
},
|
|
.Many => switch (ptr_info.flags.size) {
|
|
.One => {
|
|
// Only works for [*]T + *[n]T -> [*]T
|
|
const arr = cur_pointee_array orelse return generic_err;
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.flags.size = .Many;
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
if (arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// [*]T + *struct{} -> [*]T
|
|
ptr_info.flags.size = .Many;
|
|
ptr_info.child = peer_info.child;
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.Many => {
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.Slice => {
|
|
// Only works if no peers are actually slices
|
|
if (opt_slice_idx) |slice_idx| {
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = slice_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
// Okay, then works for [*]T + "[]T" -> [*]T
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.flags.size = .Many;
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.C => unreachable,
|
|
},
|
|
.Slice => switch (ptr_info.flags.size) {
|
|
.One => {
|
|
// Only works for []T + *[n]T -> []T
|
|
const arr = cur_pointee_array orelse return generic_err;
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.flags.size = .Slice;
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
if (arr.elem_ty.toIntern() == .noreturn_type) {
|
|
// []T + *struct{} -> []T
|
|
ptr_info.flags.size = .Slice;
|
|
ptr_info.child = peer_info.child;
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.Many => {
|
|
// Impossible! (current peer is an actual slice)
|
|
return generic_err;
|
|
},
|
|
.Slice => {
|
|
if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
|
|
ptr_info.child = pointee.toIntern();
|
|
break :good;
|
|
}
|
|
return generic_err;
|
|
},
|
|
.C => unreachable,
|
|
},
|
|
.C => unreachable,
|
|
}
|
|
}
|
|
|
|
const sentinel_ty = switch (ptr_info.flags.size) {
|
|
.One => switch (ip.indexToKey(ptr_info.child)) {
|
|
.array_type => |array_type| array_type.child,
|
|
else => ptr_info.child,
|
|
},
|
|
.Many, .Slice, .C => ptr_info.child,
|
|
};
|
|
|
|
sentinel: {
|
|
no_sentinel: {
|
|
if (peer_sentinel == .none) break :no_sentinel;
|
|
if (cur_sentinel == .none) break :no_sentinel;
|
|
const peer_sent_coerced = try ip.getCoerced(sema.gpa, peer_sentinel, sentinel_ty);
|
|
const cur_sent_coerced = try ip.getCoerced(sema.gpa, cur_sentinel, sentinel_ty);
|
|
if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel;
|
|
// Sentinels match
|
|
if (ptr_info.flags.size == .One) switch (ip.indexToKey(ptr_info.child)) {
|
|
.array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
|
|
.len = array_type.len,
|
|
.child = array_type.child,
|
|
.sentinel = cur_sent_coerced,
|
|
})).toIntern(),
|
|
else => unreachable,
|
|
} else {
|
|
ptr_info.sentinel = cur_sent_coerced;
|
|
}
|
|
break :sentinel;
|
|
}
|
|
// Clear existing sentinel
|
|
ptr_info.sentinel = .none;
|
|
switch (ip.indexToKey(ptr_info.child)) {
|
|
.array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
|
|
.len = array_type.len,
|
|
.child = array_type.child,
|
|
.sentinel = .none,
|
|
})).toIntern(),
|
|
else => {},
|
|
}
|
|
}
|
|
|
|
opt_ptr_info = ptr_info;
|
|
}
|
|
|
|
// Before we succeed, check the pointee type. If we tried to apply PTR to (for instance)
|
|
// &.{} and &.{}, we'll currently have a pointer type of `*[0]noreturn` - we wanted to
|
|
// coerce the empty struct to a specific type, but no peer provided one. We need to
|
|
// detect this case and emit an error.
|
|
const pointee = opt_ptr_info.?.child;
|
|
switch (pointee) {
|
|
.noreturn_type => return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = other_idx,
|
|
} },
|
|
else => switch (ip.indexToKey(pointee)) {
|
|
.array_type => |array_type| if (array_type.child == .noreturn_type) return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = other_idx,
|
|
} },
|
|
else => {},
|
|
},
|
|
}
|
|
|
|
return .{ .success = try sema.ptrType(opt_ptr_info.?) };
|
|
},
|
|
|
|
.func => {
|
|
var opt_cur_ty: ?Type = null;
|
|
var first_idx: usize = undefined;
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
const cur_ty = opt_cur_ty orelse {
|
|
opt_cur_ty = ty;
|
|
first_idx = i;
|
|
continue;
|
|
};
|
|
if (ty.zigTypeTag(mod) != .Fn) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
// ty -> cur_ty
|
|
if (.ok == try sema.coerceInMemoryAllowedFns(block, cur_ty, ty, target, src, src)) {
|
|
continue;
|
|
}
|
|
// cur_ty -> ty
|
|
if (.ok == try sema.coerceInMemoryAllowedFns(block, ty, cur_ty, target, src, src)) {
|
|
opt_cur_ty = ty;
|
|
continue;
|
|
}
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
return .{ .success = opt_cur_ty.? };
|
|
},
|
|
|
|
.enum_or_union => {
|
|
var opt_cur_ty: ?Type = null;
|
|
// The peer index which gave the current type
|
|
var cur_ty_idx: usize = undefined;
|
|
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.EnumLiteral, .Enum, .Union => {},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
const cur_ty = opt_cur_ty orelse {
|
|
opt_cur_ty = ty;
|
|
cur_ty_idx = i;
|
|
continue;
|
|
};
|
|
|
|
// We want to return this in a lot of cases, so alias it here for convenience
|
|
const generic_err: PeerResolveResult = .{ .conflict = .{
|
|
.peer_idx_a = cur_ty_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
|
|
switch (cur_ty.zigTypeTag(mod)) {
|
|
.EnumLiteral => {
|
|
opt_cur_ty = ty;
|
|
cur_ty_idx = i;
|
|
},
|
|
.Enum => switch (ty.zigTypeTag(mod)) {
|
|
.EnumLiteral => {},
|
|
.Enum => {
|
|
if (!ty.eql(cur_ty, mod)) return generic_err;
|
|
},
|
|
.Union => {
|
|
const tag_ty = ty.unionTagTypeHypothetical(mod);
|
|
if (!tag_ty.eql(cur_ty, mod)) return generic_err;
|
|
opt_cur_ty = ty;
|
|
cur_ty_idx = i;
|
|
},
|
|
else => unreachable,
|
|
},
|
|
.Union => switch (ty.zigTypeTag(mod)) {
|
|
.EnumLiteral => {},
|
|
.Enum => {
|
|
const cur_tag_ty = cur_ty.unionTagTypeHypothetical(mod);
|
|
if (!ty.eql(cur_tag_ty, mod)) return generic_err;
|
|
},
|
|
.Union => {
|
|
if (!ty.eql(cur_ty, mod)) return generic_err;
|
|
},
|
|
else => unreachable,
|
|
},
|
|
else => unreachable,
|
|
}
|
|
}
|
|
return .{ .success = opt_cur_ty.? };
|
|
},
|
|
|
|
.comptime_int => {
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.ComptimeInt => {},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
}
|
|
return .{ .success = Type.comptime_int };
|
|
},
|
|
|
|
.comptime_float => {
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.ComptimeInt, .ComptimeFloat => {},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
}
|
|
return .{ .success = Type.comptime_float };
|
|
},
|
|
|
|
.fixed_int => {
|
|
var idx_unsigned: ?usize = null;
|
|
var idx_signed: ?usize = null;
|
|
|
|
// TODO: this is for compatibility with legacy behavior. See beneath the loop.
|
|
var any_comptime_known = false;
|
|
|
|
for (peer_tys, peer_vals, 0..) |opt_ty, *ptr_opt_val, i| {
|
|
const ty = opt_ty orelse continue;
|
|
const opt_val = ptr_opt_val.*;
|
|
|
|
const peer_tag = ty.zigTypeTag(mod);
|
|
switch (peer_tag) {
|
|
.ComptimeInt => {
|
|
// If the value is undefined, we can't refine to a fixed-width int
|
|
if (opt_val == null or opt_val.?.isUndef(mod)) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
any_comptime_known = true;
|
|
ptr_opt_val.* = try sema.resolveLazyValue(opt_val.?);
|
|
continue;
|
|
},
|
|
.Int => {},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
|
|
if (opt_val != null) any_comptime_known = true;
|
|
|
|
const info = ty.intInfo(mod);
|
|
|
|
const idx_ptr = switch (info.signedness) {
|
|
.unsigned => &idx_unsigned,
|
|
.signed => &idx_signed,
|
|
};
|
|
|
|
const largest_idx = idx_ptr.* orelse {
|
|
idx_ptr.* = i;
|
|
continue;
|
|
};
|
|
|
|
const cur_info = peer_tys[largest_idx].?.intInfo(mod);
|
|
if (info.bits > cur_info.bits) {
|
|
idx_ptr.* = i;
|
|
}
|
|
}
|
|
|
|
if (idx_signed == null) {
|
|
return .{ .success = peer_tys[idx_unsigned.?].? };
|
|
}
|
|
|
|
if (idx_unsigned == null) {
|
|
return .{ .success = peer_tys[idx_signed.?].? };
|
|
}
|
|
|
|
const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(mod);
|
|
const signed_info = peer_tys[idx_signed.?].?.intInfo(mod);
|
|
if (signed_info.bits > unsigned_info.bits) {
|
|
return .{ .success = peer_tys[idx_signed.?].? };
|
|
}
|
|
|
|
// TODO: this is for compatibility with legacy behavior. Before this version of PTR was
|
|
// implemented, the algorithm very often returned false positives, with the expectation
|
|
// that you'd just hit a coercion error later. One of these was that for integers, the
|
|
// largest type would always be returned, even if it couldn't fit everything. This had
|
|
// an unintentional consequence to semantics, which is that if values were known at
|
|
// comptime, they would be coerced down to the smallest type where possible. This
|
|
// behavior is unintuitive and order-dependent, so in my opinion should be eliminated,
|
|
// but for now we'll retain compatibility.
|
|
if (any_comptime_known) {
|
|
if (unsigned_info.bits > signed_info.bits) {
|
|
return .{ .success = peer_tys[idx_unsigned.?].? };
|
|
}
|
|
const idx = @min(idx_unsigned.?, idx_signed.?);
|
|
return .{ .success = peer_tys[idx].? };
|
|
}
|
|
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = idx_unsigned.?,
|
|
.peer_idx_b = idx_signed.?,
|
|
} };
|
|
},
|
|
|
|
.fixed_float => {
|
|
var opt_cur_ty: ?Type = null;
|
|
|
|
for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
|
|
const ty = opt_ty orelse continue;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.ComptimeFloat, .ComptimeInt => {},
|
|
.Int => {
|
|
if (opt_val == null) return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
},
|
|
.Float => {
|
|
if (opt_cur_ty) |cur_ty| {
|
|
if (cur_ty.eql(ty, mod)) continue;
|
|
// Recreate the type so we eliminate any c_longdouble
|
|
const bits = @max(cur_ty.floatBits(target), ty.floatBits(target));
|
|
opt_cur_ty = switch (bits) {
|
|
16 => Type.f16,
|
|
32 => Type.f32,
|
|
64 => Type.f64,
|
|
80 => Type.f80,
|
|
128 => Type.f128,
|
|
else => unreachable,
|
|
};
|
|
} else {
|
|
opt_cur_ty = ty;
|
|
}
|
|
},
|
|
else => return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} },
|
|
}
|
|
}
|
|
|
|
// Note that fixed_float is only chosen if there is at least one fixed-width float peer,
|
|
// so opt_cur_ty must be non-null.
|
|
return .{ .success = opt_cur_ty.? };
|
|
},
|
|
|
|
.coercible_struct => {
|
|
// First, check that every peer has the same approximate structure (field count and names)
|
|
|
|
var opt_first_idx: ?usize = null;
|
|
var is_tuple: bool = undefined;
|
|
var field_count: usize = undefined;
|
|
// Only defined for non-tuples.
|
|
var field_names: []InternPool.NullTerminatedString = undefined;
|
|
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
|
|
if (!ty.isTupleOrAnonStruct(mod)) {
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = strat_reason,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
|
|
const first_idx = opt_first_idx orelse {
|
|
opt_first_idx = i;
|
|
is_tuple = ty.isTuple(mod);
|
|
field_count = ty.structFieldCount(mod);
|
|
if (!is_tuple) {
|
|
const names = ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip);
|
|
field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names);
|
|
}
|
|
continue;
|
|
};
|
|
|
|
if (ty.isTuple(mod) != is_tuple or ty.structFieldCount(mod) != field_count) {
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
|
|
if (!is_tuple) {
|
|
for (field_names, 0..) |expected, field_index_usize| {
|
|
const field_index: u32 = @intCast(field_index_usize);
|
|
const actual = ty.structFieldName(field_index, mod).unwrap().?;
|
|
if (actual == expected) continue;
|
|
return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
}
|
|
}
|
|
}
|
|
|
|
assert(opt_first_idx != null);
|
|
|
|
// Now, we'll recursively resolve the field types
|
|
const field_types = try sema.arena.alloc(InternPool.Index, field_count);
|
|
// Values for `comptime` fields - `.none` used for non-comptime fields
|
|
const field_vals = try sema.arena.alloc(InternPool.Index, field_count);
|
|
const sub_peer_tys = try sema.arena.alloc(?Type, peer_tys.len);
|
|
const sub_peer_vals = try sema.arena.alloc(?Value, peer_vals.len);
|
|
|
|
for (field_types, field_vals, 0..) |*field_ty, *field_val, field_idx| {
|
|
// Fill buffers with types and values of the field
|
|
for (peer_tys, peer_vals, sub_peer_tys, sub_peer_vals) |opt_ty, opt_val, *peer_field_ty, *peer_field_val| {
|
|
const ty = opt_ty orelse {
|
|
peer_field_ty.* = null;
|
|
peer_field_val.* = null;
|
|
continue;
|
|
};
|
|
peer_field_ty.* = ty.structFieldType(field_idx, mod);
|
|
peer_field_val.* = if (opt_val) |val| try val.fieldValue(mod, field_idx) else null;
|
|
}
|
|
|
|
// Resolve field type recursively
|
|
field_ty.* = switch (try sema.resolvePeerTypesInner(block, src, sub_peer_tys, sub_peer_vals)) {
|
|
.success => |ty| ty.toIntern(),
|
|
else => |result| {
|
|
const result_buf = try sema.arena.create(PeerResolveResult);
|
|
result_buf.* = result;
|
|
const field_name = if (is_tuple) name: {
|
|
break :name try std.fmt.allocPrint(sema.arena, "{d}", .{field_idx});
|
|
} else try sema.arena.dupe(u8, ip.stringToSlice(field_names[field_idx]));
|
|
|
|
// The error info needs the field types, but we can't reuse sub_peer_tys
|
|
// since the recursive call may have clobbered it.
|
|
const peer_field_tys = try sema.arena.alloc(Type, peer_tys.len);
|
|
for (peer_tys, peer_field_tys) |opt_ty, *peer_field_ty| {
|
|
// Already-resolved types won't be referenced by the error so it's fine
|
|
// to leave them undefined.
|
|
const ty = opt_ty orelse continue;
|
|
peer_field_ty.* = ty.structFieldType(field_idx, mod);
|
|
}
|
|
|
|
return .{ .field_error = .{
|
|
.field_name = field_name,
|
|
.field_types = peer_field_tys,
|
|
.sub_result = result_buf,
|
|
} };
|
|
},
|
|
};
|
|
|
|
// Decide if this is a comptime field. If it is comptime in all peers, and the
|
|
// coerced comptime values are all the same, we say it is comptime, else not.
|
|
|
|
var comptime_val: ?Value = null;
|
|
for (peer_tys) |opt_ty| {
|
|
const struct_ty = opt_ty orelse continue;
|
|
try sema.resolveStructFieldInits(struct_ty);
|
|
|
|
const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_idx) orelse {
|
|
comptime_val = null;
|
|
break;
|
|
};
|
|
const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern());
|
|
const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
|
|
// It's possible for PTR to give false positives. Just give up on making this a comptime field, we'll get an error later anyway
|
|
error.NotCoercible => {
|
|
comptime_val = null;
|
|
break;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
const coerced_val = (try sema.resolveValue(coerced_inst)) orelse continue;
|
|
const existing = comptime_val orelse {
|
|
comptime_val = coerced_val;
|
|
continue;
|
|
};
|
|
if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), mod)) {
|
|
comptime_val = null;
|
|
break;
|
|
}
|
|
}
|
|
|
|
field_val.* = if (comptime_val) |v| v.toIntern() else .none;
|
|
}
|
|
|
|
const final_ty = try ip.getAnonStructType(mod.gpa, .{
|
|
.types = field_types,
|
|
.names = if (is_tuple) &.{} else field_names,
|
|
.values = field_vals,
|
|
});
|
|
|
|
return .{ .success = Type.fromInterned(final_ty) };
|
|
},
|
|
|
|
.exact => {
|
|
var expect_ty: ?Type = null;
|
|
var first_idx: usize = undefined;
|
|
for (peer_tys, 0..) |opt_ty, i| {
|
|
const ty = opt_ty orelse continue;
|
|
if (expect_ty) |expect| {
|
|
if (!ty.eql(expect, mod)) return .{ .conflict = .{
|
|
.peer_idx_a = first_idx,
|
|
.peer_idx_b = i,
|
|
} };
|
|
} else {
|
|
expect_ty = ty;
|
|
first_idx = i;
|
|
}
|
|
}
|
|
return .{ .success = expect_ty.? };
|
|
},
|
|
}
|
|
}
|
|
|
|
/// Returns whichever of `e0`/`e1` the other can coerce into (preferring `e1`);
/// if neither subsumes the other, returns a freshly merged error set.
fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1: Type) !Type {
    // Prefer e1 when e0 coerces into it (e0 -> e1).
    const e0_into_e1 = try sema.coerceInMemoryAllowedErrorSets(block, e1, e0, src, src);
    if (e0_into_e1 == .ok) return e1;

    // Otherwise prefer e0 when e1 coerces into it (e1 -> e0).
    const e1_into_e0 = try sema.coerceInMemoryAllowedErrorSets(block, e0, e1, src, src);
    if (e1_into_e0 == .ok) return e0;

    // Neither direction works; build the union of both sets.
    return sema.errorSetMerge(e0, e1);
}
|
|
|
|
/// If one of `ty_a`/`ty_b` is in-memory coercible to the other, returns the
/// coercion target (preferring `ty_a`); otherwise returns null.
fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type {
    const target = sema.mod.getTarget();

    // ty_b -> ty_a
    if (try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, target, src, src) == .ok) {
        return ty_a;
    }

    // ty_a -> ty_b
    if (try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, target, src, src) == .ok) {
        return ty_b;
    }

    return null;
}
|
|
|
|
/// Result of `typeIsArrayLike`: describes a type that can be treated as a
/// homogeneous sequence of `len` elements of type `elem_ty`.
const ArrayLike = struct {
    /// Number of elements (array length, or tuple field count).
    len: u64,
    /// `noreturn` indicates that this type is `struct{}` so can coerce to anything
    elem_ty: Type,
};
|
|
/// Classifies `ty` as array-like if possible: an array, an empty struct, or a
/// tuple whose fields all share a single type. Returns null for anything else.
fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Array => return .{
            .len = ty.arrayLen(mod),
            .elem_ty = ty.childType(mod),
        },
        .Struct => {
            const field_count = ty.structFieldCount(mod);
            // An empty struct has no elements; `noreturn` marks the
            // element type as coercible to anything (see `ArrayLike`).
            if (field_count == 0) return .{
                .len = 0,
                .elem_ty = Type.noreturn,
            };
            if (!ty.isTuple(mod)) return null;
            // Every tuple field must have the same type as field 0.
            const elem_ty = ty.structFieldType(0, mod);
            var field_i: usize = 1;
            while (field_i < field_count) : (field_i += 1) {
                if (!ty.structFieldType(field_i, mod).eql(elem_ty, mod)) {
                    return null;
                }
            }
            return .{
                .len = field_count,
                .elem_ty = elem_ty,
            };
        },
        else => return null,
    }
}
|
|
|
|
/// If this Sema has an inferred error set for its return type, resolve it now
/// and record the resolved set on the function in the intern pool.
pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    const ies = sema.fn_ret_ty_ies orelse return;
    try sema.resolveInferredErrorSetPtr(block, src, ies);
    assert(ies.resolved != .none);
    ip.funcIesResolved(sema.func_index).* = ies.resolved;
}
|
|
|
|
/// Fully resolves the return type and all parameter types of `fn_ty`.
/// Generic function types are not handled here; callers pass concrete types.
pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const fn_info = mod.typeToFunc(fn_ty).?;
    const ret_ty = Type.fromInterned(fn_info.return_type);

    try sema.resolveTypeFully(ret_ty);

    if (mod.comp.config.any_error_tracing and ret_ty.isError(mod)) {
        // Ensure the type exists so that backends can assume that.
        _ = try sema.getBuiltinType("StackTrace");
    }

    // NOTE: the param slice is re-fetched via `.get(ip)` on every iteration
    // rather than hoisted; resolution work may touch the intern pool.
    for (0..fn_info.param_types.len) |param_i| {
        const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_i]);
        try sema.resolveTypeFully(param_ty);
    }
}
|
|
|
|
/// Make it so that calling hash() and eql() on `val` will not assert due
/// to a type not having its layout resolved.
/// Recursively replaces any `lazy_align`/`lazy_size` integers nested inside
/// `val` (through slices, pointers, aggregates, and unions) with concrete
/// values, re-interning each containing value that changed. Values that
/// contain nothing lazy are returned unchanged (pointer-identical).
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
    const mod = sema.mod;
    switch (mod.intern_pool.indexToKey(val.toIntern())) {
        .int => |int| switch (int.storage) {
            // Already a concrete integer; nothing to resolve.
            .u64, .i64, .big_int => return val,
            // Force the lazy alignment/size computation and intern the result.
            .lazy_align, .lazy_size => return mod.intValue(
                Type.fromInterned(int.ty),
                (try val.getUnsignedIntAdvanced(mod, sema)).?,
            ),
        },
        .slice => |slice| {
            // Resolve both components; re-intern only if either changed.
            const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr));
            const len = try sema.resolveLazyValue(Value.fromInterned(slice.len));
            if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
            return Value.fromInterned(try mod.intern(.{ .slice = .{
                .ty = slice.ty,
                .ptr = ptr.toIntern(),
                .len = len.toIntern(),
            } }));
        },
        .ptr => |ptr| {
            switch (ptr.addr) {
                // These address kinds carry no nested value to resolve here.
                .decl, .mut_decl, .anon_decl => return val,
                .comptime_field => |field_val| {
                    // Resolve the referenced comptime field value.
                    const resolved_field_val =
                        (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern();
                    return if (resolved_field_val == field_val)
                        val
                    else
                        Value.fromInterned((try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = .{ .comptime_field = resolved_field_val },
                        } })));
                },
                .int => |int| {
                    // Pointer-from-integer: the address integer itself may be lazy.
                    const resolved_int = (try sema.resolveLazyValue(Value.fromInterned(int))).toIntern();
                    return if (resolved_int == int)
                        val
                    else
                        Value.fromInterned((try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = .{ .int = resolved_int },
                        } })));
                },
                .eu_payload, .opt_payload => |base| {
                    // Resolve the base pointer, preserving the payload address kind.
                    const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern();
                    return if (resolved_base == base)
                        val
                    else
                        Value.fromInterned((try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = switch (ptr.addr) {
                                .eu_payload => .{ .eu_payload = resolved_base },
                                .opt_payload => .{ .opt_payload = resolved_base },
                                else => unreachable,
                            },
                        } })));
                },
                .elem, .field => |base_index| {
                    // Resolve the base; the element/field index is kept as-is.
                    const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern();
                    return if (resolved_base == base_index.base)
                        val
                    else
                        Value.fromInterned((try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = switch (ptr.addr) {
                                .elem => .{ .elem = .{
                                    .base = resolved_base,
                                    .index = base_index.index,
                                } },
                                .field => .{ .field = .{
                                    .base = resolved_base,
                                    .index = base_index.index,
                                } },
                                else => unreachable,
                            },
                        } })));
                },
            }
        },
        .aggregate => |aggregate| switch (aggregate.storage) {
            // Raw byte storage contains no nested interned values.
            .bytes => return val,
            .elems => |elems| {
                // Copy-on-write: only allocate a resolved copy when the first
                // changed element is found; until then `resolved_elems` stays
                // empty, which doubles as the "nothing changed" flag.
                var resolved_elems: []InternPool.Index = &.{};
                for (elems, 0..) |elem, i| {
                    const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
                    if (resolved_elems.len == 0 and resolved_elem != elem) {
                        resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len);
                        @memcpy(resolved_elems[0..i], elems[0..i]);
                    }
                    if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
                }
                return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
                    .ty = aggregate.ty,
                    .storage = .{ .elems = resolved_elems },
                } })));
            },
            .repeated_elem => |elem| {
                // Single repeated element: resolve it once.
                const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
                return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
                    .ty = aggregate.ty,
                    .storage = .{ .repeated_elem = resolved_elem },
                } })));
            },
        },
        .un => |un| {
            // Resolve the union's tag (when present) and its payload value.
            const resolved_tag = if (un.tag == .none)
                .none
            else
                (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern();
            const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern();
            return if (resolved_tag == un.tag and resolved_val == un.val)
                val
            else
                Value.fromInterned((try mod.intern(.{ .un = .{
                    .ty = un.ty,
                    .tag = resolved_tag,
                    .val = resolved_val,
                } })));
        },
        // Every other value kind is returned unchanged.
        else => return val,
    }
}
|
|
|
|
/// Resolves the memory layout of `ty`, recursing into the child types whose
/// layout the backend needs alongside it (array elements, optional and error
/// union payloads, function parameter/return types).
pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .simple_type => |simple_type| return sema.resolveSimpleType(simple_type),
        else => {},
    }
    switch (ty.zigTypeTag(mod)) {
        .Struct => return sema.resolveStructLayout(ty),
        .Union => return sema.resolveUnionLayout(ty),
        .Array => {
            // A zero-length array (including sentinel) has no element layout
            // to resolve.
            if (ty.arrayLenIncludingSentinel(mod) == 0) return;
            const elem_ty = ty.childType(mod);
            return sema.resolveTypeLayout(elem_ty);
        },
        .Optional => {
            const payload_ty = ty.optionalChild(mod);
            // In case of querying the ABI alignment of this optional, we will ask
            // for hasRuntimeBits() of the payload type, so we need "requires comptime"
            // to be known already before this function returns.
            _ = try sema.typeRequiresComptime(payload_ty);
            return sema.resolveTypeLayout(payload_ty);
        },
        .ErrorUnion => {
            const payload_ty = ty.errorUnionPayload(mod);
            return sema.resolveTypeLayout(payload_ty);
        },
        .Fn => {
            const info = mod.typeToFunc(ty).?;
            if (info.is_generic) {
                // Resolving of generic function types is deferred to when
                // the function is instantiated.
                return;
            }
            const ip = &mod.intern_pool;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                try sema.resolveTypeLayout(Type.fromInterned(param_ty));
            }
            try sema.resolveTypeLayout(Type.fromInterned(info.return_type));
        },
        // Remaining type tags need no layout work here.
        else => {},
    }
}
|
|
|
|
/// Resolve a struct's alignment only without triggering resolution of its layout.
/// Asserts that the alignment is not yet resolved and the layout is non-packed.
/// The result is the maximum alignment over all runtime fields, and is also
/// recorded in the struct's intern-pool flags. When a dependency loop is
/// detected (field types or alignment already in progress), pointer alignment
/// is assumed and flagged so the assumption can be checked when the full
/// layout is eventually computed (see `resolveStructLayout`).
pub fn resolveStructAlignment(
    sema: *Sema,
    ty: InternPool.Index,
    struct_type: InternPool.Key.StructType,
) CompileError!Alignment {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const target = mod.getTarget();

    assert(struct_type.flagsPtr(ip).alignment == .none);
    assert(struct_type.layout != .Packed);

    if (struct_type.flagsPtr(ip).field_types_wip) {
        // We'll guess "pointer-aligned", if the struct has an
        // underaligned pointer field then some allocations
        // might require explicit alignment.
        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
        struct_type.flagsPtr(ip).alignment = result;
        return result;
    }

    try sema.resolveTypeFieldsStruct(ty, struct_type);

    if (struct_type.setAlignmentWip(ip)) {
        // We'll guess "pointer-aligned", if the struct has an
        // underaligned pointer field then some allocations
        // might require explicit alignment.
        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
        struct_type.flagsPtr(ip).alignment = result;
        return result;
    }
    defer struct_type.clearAlignmentWip(ip);

    var result: Alignment = .@"1";

    for (0..struct_type.field_types.len) |i| {
        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
        // Comptime-only fields occupy no memory and do not affect alignment.
        if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
            continue;
        const field_align = try sema.structFieldAlignment(
            struct_type.fieldAlign(ip, i),
            field_ty,
            struct_type.layout,
        );
        result = result.maxStrict(field_align);
    }

    struct_type.flagsPtr(ip).alignment = result;
    return result;
}
|
|
|
|
/// Computes a struct's full memory layout: per-field sizes, alignments,
/// offsets, runtime field order, and total size. Packed structs are handled
/// separately by computing their backing integer type. Detects dependency
/// loops ("struct depends on itself") and validates any earlier
/// assumed-runtime-bits / assumed-pointer-aligned guesses made while the
/// layout was still in progress.
fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const struct_type = mod.typeToStruct(ty) orelse return;

    if (struct_type.haveLayout(ip))
        return;

    try sema.resolveTypeFields(ty);

    if (struct_type.layout == .Packed) {
        // Packed structs have no field offsets; their "layout" is the
        // backing integer type.
        try semaBackingIntType(mod, struct_type);
        return;
    }

    // A true result means layout resolution was already in progress:
    // a circular dependency.
    if (struct_type.setLayoutWip(ip)) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
            "struct '{}' depends on itself",
            .{ty.fmt(mod)},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
    defer struct_type.clearLayoutWip(ip);

    const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len);
    const sizes = try sema.arena.alloc(u64, struct_type.field_types.len);

    var big_align: Alignment = .@"1";

    for (aligns, sizes, 0..) |*field_align, *field_size, i| {
        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
        // Comptime-only fields take no space at runtime.
        if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
            struct_type.offsets.get(ip)[i] = 0;
            field_size.* = 0;
            field_align.* = .none;
            continue;
        }

        field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) {
            error.AnalysisFail => {
                // Attach a note pointing at the offending field before
                // propagating the failure.
                const msg = sema.err orelse return err;
                try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
                return err;
            },
            else => return err,
        };
        field_align.* = try sema.structFieldAlignment(
            struct_type.fieldAlign(ip, i),
            field_ty,
            struct_type.layout,
        );
        big_align = big_align.maxStrict(field_align.*);
    }

    // Validate the "has runtime bits" guess made earlier if this struct's
    // layout was queried while still being resolved.
    if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
            "struct layout depends on it having runtime bits",
            .{},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }

    // Likewise validate the "pointer-aligned" guess (see resolveStructAlignment).
    if (struct_type.flagsPtr(ip).assumed_pointer_aligned and
        big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(mod.getTarget().ptrBitWidth(), 8))))
    {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
            "struct layout depends on being pointer aligned",
            .{},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }

    if (struct_type.hasReorderedFields()) {
        const runtime_order = struct_type.runtime_order.get(ip);

        // Initialize runtime order: identity for runtime fields, omitted for
        // comptime-only fields.
        for (runtime_order, 0..) |*ro, i| {
            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
            if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
                ro.* = .omitted;
            } else {
                ro.* = @enumFromInt(i);
            }
        }

        const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;

        // Orders runtime fields by decreasing alignment; omitted fields sort last.
        const AlignSortContext = struct {
            aligns: []const Alignment,

            fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool {
                if (a == .omitted) return false;
                if (b == .omitted) return true;
                const a_align = ctx.aligns[@intFromEnum(a)];
                const b_align = ctx.aligns[@intFromEnum(b)];
                return a_align.compare(.gt, b_align);
            }
        };
        if (struct_type.isTuple(ip) or !mod.backendSupportsFeature(.field_reordering)) {
            // TODO: don't handle tuples differently. This logic exists only because it
            // uncovers latent bugs if removed. Fix the latent bugs and remove this logic!
            // Likewise, implement field reordering support in all the backends!
            // This logic does not reorder fields; it only moves the omitted ones to the end
            // so that logic elsewhere does not need to special-case tuples.
            var i: usize = 0;
            var off: usize = 0;
            while (i + off < runtime_order.len) {
                if (runtime_order[i + off] == .omitted) {
                    off += 1;
                    continue;
                }
                runtime_order[i] = runtime_order[i + off];
                i += 1;
            }
            @memset(runtime_order[i..], .omitted);
        } else {
            mem.sortUnstable(RuntimeOrder, runtime_order, AlignSortContext{
                .aligns = aligns,
            }, AlignSortContext.lessThan);
        }
    }

    // Calculate size, alignment, and field offsets.
    const offsets = struct_type.offsets.get(ip);
    var it = struct_type.iterateRuntimeOrder(ip);
    var offset: u64 = 0;
    while (it.next()) |i| {
        // Each field starts at its offset rounded up to its alignment.
        offsets[i] = @intCast(aligns[i].forward(offset));
        offset = offsets[i] + sizes[i];
    }
    // Total size is padded up to the struct's own alignment.
    struct_type.size(ip).* = @intCast(big_align.forward(offset));
    const flags = struct_type.flagsPtr(ip);
    flags.alignment = big_align;
    flags.layout_resolved = true;
    _ = try sema.typeRequiresComptime(ty);
}
|
|
|
|
/// Determines and stores the backing integer type of a packed struct: either
/// by semantically analyzing the explicitly declared backing type from ZIR
/// (and checking its bit size against the sum of all field bit sizes), or by
/// synthesizing an unsigned integer exactly as wide as the fields require.
fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void {
    const gpa = mod.gpa;
    const ip = &mod.intern_pool;

    const decl_index = struct_type.decl.unwrap().?;
    const decl = mod.declPtr(decl_index);

    const zir = mod.namespacePtr(struct_type.namespace.unwrap().?).file_scope.zir;

    // Scratch arena for this nested analysis; freed on return.
    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
    defer comptime_mutable_decls.deinit();

    var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // A fresh Sema and comptime Block are constructed because the backing
    // integer expression is analyzed outside any function body.
    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_mutable_decls = &comptime_mutable_decls,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    var block: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl_index,
        .namespace = struct_type.namespace.unwrap() orelse decl.src_namespace,
        .wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer assert(block.instructions.items.len == 0);

    // Total bit width occupied by all fields of the packed struct.
    const fields_bit_sum = blk: {
        var accumulator: u64 = 0;
        for (0..struct_type.field_types.len) |i| {
            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
            accumulator += try field_ty.bitSizeAdvanced(mod, &sema);
        }
        break :blk accumulator;
    };

    const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
    const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
    assert(extended.opcode == .struct_decl);
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);

    if (small.has_backing_int) {
        // Walk past the struct_decl trailing data (optional lengths) to
        // reach the backing-int payload.
        var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
        extra_index += @intFromBool(small.has_fields_len);
        extra_index += @intFromBool(small.has_decls_len);

        const backing_int_body_len = zir.extra[extra_index];
        extra_index += 1;

        const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
        const backing_int_ty = blk: {
            if (backing_int_body_len == 0) {
                // Zero body length: the backing type is a direct ZIR ref.
                const backing_int_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
                break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
            } else {
                // Otherwise the backing type is a full expression body to evaluate.
                const body = zir.bodySlice(extra_index, backing_int_body_len);
                const ty_ref = try sema.resolveBody(&block, body, zir_index);
                break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
            }
        };

        try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
    } else {
        // No explicit backing type: synthesize uN with N = fields_bit_sum.
        if (fields_bit_sum > std.math.maxInt(u16)) {
            return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
        }
        const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
    }

    // Intern any comptime-mutable decls created during this analysis.
    for (comptime_mutable_decls.items) |ct_decl_index| {
        const ct_decl = mod.declPtr(ct_decl_index);
        _ = try ct_decl.internValue(mod);
    }
}
|
|
|
|
/// Validates an explicitly declared packed-struct backing type: it must be an
/// integer whose bit size equals the total bit size of the struct's fields.
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
    const mod = sema.mod;

    if (!backing_int_ty.isInt(mod)) {
        return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
    }

    const backing_bits = backing_int_ty.bitSize(mod);
    if (backing_bits != fields_bit_sum) {
        return sema.fail(
            block,
            src,
            "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
            .{ backing_int_ty.fmt(sema.mod), backing_bits, fields_bit_sum },
        );
    }
}
|
|
|
|
/// Emits a compile error (with an explanatory note) unless `ty` supports
/// indexing.
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    if (ty.isIndexable(mod)) return;

    const msg = msg: {
        const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, src, msg, "operand must be an array, slice, tuple, or vector", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Emits a compile error unless `ty` is an indexable pointer: a slice, a
/// many-pointer, a C pointer, or a single-item pointer to an array.
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    const valid = valid: {
        if (ty.zigTypeTag(mod) != .Pointer) break :valid false;
        switch (ty.ptrSize(mod)) {
            .Slice, .Many, .C => break :valid true,
            .One => {
                const elem_ty = ty.childType(mod);
                if (elem_ty.zigTypeTag(mod) == .Array) break :valid true;
                // TODO https://github.com/ziglang/zig/issues/15479
                // if (elem_ty.isTuple()) return;
                break :valid false;
            },
        }
    };
    if (valid) return;

    const msg = msg: {
        const msg = try sema.errMsg(block, src, "type '{}' is not an indexable pointer", .{ty.fmt(sema.mod)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, src, msg, "operand must be a slice, a many pointer or a pointer to an array", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
|
|
|
|
/// Resolve a union's alignment only without triggering resolution of its layout.
/// Asserts that the alignment is not yet resolved.
/// The result is the maximum alignment over all fields with runtime bits and
/// is recorded in the union's intern-pool flags. When the union's field types
/// are still being resolved (a dependency loop), pointer alignment is assumed
/// and flagged for later validation.
pub fn resolveUnionAlignment(
    sema: *Sema,
    ty: Type,
    union_type: InternPool.Key.UnionType,
) CompileError!Alignment {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const target = mod.getTarget();

    assert(!union_type.haveLayout(ip));

    if (union_type.flagsPtr(ip).status == .field_types_wip) {
        // We'll guess "pointer-aligned", if the union has an
        // underaligned pointer field then some allocations
        // might require explicit alignment.
        union_type.flagsPtr(ip).assumed_pointer_aligned = true;
        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
        union_type.flagsPtr(ip).alignment = result;
        return result;
    }

    try sema.resolveTypeFieldsUnion(ty, union_type);

    const union_obj = ip.loadUnionType(union_type);
    var max_align: Alignment = .@"1";
    for (0..union_obj.field_names.len) |field_index| {
        const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
        // Zero-bit fields do not contribute to the union's alignment.
        if (!(try sema.typeHasRuntimeBits(field_ty))) continue;

        // An explicit field alignment overrides the type's ABI alignment.
        const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
        const field_align = if (explicit_align != .none)
            explicit_align
        else
            try sema.typeAbiAlignment(field_ty);

        max_align = max_align.max(field_align);
    }

    union_type.flagsPtr(ip).alignment = max_align;
    return max_align;
}
|
|
|
|
/// Computes and caches a union's size, alignment, and trailing padding.
/// This logic must be kept in sync with `Module.getUnionLayout`.
fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    // Field types must be resolved before layout can be computed.
    const union_type = ip.indexToKey(ty.ip_index).union_type;
    try sema.resolveTypeFieldsUnion(ty, union_type);

    const union_obj = ip.loadUnionType(union_type);
    switch (union_obj.flagsPtr(ip).status) {
        .none, .have_field_types => {},
        // Layout requested while field types or layout are already being
        // resolved: the union (transitively) contains itself.
        .field_types_wip, .layout_wip => {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                mod.declPtr(union_obj.decl).srcLoc(mod),
                "union '{}' depends on itself",
                .{ty.fmt(mod)},
            );
            return sema.failWithOwnedErrorMsg(null, msg);
        },
        // Layout already known; nothing to do.
        .have_layout, .fully_resolved_wip, .fully_resolved => return,
    }

    // Mark layout resolution as in progress so recursive dependencies are
    // detected above; restore the previous status on failure.
    const prev_status = union_obj.flagsPtr(ip).status;
    errdefer if (union_obj.flagsPtr(ip).status == .layout_wip) {
        union_obj.flagsPtr(ip).status = prev_status;
    };

    union_obj.flagsPtr(ip).status = .layout_wip;

    // The payload area is as large as the largest runtime field and as
    // aligned as the most-aligned one; zero-bit fields are skipped.
    var max_size: u64 = 0;
    var max_align: Alignment = .@"1";
    for (0..union_obj.field_names.len) |field_index| {
        const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
        if (!(try sema.typeHasRuntimeBits(field_ty))) continue;

        max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
            error.AnalysisFail => {
                // Attach a note pointing at the offending field before
                // propagating the failure.
                const msg = sema.err orelse return err;
                try sema.addFieldErrNote(ty, field_index, msg, "while checking this field", .{});
                return err;
            },
            else => return err,
        });

        const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
        const field_align = if (explicit_align != .none)
            explicit_align
        else
            try sema.typeAbiAlignment(field_ty);

        max_align = max_align.max(field_align);
    }

    const flags = union_obj.flagsPtr(ip);
    const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_obj.enum_tag_ty));
    const size, const alignment, const padding = if (has_runtime_tag) layout: {
        const enum_tag_type = Type.fromInterned(union_obj.enum_tag_ty);
        const tag_align = try sema.typeAbiAlignment(enum_tag_type);
        const tag_size = try sema.typeAbiSize(enum_tag_type);

        // Put the tag before or after the payload depending on which one's
        // alignment is greater.
        var size: u64 = 0;
        var padding: u32 = 0;
        if (tag_align.compare(.gte, max_align)) {
            // {Tag, Payload}
            size += tag_size;
            size = max_align.forward(size);
            size += max_size;
            const prev_size = size;
            size = tag_align.forward(size);
            padding = @intCast(size - prev_size);
        } else {
            // {Payload, Tag}
            size += max_size;
            size = tag_align.forward(size);
            size += tag_size;
            const prev_size = size;
            size = max_align.forward(size);
            padding = @intCast(size - prev_size);
        }

        break :layout .{ size, max_align.max(tag_align), padding };
    } else .{ max_align.forward(max_size), max_align, 0 };

    // Persist the computed layout into the InternPool.
    union_type.size(ip).* = @intCast(size);
    union_type.padding(ip).* = padding;
    flags.alignment = alignment;
    flags.status = .have_layout;

    // If an earlier circular query had to guess that this union has runtime
    // bits (or pointer alignment — see `resolveUnionAlignment`), verify the
    // guess now that the real layout is known.
    if (union_obj.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(union_obj.decl).srcLoc(mod),
            "union layout depends on it having runtime bits",
            .{},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }

    if (union_obj.flagsPtr(ip).assumed_pointer_aligned and
        alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(mod.getTarget().ptrBitWidth(), 8))))
    {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(union_obj.decl).srcLoc(mod),
            "union layout depends on being pointer aligned",
            .{},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
}
|
|
|
|
/// Recursively resolves `ty` and every type reachable through it (pointer
/// children, array elements, optional/error-union payloads, aggregate fields,
/// and non-generic function parameter/return types).
/// Returns `error.AnalysisFail` if any of the types (recursively) failed to
/// be resolved.
pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => return sema.resolveTypeFully(ty.childType(mod)),
        .Array => return sema.resolveTypeFully(ty.childType(mod)),
        .Optional => return sema.resolveTypeFully(ty.optionalChild(mod)),
        .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)),
        .Union => return sema.resolveUnionFully(ty),
        .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
            .struct_type => try sema.resolveStructFully(ty),
            .anon_struct_type => |tuple| {
                // Tuples have no dedicated resolution pass; resolve each
                // element type directly.
                for (tuple.types.get(ip)) |field_ty| {
                    try sema.resolveTypeFully(Type.fromInterned(field_ty));
                }
            },
            .simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
            else => {},
        },
        .Fn => {
            const info = mod.typeToFunc(ty).?;
            // Resolving of generic function types is deferred to when
            // the function is instantiated.
            if (info.is_generic) return;
            for (info.param_types.get(ip)) |param_ty| {
                try sema.resolveTypeFully(Type.fromInterned(param_ty));
            }
            try sema.resolveTypeFully(Type.fromInterned(info.return_type));
        },
        else => {},
    }
}
|
|
|
|
/// Resolves a struct's layout and then recursively resolves every field type,
/// so that pointer fields get their child types resolved as well.
/// See also the similar code for unions (`resolveUnionFully`).
fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
    try sema.resolveStructLayout(ty);

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const struct_type = mod.typeToStruct(ty).?;

    // Mark as fully resolved up front; bail if it already was.
    if (struct_type.setFullyResolved(ip)) return;
    errdefer struct_type.clearFullyResolved(ip);

    // Second pass over the fields now that layout is known.
    for (struct_type.field_types.get(ip)) |field_ty_ip| {
        try sema.resolveTypeFully(Type.fromInterned(field_ty_ip));
    }
}
|
|
|
|
/// Resolves a union's layout, then recursively resolves its field types and
/// its comptime-only status.
fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
    try sema.resolveUnionLayout(ty);

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const union_obj = mod.typeToUnion(ty).?;
    switch (union_obj.flagsPtr(ip).status) {
        .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
        // Already done, or in progress higher up the call stack.
        .fully_resolved_wip, .fully_resolved => return,
    }

    {
        // After we have resolved union layout we have to go over the fields
        // again to make sure pointer fields get their child types resolved
        // as well. See also similar code for structs.
        const prev_status = union_obj.flagsPtr(ip).status;
        errdefer union_obj.flagsPtr(ip).status = prev_status;

        union_obj.flagsPtr(ip).status = .fully_resolved_wip;
        for (0..union_obj.field_types.len) |field_index| {
            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
            try sema.resolveTypeFully(field_ty);
        }
        union_obj.flagsPtr(ip).status = .fully_resolved;
    }

    // And let's not forget comptime-only status.
    _ = try sema.typeRequiresComptime(ty);
}
|
|
|
|
/// Ensures a type's field types are resolved (a shallower resolution than
/// `resolveTypeFully`). Dispatches on the InternPool index of `ty`: primitive
/// types need no work, value indices must never reach here, and struct/union/
/// simple types delegate to their dedicated resolvers.
pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const ty_ip = ty.toIntern();

    switch (ty_ip) {
        .var_args_param_type => unreachable,

        .none => unreachable,

        // Well-known primitive type indices: nothing to resolve.
        .u0_type,
        .i0_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .adhoc_inferred_error_set_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .anyframe_type,
        .null_type,
        .undefined_type,
        .enum_literal_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .single_const_pointer_to_comptime_int_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .optional_noreturn_type,
        .anyerror_void_error_union_type,
        .generic_poison_type,
        .empty_struct_type,
        => {},

        // These indices are values, not types; callers must not pass them.
        .undef => unreachable,
        .zero => unreachable,
        .zero_usize => unreachable,
        .zero_u8 => unreachable,
        .one => unreachable,
        .one_usize => unreachable,
        .one_u8 => unreachable,
        .four_u8 => unreachable,
        .negative_one => unreachable,
        .calling_convention_c => unreachable,
        .calling_convention_inline => unreachable,
        .void_value => unreachable,
        .unreachable_value => unreachable,
        .null_value => unreachable,
        .bool_true => unreachable,
        .bool_false => unreachable,
        .empty_struct => unreachable,
        .generic_poison => unreachable,

        // Everything else: inspect the InternPool item tag to find the
        // container kind and delegate.
        else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
            .type_struct,
            .type_struct_ns,
            .type_struct_packed,
            .type_struct_packed_inits,
            => try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),

            .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.indexToKey(ty_ip).union_type),
            .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
            else => {},
        },
    }
}
|
|
|
|
/// Fully resolves a simple type. This is usually a nop, but for builtin types with
/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
/// resolve the container type.
fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void {
    // Each name must match the declaration in std.builtin exactly, since it
    // is looked up by string via `getBuiltinType`.
    const builtin_type_name: []const u8 = switch (simple_type) {
        .atomic_order => "AtomicOrder",
        .atomic_rmw_op => "AtomicRmwOp",
        .calling_convention => "CallingConvention",
        .address_space => "AddressSpace",
        .float_mode => "FloatMode",
        .reduce_op => "ReduceOp",
        // Fixed: the std.builtin declaration is spelled `CallModifier`; the
        // previous "CallModifer" could never resolve.
        .call_modifier => "CallModifier",
        .prefetch_options => "PrefetchOptions",
        .export_options => "ExportOptions",
        .extern_options => "ExternOptions",
        .type_info => "Type",
        // All other simple types need no resolution.
        else => return,
    };
    // This will fully resolve the type.
    _ = try sema.getBuiltinType(builtin_type_name);
}
|
|
|
|
/// Resolves the field names/types of a struct, running the field ZIR bodies
/// via `semaStructFields`. Detects and reports self-dependency.
pub fn resolveTypeFieldsStruct(
    sema: *Sema,
    ty: InternPool.Index,
    struct_type: InternPool.Key.StructType,
) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // If there is no owner decl it means the struct has no fields.
    const owner_decl = struct_type.decl.unwrap() orelse return;

    // If the owning declaration already failed analysis, propagate the
    // failure instead of re-analyzing.
    switch (mod.declPtr(owner_decl).analysis) {
        .file_failure,
        .dependency_failure,
        .sema_failure,
        => {
            sema.owner_decl.analysis = .dependency_failure;
            return error.AnalysisFail;
        },
        else => {},
    }

    if (struct_type.haveFieldTypes(ip)) return;

    // A true return from setTypesWip means resolution was already in
    // progress, i.e. the struct (transitively) depends on itself.
    if (struct_type.setTypesWip(ip)) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(owner_decl).srcLoc(mod),
            "struct '{}' depends on itself",
            .{Type.fromInterned(ty).fmt(mod)},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
    defer struct_type.clearTypesWip(ip);

    try semaStructFields(mod, sema.arena, struct_type);
}
|
|
|
|
/// Resolves a struct's field default-init values. Layout is resolved first
/// (init values may depend on type layout — see `semaStructFields`), then the
/// init ZIR bodies are run via `semaStructFieldInits`.
pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const struct_type = mod.typeToStruct(ty) orelse return;
    const owner_decl = struct_type.decl.unwrap() orelse return;

    // Inits can start as resolved
    if (struct_type.haveFieldInits(ip)) return;

    try sema.resolveStructLayout(ty);

    // A true return from setInitsWip means init resolution was already in
    // progress, i.e. the struct (transitively) depends on itself.
    if (struct_type.setInitsWip(ip)) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            mod.declPtr(owner_decl).srcLoc(mod),
            "struct '{}' depends on itself",
            .{ty.fmt(mod)},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
    defer struct_type.clearInitsWip(ip);

    try semaStructFieldInits(mod, sema.arena, struct_type);
    struct_type.setHaveFieldInits(ip);
}
|
|
|
|
/// Resolves the field names/types of a union via `semaUnionFields`,
/// propagating prior failures of the owning declaration and reporting
/// self-dependency.
pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const owner_decl = mod.declPtr(union_type.decl);
    // If the owning declaration already failed analysis, propagate the
    // failure instead of re-analyzing.
    switch (owner_decl.analysis) {
        .file_failure,
        .dependency_failure,
        .sema_failure,
        => {
            sema.owner_decl.analysis = .dependency_failure;
            return error.AnalysisFail;
        },
        else => {},
    }
    switch (union_type.flagsPtr(ip).status) {
        .none => {},
        // Field types requested while already being resolved: the union
        // (transitively) depends on itself.
        .field_types_wip => {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                owner_decl.srcLoc(mod),
                "union '{}' depends on itself",
                .{ty.fmt(mod)},
            );
            return sema.failWithOwnedErrorMsg(null, msg);
        },
        // Field types already resolved (or a later stage is in progress).
        .have_field_types,
        .have_layout,
        .layout_wip,
        .fully_resolved_wip,
        .fully_resolved,
        => return,
    }

    // Mark as in progress for cycle detection; roll back on failure.
    union_type.flagsPtr(ip).status = .field_types_wip;
    errdefer union_type.flagsPtr(ip).status = .none;
    try semaUnionFields(mod, sema.arena, union_type);
    union_type.flagsPtr(ip).status = .have_field_types;
}
|
|
|
|
/// Returns a normal error set corresponding to the fully populated inferred
/// error set.
fn resolveInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies_index: InternPool.Index,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const func_index = ip.iesFuncIndex(ies_index);
    const func = mod.funcInfo(func_index);
    // Already resolved: return the cached concrete error set.
    const resolved_ty = func.resolvedErrorSet(ip).*;
    if (resolved_ty != .none) return resolved_ty;
    // The owning function's body is currently being analyzed, so its error
    // set cannot be known yet.
    if (func.analysis(ip).state == .in_progress)
        return sema.fail(block, src, "unable to resolve inferred error set", .{});

    // In order to ensure that all dependencies are properly added to the set,
    // we need to ensure the function body is analyzed of the inferred error
    // set. However, in the case of comptime/inline function calls with
    // inferred error sets, each call gets an adhoc InferredErrorSet object, which
    // has no corresponding function body.
    const ies_func_owner_decl = mod.declPtr(func.owner_decl);
    const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
    // if ies declared by a inline function with generic return type, the return_type should be generic_poison,
    // because inline function does not create a new declaration, and the ies has been filled with analyzeCall,
    // so here we can simply skip this case.
    if (ies_func_info.return_type == .generic_poison_type) {
        assert(ies_func_info.cc == .Inline);
    } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
        // Generic functions have no single body to analyze; their error set
        // cannot be resolved here.
        if (ies_func_info.is_generic) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        }
        // In this case we are dealing with the actual InferredErrorSet object that
        // corresponds to the function, not one created to track an inline/comptime call.
        try sema.ensureFuncBodyAnalyzed(func_index);
    }

    // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
    // which calls `resolveInferredErrorSetPtr`.
    const final_resolved_ty = func.resolvedErrorSet(ip).*;
    assert(final_resolved_ty != .none);
    return final_resolved_ty;
}
|
|
|
|
/// Resolves an in-memory `InferredErrorSet` in place: folds in the errors of
/// every dependency inferred error set, then interns the accumulated name set
/// into `ies.resolved`. No-op if already resolved.
pub fn resolveInferredErrorSetPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies: *InferredErrorSet,
) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (ies.resolved != .none) return;

    // The IES of the function currently being analyzed; skipped below to
    // avoid resolving through itself.
    const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());

    for (ies.inferred_error_sets.keys()) |other_ies_index| {
        if (ies_index == other_ies_index) continue;
        switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
            // Any dependency resolving to anyerror makes the whole set anyerror.
            .anyerror_type => {
                ies.resolved = .anyerror_type;
                return;
            },
            else => |error_set_ty_index| {
                // Merge the dependency's error names into this set.
                const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
                for (names.get(ip)) |name| {
                    try ies.errors.put(sema.arena, name, {});
                }
            },
        }
    }

    const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
    ies.resolved = resolved_error_set_ty.toIntern();
}
|
|
|
|
/// If `value` has an error-union type whose error set is the ad-hoc inferred
/// error set, re-interns the value at the resolved error-union type.
/// Otherwise returns `value` unchanged.
fn resolveAdHocInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    value: InternPool.Index,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const resolved_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
    // `.none` means the value's type was not an ad-hoc IES error union.
    if (resolved_ty == .none) return value;
    return ip.getCoerced(gpa, value, resolved_ty);
}
|
|
|
|
/// If `ty` is an error union over the ad-hoc inferred error set, resolves the
/// current function's IES and returns the concrete error-union type.
/// Returns `.none` when `ty` is unaffected or no IES is being inferred.
fn resolveAdHocInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    const ies = sema.fn_ret_ty_ies orelse return .none;
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const error_union_info = switch (ip.indexToKey(ty)) {
        .error_union_type => |x| x,
        else => return .none,
    };
    if (error_union_info.error_set_type != .adhoc_inferred_error_set_type)
        return .none;

    try sema.resolveInferredErrorSetPtr(block, src, ies);
    // Build the concrete error union with the resolved error set.
    return ip.get(gpa, .{ .error_union_type = .{
        .error_set_type = ies.resolved,
        .payload_type = error_union_info.payload_type,
    } });
}
|
|
|
|
/// Normalizes an error-set type: `anyerror` and concrete error sets pass
/// through; inferred error sets are resolved to their concrete form.
/// Asserts `ty` is an error-set type.
fn resolveInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    if (ty == .anyerror_type) return ty;
    const ip = &sema.mod.intern_pool;
    return switch (ip.indexToKey(ty)) {
        .error_set_type => ty,
        .inferred_error_set_type => try sema.resolveInferredErrorSet(block, src, ty),
        else => unreachable,
    };
}
|
|
|
|
/// Decodes the header of a `struct_decl` ZIR extended instruction.
/// Returns the field count, the decoded `Small` flags, and the index into
/// `zir.extra` where the per-field data begins (optional lengths, the backing
/// integer payload, and decls have all been skipped).
fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
    /// fields_len
    usize,
    Zir.Inst.StructDecl.Small,
    /// extra_index
    usize,
} {
    const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
    assert(extended.opcode == .struct_decl);
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
    var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;

    var fields_len: usize = 0;
    if (small.has_fields_len) {
        fields_len = zir.extra[extra_index];
        extra_index += 1;
    }

    var decls_len: usize = 0;
    if (small.has_decls_len) {
        decls_len = zir.extra[extra_index];
        extra_index += 1;
    }

    // The backing integer cannot be handled until `resolveStructLayout()`;
    // only skip past its payload here.
    if (small.has_backing_int) {
        const backing_int_body_len = zir.extra[extra_index];
        extra_index += 1; // backing_int_body_len
        extra_index += if (backing_int_body_len == 0)
            1 // backing_int_ref
        else
            backing_int_body_len; // backing_int_body_inst
    }

    // Skip over decls.
    extra_index += decls_len;

    return .{ fields_len, small, extra_index };
}
|
|
|
|
/// Analyzes the ZIR of a struct declaration, populating the struct's field
/// names, types, and alignments in the InternPool. Field default inits are
/// only counted here; their bodies run later in `semaStructFieldInits`.
fn semaStructFields(
    mod: *Module,
    arena: Allocator,
    struct_type: InternPool.Key.StructType,
) CompileError!void {
    const gpa = mod.gpa;
    const ip = &mod.intern_pool;
    const decl_index = struct_type.decl.unwrap() orelse return;
    const decl = mod.declPtr(decl_index);
    const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
    const zir = mod.namespacePtr(namespace_index).file_scope.zir;
    const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);

    const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);

    // Fieldless structs are fully laid out immediately (a packed struct still
    // needs its backing integer resolved).
    if (fields_len == 0) switch (struct_type.layout) {
        .Packed => {
            try semaBackingIntType(mod, struct_type);
            return;
        },
        .Auto, .Extern => {
            struct_type.size(ip).* = 0;
            struct_type.flagsPtr(ip).layout_resolved = true;
            return;
        },
    };

    var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
    defer comptime_mutable_decls.deinit();

    var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // A fresh Sema, owned by the struct's decl, for running the field bodies.
    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = arena,
        .code = zir,
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_mutable_decls = &comptime_mutable_decls,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl_index,
        .namespace = namespace_index,
        .wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer assert(block_scope.instructions.items.len == 0);

    // Decoded per-field header info gathered in the first pass below.
    const Field = struct {
        type_body_len: u32 = 0,
        align_body_len: u32 = 0,
        init_body_len: u32 = 0,
        type_ref: Zir.Inst.Ref = .none,
    };
    const fields = try sema.arena.alloc(Field, fields_len);

    var any_inits = false;
    // NOTE(review): `any_aligned` is tracked but not read in this function's
    // visible body — presumably consumed by a later change or vestigial.
    var any_aligned = false;

    // First pass: decode the per-field flag bit-bags and headers from
    // zir.extra. Each field contributes 4 flag bits, packed 8 fields per u32.
    {
        const bits_per_field = 4;
        const fields_per_u32 = 32 / bits_per_field;
        const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
        const flags_index = extra_index;
        var bit_bag_index: usize = flags_index;
        extra_index += bit_bags_count;
        var cur_bit_bag: u32 = undefined;
        var field_i: u32 = 0;
        while (field_i < fields_len) : (field_i += 1) {
            // Refill the bag every `fields_per_u32` fields.
            if (field_i % fields_per_u32 == 0) {
                cur_bit_bag = zir.extra[bit_bag_index];
                bit_bag_index += 1;
            }
            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;

            if (is_comptime) struct_type.setFieldComptime(ip, field_i);

            // Tuples have no field names in ZIR.
            var opt_field_name_zir: ?[:0]const u8 = null;
            if (!small.is_tuple) {
                opt_field_name_zir = zir.nullTerminatedString(@enumFromInt(zir.extra[extra_index]));
                extra_index += 1;
            }
            extra_index += 1; // doc_comment

            fields[field_i] = .{};

            // The field type is either a direct ref or a body to evaluate.
            if (has_type_body) {
                fields[field_i].type_body_len = zir.extra[extra_index];
            } else {
                fields[field_i].type_ref = @enumFromInt(zir.extra[extra_index]);
            }
            extra_index += 1;

            // This string needs to outlive the ZIR code.
            if (opt_field_name_zir) |field_name_zir| {
                const field_name = try ip.getOrPutString(gpa, field_name_zir);
                assert(struct_type.addFieldName(ip, field_name) == null);
            }

            if (has_align) {
                fields[field_i].align_body_len = zir.extra[extra_index];
                extra_index += 1;
                any_aligned = true;
            }
            if (has_init) {
                fields[field_i].init_body_len = zir.extra[extra_index];
                extra_index += 1;
                any_inits = true;
            }
        }
    }

    // Next we do only types and alignments, saving the inits for a second pass,
    // so that init values may depend on type layout.

    for (fields, 0..) |zir_field, field_i| {
        // Resolve the field type, retrying with a real source location only
        // when an error actually needs one (the `.unneeded` fast path).
        const field_ty: Type = ty: {
            if (zir_field.type_ref != .none) {
                break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const ty_src = mod.fieldSrcLoc(decl_index, .{
                            .index = field_i,
                            .range = .type,
                        }).lazy;
                        _ = try sema.resolveType(&block_scope, ty_src, zir_field.type_ref);
                        unreachable;
                    },
                    else => |e| return e,
                };
            }
            assert(zir_field.type_body_len != 0);
            const body = zir.bodySlice(extra_index, zir_field.type_body_len);
            extra_index += body.len;
            const ty_ref = try sema.resolveBody(&block_scope, body, zir_index);
            break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const ty_src = mod.fieldSrcLoc(decl_index, .{
                        .index = field_i,
                        .range = .type,
                    }).lazy;
                    _ = try sema.analyzeAsType(&block_scope, ty_src, ty_ref);
                    unreachable;
                },
                else => |e| return e,
            };
        };
        if (field_ty.isGenericPoison()) {
            return error.GenericPoison;
        }

        struct_type.field_types.get(ip)[field_i] = field_ty.toIntern();

        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(decl_index, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        }
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(decl_index, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "struct fields cannot be 'noreturn'", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        }
        // extern/packed layouts restrict which field types are allowed.
        switch (struct_type.layout) {
            .Extern => if (!try sema.validateExternType(field_ty, .struct_field)) {
                const msg = msg: {
                    const ty_src = mod.fieldSrcLoc(decl_index, .{
                        .index = field_i,
                        .range = .type,
                    });
                    const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                    errdefer msg.destroy(sema.gpa);

                    try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field);

                    try sema.addDeclaredHereNote(msg, field_ty);
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block_scope, msg);
            },
            .Packed => if (!try sema.validatePackedType(field_ty)) {
                const msg = msg: {
                    const ty_src = mod.fieldSrcLoc(decl_index, .{
                        .index = field_i,
                        .range = .type,
                    });
                    const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                    errdefer msg.destroy(sema.gpa);

                    try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);

                    try sema.addDeclaredHereNote(msg, field_ty);
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block_scope, msg);
            },
            else => {},
        }

        // Evaluate the explicit alignment body, if any.
        if (zir_field.align_body_len > 0) {
            const body = zir.bodySlice(extra_index, zir_field.align_body_len);
            extra_index += body.len;
            const align_ref = try sema.resolveBody(&block_scope, body, zir_index);
            const field_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const align_src = mod.fieldSrcLoc(decl_index, .{
                        .index = field_i,
                        .range = .alignment,
                    }).lazy;
                    _ = try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
                    unreachable;
                },
                else => |e| return e,
            };
            struct_type.field_aligns.get(ip)[field_i] = field_align;
        }

        // Init bodies are skipped here; `semaStructFieldInits` runs them.
        extra_index += zir_field.init_body_len;
    }

    struct_type.clearTypesWip(ip);
    // With no inits to evaluate, inits are trivially resolved.
    if (!any_inits) struct_type.setHaveFieldInits(ip);

    // Intern any comptime-mutable decls created while running field bodies.
    for (comptime_mutable_decls.items) |ct_decl_index| {
        const ct_decl = mod.declPtr(ct_decl_index);
        _ = try ct_decl.internValue(mod);
    }
}
|
|
|
|
// This logic must be kept in sync with `semaStructFields`
|
|
fn semaStructFieldInits(
|
|
mod: *Module,
|
|
arena: Allocator,
|
|
struct_type: InternPool.Key.StructType,
|
|
) CompileError!void {
|
|
const gpa = mod.gpa;
|
|
const ip = &mod.intern_pool;
|
|
|
|
assert(!struct_type.haveFieldInits(ip));
|
|
|
|
const decl_index = struct_type.decl.unwrap() orelse return;
|
|
const decl = mod.declPtr(decl_index);
|
|
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
|
|
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
|
|
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
|
|
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
|
|
|
|
var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
|
|
defer comptime_mutable_decls.deinit();
|
|
|
|
var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa);
|
|
defer comptime_err_ret_trace.deinit();
|
|
|
|
var sema: Sema = .{
|
|
.mod = mod,
|
|
.gpa = gpa,
|
|
.arena = arena,
|
|
.code = zir,
|
|
.owner_decl = decl,
|
|
.owner_decl_index = decl_index,
|
|
.func_index = .none,
|
|
.func_is_naked = false,
|
|
.fn_ret_ty = Type.void,
|
|
.fn_ret_ty_ies = null,
|
|
.owner_func_index = .none,
|
|
.comptime_mutable_decls = &comptime_mutable_decls,
|
|
.comptime_err_ret_trace = &comptime_err_ret_trace,
|
|
};
|
|
defer sema.deinit();
|
|
|
|
var block_scope: Block = .{
|
|
.parent = null,
|
|
.sema = &sema,
|
|
.src_decl = decl_index,
|
|
.namespace = namespace_index,
|
|
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
|
|
.instructions = .{},
|
|
.inlining = null,
|
|
.is_comptime = true,
|
|
};
|
|
defer assert(block_scope.instructions.items.len == 0);
|
|
|
|
const Field = struct {
|
|
type_body_len: u32 = 0,
|
|
align_body_len: u32 = 0,
|
|
init_body_len: u32 = 0,
|
|
};
|
|
const fields = try sema.arena.alloc(Field, fields_len);
|
|
|
|
var any_inits = false;
|
|
|
|
{
|
|
const bits_per_field = 4;
|
|
const fields_per_u32 = 32 / bits_per_field;
|
|
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
|
|
const flags_index = extra_index;
|
|
var bit_bag_index: usize = flags_index;
|
|
extra_index += bit_bags_count;
|
|
var cur_bit_bag: u32 = undefined;
|
|
var field_i: u32 = 0;
|
|
while (field_i < fields_len) : (field_i += 1) {
|
|
if (field_i % fields_per_u32 == 0) {
|
|
cur_bit_bag = zir.extra[bit_bag_index];
|
|
bit_bag_index += 1;
|
|
}
|
|
const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
|
|
cur_bit_bag >>= 1;
|
|
const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
|
|
cur_bit_bag >>= 2;
|
|
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
|
|
cur_bit_bag >>= 1;
|
|
|
|
if (!small.is_tuple) {
|
|
extra_index += 1;
|
|
}
|
|
extra_index += 1; // doc_comment
|
|
|
|
fields[field_i] = .{};
|
|
|
|
if (has_type_body) fields[field_i].type_body_len = zir.extra[extra_index];
|
|
extra_index += 1;
|
|
|
|
if (has_align) {
|
|
fields[field_i].align_body_len = zir.extra[extra_index];
|
|
extra_index += 1;
|
|
}
|
|
if (has_init) {
|
|
fields[field_i].init_body_len = zir.extra[extra_index];
|
|
extra_index += 1;
|
|
any_inits = true;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (any_inits) {
|
|
for (fields, 0..) |zir_field, field_i| {
|
|
extra_index += zir_field.type_body_len;
|
|
extra_index += zir_field.align_body_len;
|
|
const body = zir.bodySlice(extra_index, zir_field.init_body_len);
|
|
extra_index += zir_field.init_body_len;
|
|
|
|
if (body.len == 0) continue;
|
|
|
|
// Pre-populate the type mapping the body expects to be there.
|
|
// In init bodies, the zir index of the struct itself is used
|
|
// to refer to the current field type.
|
|
|
|
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]);
|
|
const type_ref = Air.internedToRef(field_ty.toIntern());
|
|
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index});
|
|
sema.inst_map.putAssumeCapacity(zir_index, type_ref);
|
|
|
|
const init = try sema.resolveBody(&block_scope, body, zir_index);
|
|
const coerced = sema.coerce(&block_scope, field_ty, init, .unneeded) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
const init_src = mod.fieldSrcLoc(decl_index, .{
|
|
.index = field_i,
|
|
.range = .value,
|
|
}).lazy;
|
|
_ = try sema.coerce(&block_scope, field_ty, init, init_src);
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
const default_val = (try sema.resolveValue(coerced)) orelse {
|
|
const init_src = mod.fieldSrcLoc(decl_index, .{
|
|
.index = field_i,
|
|
.range = .value,
|
|
}).lazy;
|
|
return sema.failWithNeededComptime(&block_scope, init_src, .{
|
|
.needed_comptime_reason = "struct field default value must be comptime-known",
|
|
});
|
|
};
|
|
|
|
const field_init = try default_val.intern(field_ty, mod);
|
|
struct_type.field_inits.get(ip)[field_i] = field_init;
|
|
}
|
|
}
|
|
|
|
for (comptime_mutable_decls.items) |ct_decl_index| {
|
|
const ct_decl = mod.declPtr(ct_decl_index);
|
|
_ = try ct_decl.internValue(mod);
|
|
}
|
|
}
|
|
|
|
/// Performs semantic analysis of the fields of a `union` declaration.
/// Decodes the trailing data of the `union_decl` ZIR instruction, runs the
/// declaration body, resolves each field's type / alignment / explicit tag
/// value, validates the fields against an explicitly provided enum tag type
/// (if any), and finally stores the resolved field types and alignments
/// (plus a generated tag type when one is needed) into the InternPool.
fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = mod.gpa;
    const ip = &mod.intern_pool;
    const decl_index = union_type.decl;
    const zir = mod.namespacePtr(union_type.namespace).file_scope.zir;
    const zir_index = union_type.zir_index.unwrap().?.resolve(ip);
    const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
    assert(extended.opcode == .union_decl);
    const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
    // `extra_index` walks the trailing operands of the `union_decl`
    // instruction; each optional component below advances it only when the
    // corresponding `small` flag says the component is present.
    var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;

    const src = LazySrcLoc.nodeOffset(0);

    const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
        const ty_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
        extra_index += 1;
        break :blk ty_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) decls_len: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :decls_len decls_len;
    } else 0;

    // Skip over decls.
    extra_index += decls_len;

    const body = zir.bodySlice(extra_index, body_len);
    extra_index += body.len;

    const decl = mod.declPtr(decl_index);

    var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
    defer comptime_mutable_decls.deinit();

    var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // A temporary Sema owned by the union's Decl; all analysis below happens
    // in a single comptime block scope rooted at that Decl.
    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = arena,
        .code = zir,
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_mutable_decls = &comptime_mutable_decls,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl_index,
        .namespace = union_type.namespace,
        .wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    // Comptime-only analysis must not have emitted any runtime instructions.
    defer assert(block_scope.instructions.items.len == 0);

    if (body.len != 0) {
        try sema.analyzeBody(&block_scope, body);
    }

    // Intern any comptime-mutable decls created while running the body.
    for (comptime_mutable_decls.items) |ct_decl_index| {
        const ct_decl = mod.declPtr(ct_decl_index);
        _ = try ct_decl.internValue(mod);
    }

    // Three mutually exclusive tag-type situations, distinguished below:
    // - explicit integer tag type (`auto_enum_tag`): collect names + values,
    //   generate a numbered enum at the end;
    // - explicit enum tag type: match union fields against the enum's fields
    //   via `explicit_tags_seen`;
    // - no tag type: collect names only, generate a simple enum at the end.
    var int_tag_ty: Type = undefined;
    var enum_field_names: []InternPool.NullTerminatedString = &.{};
    var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
    var explicit_tags_seen: []bool = &.{};
    if (tag_type_ref != .none) {
        const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
        const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref);
        if (small.auto_enum_tag) {
            // The provided type is an integer type and we must construct the enum tag type here.
            int_tag_ty = provided_ty;
            if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) {
                return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)});
            }

            if (fields_len > 0) {
                // The largest default tag value is fields_len - 1; it must fit
                // in the provided integer type.
                const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1);
                if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{});
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{
                            int_tag_ty.fmt(mod),
                            fields_len - 1,
                        });
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(&block_scope, msg);
                }
                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
                try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len);
            }
        } else {
            // The provided type is the enum tag type.
            union_type.tagTypePtr(ip).* = provided_ty.toIntern();
            const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
                .enum_type => |x| x,
                else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(mod)}),
            };
            // The fields of the union must match the enum exactly.
            // A flag per field is used to check for missing and extraneous fields.
            explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
            @memset(explicit_tags_seen, false);
        }
    } else {
        // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis
        // purposes, we still auto-generate an enum tag type the same way. That the union is
        // untagged is represented by the Type tag (union vs union_tagged).
        enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
    }

    var field_types: std.ArrayListUnmanaged(InternPool.Index) = .{};
    var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .{};

    try field_types.ensureTotalCapacityPrecise(sema.arena, fields_len);
    if (small.any_aligned_fields)
        try field_aligns.ensureTotalCapacityPrecise(sema.arena, fields_len);

    // Per-field flags are packed 4 bits per field into u32 "bit bags" that
    // precede the per-field payload in `zir.extra`.
    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;
    const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
    var bit_bag_index: usize = extra_index;
    extra_index += bit_bags_count;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    var last_tag_val: ?Value = null;
    while (field_i < fields_len) : (field_i += 1) {
        if (field_i % fields_per_u32 == 0) {
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_type = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const unused = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        _ = unused;

        const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]);
        const field_name_zir = zir.nullTerminatedString(field_name_index);
        extra_index += 1;

        // doc_comment
        extra_index += 1;

        const field_type_ref: Zir.Inst.Ref = if (has_type) blk: {
            const field_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
            extra_index += 1;
            break :blk field_type_ref;
        } else .none;

        const align_ref: Zir.Inst.Ref = if (has_align) blk: {
            const align_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
            extra_index += 1;
            break :blk align_ref;
        } else .none;

        const tag_ref: Air.Inst.Ref = if (has_tag) blk: {
            const tag_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
            extra_index += 1;
            break :blk try sema.resolveInst(tag_ref);
        } else .none;

        // Nonzero capacity means we are collecting explicit tag values
        // (the `auto_enum_tag` case above).
        if (enum_field_vals.capacity() > 0) {
            const enum_tag_val = if (tag_ref != .none) blk: {
                // Explicit value given in source. On `NeededSourceLocation`,
                // re-run with a real source location to produce the error.
                const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const val_src = mod.fieldSrcLoc(union_type.decl, .{
                            .index = field_i,
                            .range = .value,
                        }).lazy;
                        _ = try sema.semaUnionFieldVal(&block_scope, val_src, int_tag_ty, tag_ref);
                        unreachable;
                    },
                    else => |e| return e,
                };
                last_tag_val = val;

                break :blk val;
            } else blk: {
                // No explicit value: previous value + 1, or 0 for the first field.
                const val = if (last_tag_val) |val|
                    try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined)
                else
                    try mod.intValue(int_tag_ty, 0);
                last_tag_val = val;

                break :blk val;
            };
            // Reject duplicate tag values.
            const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern());
            if (gop.found_existing) {
                const field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = field_i }).lazy;
                const other_field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = gop.index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block_scope, msg);
            }
        }

        // This string needs to outlive the ZIR code.
        const field_name = try ip.getOrPutString(gpa, field_name_zir);
        if (enum_field_names.len != 0) {
            enum_field_names[field_i] = field_name;
        }

        // `void` when no type was written; `noreturn` is encoded as a present
        // type with a `.none` ref.
        const field_ty: Type = if (!has_type)
            Type.void
        else if (field_type_ref == .none)
            Type.noreturn
        else
            sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                        .index = field_i,
                        .range = .type,
                    }).lazy;
                    _ = try sema.resolveType(&block_scope, ty_src, field_type_ref);
                    unreachable;
                },
                else => |e| return e,
            };

        if (field_ty.isGenericPoison()) {
            return error.GenericPoison;
        }

        // Explicit enum tag type: every union field must name an enum field,
        // in the same order.
        if (explicit_tags_seen.len > 0) {
            const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
            const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                    .index = field_i,
                    .range = .name,
                }).lazy;
                return sema.fail(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{
                    field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(mod),
                });
            };

            // No check for duplicate because the check already happened in order
            // to create the enum type in the first place.
            assert(!explicit_tags_seen[enum_index]);
            explicit_tags_seen[enum_index] = true;

            // Enforce the enum fields and the union fields being in the same order.
            if (enum_index != field_i) {
                const msg = msg: {
                    const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                        .index = field_i,
                        .range = .name,
                    }).lazy;
                    const enum_field_src = mod.fieldSrcLoc(tag_info.decl, .{ .index = enum_index }).lazy;
                    const msg = try sema.errMsg(&block_scope, ty_src, "union field '{}' ordered differently than corresponding enum field", .{
                        field_name.fmt(ip),
                    });
                    errdefer msg.destroy(sema.gpa);
                    const decl_ptr = mod.declPtr(tag_info.decl);
                    try mod.errNoteNonLazy(decl_ptr.toSrcLoc(enum_field_src, mod), msg, "enum field here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block_scope, msg);
            }
        }

        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        }
        // extern/packed unions restrict which field types are allowed.
        const layout = union_type.getLayout(ip);
        if (layout == .Extern and
            !try sema.validateExternType(field_ty, .union_field))
        {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_type.decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        }

        field_types.appendAssumeCapacity(field_ty.toIntern());

        if (small.any_aligned_fields) {
            field_aligns.appendAssumeCapacity(if (align_ref != .none)
                sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const align_src = mod.fieldSrcLoc(union_type.decl, .{
                            .index = field_i,
                            .range = .alignment,
                        }).lazy;
                        _ = try sema.resolveAlign(&block_scope, align_src, align_ref);
                        unreachable;
                    },
                    else => |e| return e,
                }
            else
                .none);
        } else {
            assert(align_ref == .none);
        }
    }

    union_type.setFieldTypes(ip, field_types.items);
    union_type.setFieldAligns(ip, field_aligns.items);

    if (explicit_tags_seen.len > 0) {
        // Explicit enum tag type: every enum field must have been matched.
        const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
        if (tag_info.names.len > fields_len) {
            const msg = msg: {
                const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{});
                errdefer msg.destroy(sema.gpa);

                for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                    if (explicit_tags_seen[field_index]) continue;
                    try sema.addFieldErrNote(Type.fromInterned(union_type.tagTypePtr(ip).*), field_index, msg, "field '{}' missing, declared here", .{
                        field_name.fmt(ip),
                    });
                }
                try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*));
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        }
    } else if (enum_field_vals.count() > 0) {
        // Integer tag type with collected values: generate a numbered enum.
        const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), mod.declPtr(union_type.decl));
        union_type.tagTypePtr(ip).* = enum_ty;
    } else {
        // No tag type given: generate an auto-numbered enum from field names.
        const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_type.decl.toOptional());
        union_type.tagTypePtr(ip).* = enum_ty;
    }
}
|
|
|
|
/// Resolves an explicit union field tag value: coerces `tag_ref` to the
/// integer tag type and requires the result to be comptime-known.
fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value {
    return sema.resolveConstDefinedValue(
        block,
        src,
        try sema.coerce(block, int_tag_ty, tag_ref, src),
        .{ .needed_comptime_reason = "enum tag value must be comptime-known" },
    );
}
|
|
|
|
/// Creates and interns the generated enum tag type for a union whose tag
/// values were collected explicitly (`tag_mode = .explicit`). The enum is
/// owned by a fresh anonymous `Decl` named
/// `@typeInfo(<union>).Union.tag_type.?` and carries the given field names
/// and interned tag values. Returns the InternPool index of the enum type.
fn generateUnionTagTypeNumbered(
    sema: *Sema,
    block: *Block,
    enum_field_names: []const InternPool.NullTerminatedString,
    enum_field_vals: []const InternPool.Index,
    decl: *Module.Decl,
) !InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const src_decl = mod.declPtr(block.src_decl);
    const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
    errdefer mod.destroyDecl(new_decl_index);
    const fqn = try decl.fullyQualifiedName(mod);
    const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
    // The decl is created with placeholder noreturn/unreachable type+value;
    // it is patched to the real enum type after interning below.
    try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, name);
    errdefer mod.abortAnonDecl(new_decl_index);

    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    new_decl.name_fully_qualified = true;

    // Tag integer type is taken from the first collected value; u0 when
    // there are no values at all.
    const enum_ty = try ip.getEnum(gpa, .{
        .decl = new_decl_index,
        .namespace = .none,
        .tag_ty = if (enum_field_vals.len == 0)
            (try mod.intType(.unsigned, 0)).toIntern()
        else
            ip.typeOf(enum_field_vals[0]),
        .names = enum_field_names,
        .values = enum_field_vals,
        .tag_mode = .explicit,
        .zir_index = .none,
    });

    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(enum_ty);

    try mod.finalizeAnonDecl(new_decl_index);
    return enum_ty;
}
|
|
|
|
/// Creates and interns the auto-numbered enum tag type for a union without
/// explicit tag values (`tag_mode = .auto`). The tag integer type is the
/// smallest unsigned int that can index all fields (u0 when there are none).
/// When `maybe_decl_index` names the union's Decl, the enum decl is given the
/// fully-qualified `@typeInfo(<union>).Union.tag_type.?` name; otherwise a
/// plain anonymous decl is created. Returns the InternPool index of the enum.
fn generateUnionTagTypeSimple(
    sema: *Sema,
    block: *Block,
    enum_field_names: []const InternPool.NullTerminatedString,
    maybe_decl_index: InternPool.OptionalDeclIndex,
) !InternPool.Index {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;

    const new_decl_index = new_decl_index: {
        const decl_index = maybe_decl_index.unwrap() orelse {
            break :new_decl_index try mod.createAnonymousDecl(block, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            });
        };
        const fqn = try mod.declPtr(decl_index).fullyQualifiedName(mod);
        const src_decl = mod.declPtr(block.src_decl);
        const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
        errdefer mod.destroyDecl(new_decl_index);
        const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
        // Placeholder noreturn/unreachable type+value; patched to the real
        // enum type after interning below.
        try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
            .ty = Type.noreturn,
            .val = Value.@"unreachable",
        }, name);
        mod.declPtr(new_decl_index).name_fully_qualified = true;
        break :new_decl_index new_decl_index;
    };
    errdefer mod.abortAnonDecl(new_decl_index);

    const enum_ty = try ip.getEnum(gpa, .{
        .decl = new_decl_index,
        .namespace = .none,
        .tag_ty = if (enum_field_names.len == 0)
            (try mod.intType(.unsigned, 0)).toIntern()
        else
            (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
        .names = enum_field_names,
        .values = &.{},
        .tag_mode = .auto,
        .zir_index = .none,
    });

    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    new_decl.ty = Type.type;
    new_decl.val = Value.fromInterned(enum_ty);

    try mod.finalizeAnonDecl(new_decl_index);
    return enum_ty;
}
|
|
|
|
/// Analyzes a reference to the declaration `name` inside `std.builtin`,
/// using a fresh comptime block rooted at the owner decl.
fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
    const owner = sema.owner_decl;

    var comptime_block: Block = .{
        .parent = null,
        .sema = sema,
        .src_decl = sema.owner_decl_index,
        .namespace = owner.src_namespace,
        .wip_capture_scope = try sema.mod.createCaptureScope(owner.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer comptime_block.instructions.deinit(sema.gpa);

    const decl_index = try getBuiltinDecl(sema, &comptime_block, name);
    return sema.analyzeDeclVal(&comptime_block, LazySrcLoc.nodeOffset(0), decl_index);
}
|
|
|
|
/// Looks up the declaration `name` in `std.builtin` and returns its Decl
/// index. Resolution path: import the `std` module, find its `builtin` decl,
/// load and interpret it as a type, then look up `name` in that namespace.
/// Panics (rather than producing a compile error) if `lib/std.zig` or
/// `lib/std/builtin.zig` is missing the expected declarations.
fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex {
    const gpa = sema.gpa;

    const src = LazySrcLoc.nodeOffset(0);

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const std_mod = mod.std_mod;
    const std_file = (mod.importPkg(std_mod) catch unreachable).file;
    // Reference to `std.builtin` within the std root file's namespace.
    const opt_builtin_inst = (try sema.namespaceLookupRef(
        block,
        src,
        mod.declPtr(std_file.root_decl.unwrap().?).src_namespace,
        try ip.getOrPutString(gpa, "builtin"),
    )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
    const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src);
    const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) {
        error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}),
        else => |e| return e,
    };
    // Finally resolve `name` inside the `std.builtin` namespace.
    const decl_index = (try sema.namespaceLookup(
        block,
        src,
        builtin_ty.getNamespaceIndex(mod).unwrap().?,
        try ip.getOrPutString(gpa, name),
    )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name});
    return decl_index;
}
|
|
|
|
/// Resolves the declaration `name` from `std.builtin` as a fully-resolved
/// `Type`. Panics if the declaration fails analysis (corrupt std.builtin).
fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
    const ty_inst = try sema.getBuiltin(name);
    const owner = sema.owner_decl;

    var comptime_block: Block = .{
        .parent = null,
        .sema = sema,
        .src_decl = sema.owner_decl_index,
        .namespace = owner.src_namespace,
        .wip_capture_scope = try sema.mod.createCaptureScope(owner.src_scope),
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer comptime_block.instructions.deinit(sema.gpa);

    const src = LazySrcLoc.nodeOffset(0);
    const result_ty = sema.analyzeAsType(&comptime_block, src, ty_inst) catch |err| switch (err) {
        error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}),
        else => |e| return e,
    };
    try sema.resolveTypeFully(result_ty); // Should not fail
    return result_ty;
}
|
|
|
|
/// There is another implementation of this in `Type.onePossibleValue`. This one
/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
/// Returns the single possible value of `ty`, or `null` when the type has
/// more than one possible value. May return `error.GenericPoison` for the
/// generic poison type, or a compile error when a struct/union depends on
/// itself.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // First switch on the statically-known InternPool indices.
    return switch (ty.toIntern()) {
        .u0_type,
        .i0_type,
        => try mod.intValue(ty, 0),
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .type_type,
        .anyerror_type,
        .adhoc_inferred_error_set_type,
        .comptime_int_type,
        .comptime_float_type,
        .enum_literal_type,
        .atomic_order_type,
        .atomic_rmw_op_type,
        .calling_convention_type,
        .address_space_type,
        .float_mode_type,
        .reduce_op_type,
        .call_modifier_type,
        .prefetch_options_type,
        .export_options_type,
        .extern_options_type,
        .type_info_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .single_const_pointer_to_comptime_int_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .anyerror_void_error_union_type,
        => null,
        .void_type => Value.void,
        .noreturn_type => Value.@"unreachable",
        .anyframe_type => unreachable,
        .null_type => Value.null,
        .undefined_type => Value.undef,
        .optional_noreturn_type => try mod.nullValue(ty),
        .generic_poison_type => error.GenericPoison,
        .empty_struct_type => Value.empty_struct,
        // values, not types
        .undef,
        .zero,
        .zero_usize,
        .zero_u8,
        .one,
        .one_usize,
        .one_u8,
        .four_u8,
        .negative_one,
        .calling_convention_c,
        .calling_convention_inline,
        .void_value,
        .unreachable_value,
        .null_value,
        .bool_true,
        .bool_false,
        .empty_struct,
        .generic_poison,
        // invalid
        .var_args_param_type,
        .none,
        => unreachable,

        // Dynamically-created types: switch on the InternPool item tag; only
        // the aggregate-like tags below need the full key decoded.
        _ => switch (ip.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
            .type_int_signed, // i0 handled above
            .type_int_unsigned, // u0 handled above
            .type_pointer,
            .type_slice,
            .type_optional, // ?noreturn handled above
            .type_anyframe,
            .type_error_union,
            .type_anyerror_union,
            .type_error_set,
            .type_inferred_error_set,
            .type_opaque,
            .type_function,
            => null,

            .simple_type, // handled above
            // values, not types
            .undef,
            .simple_value,
            .ptr_decl,
            .ptr_anon_decl,
            .ptr_anon_decl_aligned,
            .ptr_mut_decl,
            .ptr_comptime_field,
            .ptr_int,
            .ptr_eu_payload,
            .ptr_opt_payload,
            .ptr_elem,
            .ptr_field,
            .ptr_slice,
            .opt_payload,
            .opt_null,
            .int_u8,
            .int_u16,
            .int_u32,
            .int_i32,
            .int_usize,
            .int_comptime_int_u32,
            .int_comptime_int_i32,
            .int_small,
            .int_positive,
            .int_negative,
            .int_lazy_align,
            .int_lazy_size,
            .error_set_error,
            .error_union_error,
            .error_union_payload,
            .enum_literal,
            .enum_tag,
            .float_f16,
            .float_f32,
            .float_f64,
            .float_f80,
            .float_f128,
            .float_c_longdouble_f80,
            .float_c_longdouble_f128,
            .float_comptime_float,
            .variable,
            .extern_func,
            .func_decl,
            .func_instance,
            .func_coerced,
            .only_possible_value,
            .union_value,
            .bytes,
            .aggregate,
            .repeated,
            // memoized value, not types
            .memoized_call,
            => unreachable,

            .type_array_big,
            .type_array_small,
            .type_vector,
            .type_enum_auto,
            .type_enum_explicit,
            .type_enum_nonexhaustive,
            .type_struct,
            .type_struct_ns,
            .type_struct_anon,
            .type_struct_packed,
            .type_struct_packed_inits,
            .type_tuple_anon,
            .type_union,
            => switch (ip.indexToKey(ty.toIntern())) {
                // Arrays/vectors: empty => empty aggregate; otherwise OPV of
                // the element type repeated, if the element type has one.
                inline .array_type, .vector_type => |seq_type, seq_tag| {
                    const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
                    if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = &.{} },
                    } })));

                    if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| {
                        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
                            .ty = ty.toIntern(),
                            .storage = .{ .repeated_elem = opv.toIntern() },
                        } })));
                    }
                    return null;
                },

                .struct_type => |struct_type| {
                    try sema.resolveTypeFields(ty);

                    if (struct_type.field_types.len == 0) {
                        // In this case the struct has no fields at all and
                        // therefore has one possible value.
                        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
                            .ty = ty.toIntern(),
                            .storage = .{ .elems = &.{} },
                        } })));
                    }

                    const field_vals = try sema.arena.alloc(
                        InternPool.Index,
                        struct_type.field_types.len,
                    );
                    for (field_vals, 0..) |*field_val, i| {
                        if (struct_type.fieldIsComptime(ip, i)) {
                            // Comptime fields contribute their init value.
                            try sema.resolveStructFieldInits(ty);
                            field_val.* = struct_type.field_inits.get(ip)[i];
                            continue;
                        }
                        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                        if (field_ty.eql(ty, mod)) {
                            // Self-referential field: report dependency loop.
                            const msg = try Module.ErrorMsg.create(
                                sema.gpa,
                                mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
                                "struct '{}' depends on itself",
                                .{ty.fmt(mod)},
                            );
                            try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
                            return sema.failWithOwnedErrorMsg(null, msg);
                        }
                        if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
                            field_val.* = try field_opv.intern(field_ty, mod);
                        } else return null;
                    }

                    // In this case the struct has no runtime-known fields and
                    // therefore has one possible value.
                    return Value.fromInterned((try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = field_vals },
                    } })));
                },

                .anon_struct_type => |tuple| {
                    for (tuple.values.get(ip)) |val| {
                        if (val == .none) return null;
                    }
                    // In this case the struct has all comptime-known fields and
                    // therefore has one possible value.
                    // TODO: write something like getCoercedInts to avoid needing to dupe
                    return Value.fromInterned((try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) },
                    } })));
                },

                .union_type => |union_type| {
                    try sema.resolveTypeFields(ty);
                    const union_obj = ip.loadUnionType(union_type);
                    // The union has one possible value only when its tag type
                    // and its first field type both do.
                    const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.enum_tag_ty))) orelse
                        return null;
                    if (union_obj.field_types.len == 0) {
                        const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
                        return Value.fromInterned(only);
                    }
                    const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
                    if (only_field_ty.eql(ty, mod)) {
                        // Self-referential field: report dependency loop.
                        const msg = try Module.ErrorMsg.create(
                            sema.gpa,
                            mod.declPtr(union_obj.decl).srcLoc(mod),
                            "union '{}' depends on itself",
                            .{ty.fmt(mod)},
                        );
                        try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{});
                        return sema.failWithOwnedErrorMsg(null, msg);
                    }
                    const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse
                        return null;
                    const only = try mod.intern(.{ .un = .{
                        .ty = ty.toIntern(),
                        .tag = tag_val.toIntern(),
                        .val = val_val.toIntern(),
                    } });
                    return Value.fromInterned(only);
                },

                .enum_type => |enum_type| switch (enum_type.tag_mode) {
                    .nonexhaustive => {
                        if (enum_type.tag_ty == .comptime_int_type) return null;

                        // A nonexhaustive enum has one possible value only
                        // when its integer tag type does.
                        if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
                            const only = try mod.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = int_opv.toIntern(),
                            } });
                            return Value.fromInterned(only);
                        }

                        return null;
                    },
                    .auto, .explicit => {
                        if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;

                        return Value.fromInterned(switch (enum_type.names.len) {
                            0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
                            1 => try mod.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = if (enum_type.values.len == 0)
                                    (try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
                                else
                                    try mod.intern_pool.getCoercedInts(
                                        mod.gpa,
                                        mod.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
                                        enum_type.tag_ty,
                                    ),
                            } }),
                            else => return null,
                        });
                    },
                },

                else => unreachable,
            },
        },
    };
}
|
|
|
|
/// Returns the type of the AIR instruction.
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
    const tmp_air = sema.getTmpAir();
    return tmp_air.typeOf(inst, &sema.mod.intern_pool);
}
|
|
|
|
/// Constructs a temporary `Air` view over the instructions and extra data
/// accumulated so far. The result borrows Sema's buffers; nothing is copied.
pub fn getTmpAir(sema: Sema) Air {
    const insts = sema.air_instructions.slice();
    const extra = sema.air_extra.items;
    return .{
        .instructions = insts,
        .extra = extra,
    };
}
|
|
|
|
/// Reserves space for one `extra` record and serializes it into `air_extra`.
/// Returns the index at which the record begins.
pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
    const field_count = std.meta.fields(@TypeOf(extra)).len;
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, field_count);
    return sema.addExtraAssumeCapacity(extra);
}
|
|
|
|
/// Serializes `extra` (a struct whose fields are all u32-sized) into `air_extra`,
/// one u32 per field in declaration order, and returns the index where the
/// record begins. Caller must have reserved capacity, e.g. via `addExtra`.
pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    // Start index of this record; callers store it in an instruction payload.
    const result: u32 = @intCast(sema.air_extra.items.len);
    inline for (fields) |field| {
        sema.air_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            // i32 is stored as its two's-complement bit pattern.
            i32 => @bitCast(@field(extra, field.name)),
            // Both of these are u32-backed enums.
            Air.Inst.Ref, InternPool.Index => @intFromEnum(@field(extra, field.name)),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}
|
|
|
|
/// Appends a slice of instruction refs to `air_extra` as raw u32s.
/// `Air.Inst.Ref` is a u32-backed enum, so the slice reinterpretation is sound.
fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(refs));
}
|
|
|
|
/// If `inst_index` is a `br` instruction, returns the block instruction it
/// breaks to; otherwise returns null.
fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index {
    const tags = sema.air_instructions.items(.tag);
    const datas = sema.air_instructions.items(.data);
    const i = @intFromEnum(inst_index);
    return switch (tags[i]) {
        .br => datas[i].br.block_inst,
        else => null,
    };
}
|
|
|
|
/// Whether `inst` resolves to a comptime-known value.
fn isComptimeKnown(
    sema: *Sema,
    inst: Air.Inst.Ref,
) !bool {
    const maybe_val = try sema.resolveValue(inst);
    return maybe_val != null;
}
|
|
|
|
/// Creates an anonymous, comptime-mutable Decl of type `var_type` and returns
/// a constant pointer to it (a `mut_decl` pointer value). Used to back
/// comptime `var` allocations.
fn analyzeComptimeAlloc(
    sema: *Sema,
    block: *Block,
    var_type: Type,
    alignment: Alignment,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    // Needed to make an anon decl with type `var_type` (the `finish()` call below).
    _ = try sema.typeHasOnePossibleValue(var_type);

    const ptr_type = try sema.ptrType(.{
        .child = var_type.toIntern(),
        .flags = .{
            .alignment = alignment,
            .address_space = target_util.defaultAddressSpace(mod.getTarget(), .global_constant),
        },
    });

    var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl
    defer anon_decl.deinit();

    const decl_index = try anon_decl.finish(
        var_type,
        // There will be stores before the first load, but they may be to sub-elements or
        // sub-fields. So we need to initialize with undef to allow the mechanism to expand
        // into fields/elements and have those overridden with stored values.
        Value.fromInterned((try mod.intern(.{ .undef = var_type.toIntern() }))),
        alignment,
    );
    const decl = mod.declPtr(decl_index);
    decl.alignment = alignment;

    // Track the decl so comptime mutations to it are flushed/validated later.
    try sema.comptime_mutable_decls.append(decl_index);
    return Air.internedToRef((try mod.intern(.{ .ptr = .{
        .ty = ptr_type.toIntern(),
        .addr = .{ .mut_decl = .{
            .decl = decl_index,
            .runtime_index = block.runtime_index,
        } },
    } })));
}
|
|
|
|
/// The places where a user can specify an address space attribute
pub const AddressSpaceContext = enum {
    /// A function is specified to be placed in a certain address space.
    function,

    /// A (global) variable is specified to be placed in a certain address space.
    /// In contrast to .constant, these values (and thus the address space they will be
    /// placed in) are required to be mutable.
    variable,

    /// A (global) constant value is specified to be placed in a certain address space.
    /// In contrast to .variable, values placed in this address space are not required to be mutable.
    constant,

    /// A pointer is ascribed to point into a certain address space.
    pointer,
};
|
|
|
|
/// Resolves `zir_ref` to a comptime-known `std.builtin.AddressSpace` and
/// validates that the chosen address space is supported for the given context
/// (`ctx`) on the compilation target. Emits a compile error otherwise.
pub fn analyzeAddressSpace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
    const mod = sema.mod;
    const air_ref = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, Type.fromInterned(.address_space_type), air_ref, src);
    const addrspace_val = try sema.resolveConstDefinedValue(block, src, coerced, .{
        .needed_comptime_reason = "address space must be comptime-known",
    });
    const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_val);
    const target = sema.mod.getTarget();
    const arch = target.cpu.arch;

    // Architecture families with special address-space support.
    const is_nv = arch == .nvptx or arch == .nvptx64;
    const is_amd = arch == .amdgcn;
    const is_spirv = arch == .spirv32 or arch == .spirv64;
    const is_gpu = is_nv or is_amd or is_spirv;

    const supported = switch (address_space) {
        // TODO: on spir-v only when os is opencl.
        .generic => true,
        // x86 segment registers are only meaningful for pointer ascription.
        .gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer,
        // TODO: check that .shared and .local are left uninitialized
        .param => is_nv,
        .input, .output, .uniform => is_spirv,
        .global, .shared, .local => is_gpu,
        .constant => is_gpu and (ctx == .constant),
        // TODO this should also check how many flash banks the cpu has
        .flash, .flash1, .flash2, .flash3, .flash4, .flash5 => arch == .avr,
    };

    if (!supported) {
        // TODO error messages could be made more elaborate here
        const entity = switch (ctx) {
            .function => "functions",
            .variable => "mutable values",
            .constant => "constant values",
            .pointer => "pointers",
        };
        return sema.fail(
            block,
            src,
            "{s} with address space '{s}' are not supported on {s}",
            .{ entity, @tagName(address_space), arch.genericName() },
        );
    }

    return address_space;
}
|
|
|
|
/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
/// Translates the non-value outcomes of `pointerDerefExtra` into compile errors.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
    const mod = sema.mod;
    const load_ty = ptr_ty.childType(mod);
    const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty);
    switch (res) {
        // Contents are only known at runtime; caller must emit a runtime load.
        .runtime_load => return null,
        .val => |v| return v,
        .needed_well_defined => |ty| return sema.fail(
            block,
            src,
            "comptime dereference requires '{}' to have a well-defined layout, but it does not.",
            .{ty.fmt(sema.mod)},
        ),
        .out_of_bounds => |ty| return sema.fail(
            block,
            src,
            "dereference of '{}' exceeds bounds of containing decl of type '{}'",
            .{ ptr_ty.fmt(sema.mod), ty.fmt(sema.mod) },
        ),
    }
}
|
|
|
|
/// Outcome of attempting a comptime pointer dereference (see `pointerDerefExtra`).
const DerefResult = union(enum) {
    /// The pointee is not comptime-known; a runtime load is required.
    runtime_load,
    /// Successfully loaded comptime value.
    val: Value,
    /// A type without a well-defined memory layout prevented the load.
    needed_well_defined: Type,
    /// The load would read past the containing decl; payload is the decl's type.
    out_of_bounds: Type,
};
|
|
|
|
/// Attempts to load a value of type `load_ty` through `ptr_val` at comptime.
/// Strategy: first try a direct, in-memory-coercible load of the pointee;
/// failing that, bit-cast from the pointee or from the largest enclosing
/// parent value with a well-defined layout. See `DerefResult` for outcomes.
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult {
    const mod = sema.mod;
    const target = mod.getTarget();
    const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) {
        error.RuntimeLoad => return DerefResult{ .runtime_load = {} },
        else => |e| return e,
    };

    if (deref.pointee) |tv| {
        // Coercion is acceptable in either direction for a direct load.
        const coerce_in_mem_ok =
            (try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or
            (try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok;
        if (coerce_in_mem_ok) {
            // We have a Value that lines up in virtual memory exactly with what we want to load,
            // and it is in-memory coercible to load_ty. It may be returned without modifications.
            // Move mutable decl values to the InternPool and assert other decls are already in
            // the InternPool.
            const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern();
            const coerced_val = try mod.getCoerced(Value.fromInterned(uncoerced_val), load_ty);
            return .{ .val = coerced_val };
        }
    }

    // The type is not in-memory coercible or the direct dereference failed, so it must
    // be bitcast according to the pointer type we are performing the load through.
    if (!load_ty.hasWellDefinedLayout(mod)) {
        return DerefResult{ .needed_well_defined = load_ty };
    }

    const load_sz = try sema.typeAbiSize(load_ty);

    // Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
    if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
        return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };

    // If that fails, try to bit-cast from the largest parent value with a well-defined layout
    if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
        return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };

    if (deref.ty_without_well_defined_layout) |bad_ty| {
        // We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
        // is that some type we encountered when de-referencing does not have a well-defined layout.
        return DerefResult{ .needed_well_defined = bad_ty };
    } else {
        // If all encountered types had well-defined layouts, the parent is the root decl and it just
        // wasn't big enough for the load.
        return DerefResult{ .out_of_bounds = deref.parent.?.tv.ty };
    }
}
|
|
|
|
/// Used to convert a u64 value to a usize value, emitting a compile error if the number
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
    // When usize is at least 64 bits the widening coercion always succeeds.
    if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int;
    if (std.math.cast(usize, int)) |casted| return casted;
    return sema.fail(block, src, "expression produces integer value '{d}' which is too big for this compiler implementation to handle", .{int});
}
|
|
|
|
/// For pointer-like optionals, it returns the pointer type. For pointers,
/// the type is returned unmodified.
/// This can return `error.AnalysisFail` because it sometimes requires resolving whether
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
    const mod = sema.mod;
    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
            .One, .Many, .C => ty,
            // Slices carry a length and are not a bare pointer.
            .Slice => null,
        },
        .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                .Slice, .C => null,
                .Many, .One => {
                    // allowzero pointers cannot use null as the "no value" state.
                    if (ptr_type.flags.is_allowzero) return null;

                    // optionals of zero sized types behave like bools, not pointers
                    const payload_ty = Type.fromInterned(opt_child);
                    if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
                        return null;
                    }

                    return payload_ty;
                },
            },
            else => null,
        },
        else => null,
    };
}
|
|
|
|
/// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved.
pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    return ty.comptimeOnlyAdvanced(mod, sema);
}
|
|
|
|
/// Whether `ty` occupies any bits at runtime.
pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    if (ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema })) |result| {
        return result;
    } else |err| switch (err) {
        // A Sema is provided, so lazy values are resolved eagerly.
        error.NeedLazy => unreachable,
        else => |e| return e,
    }
}
|
|
|
|
/// Resolves the layout of `ty`, then returns its ABI size in bytes.
fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
    try sema.resolveTypeLayout(ty);
    const size = ty.abiSize(sema.mod);
    return size;
}
|
|
|
|
/// Returns the ABI alignment of `ty`, resolving lazily as needed via this Sema.
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
    const result = try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema });
    return result.scalar;
}
|
|
|
|
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
/// Returns the explicit field alignment if present, otherwise the field
/// type's ABI alignment (`.none` for noreturn fields).
fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const field_align = u.fieldAlign(ip, field_index);
    // An explicit `align(N)` on the field wins.
    if (field_align != .none) return field_align;
    const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
    if (field_ty.isNoReturn(sema.mod)) return .none;
    return sema.typeAbiAlignment(field_ty);
}
|
|
|
|
/// Keep implementation in sync with `Module.structFieldAlignment`.
/// Computes the alignment of a struct field given its explicit alignment (if
/// any), its type, and the container layout.
fn structFieldAlignment(
    sema: *Sema,
    explicit_alignment: InternPool.Alignment,
    field_ty: Type,
    layout: std.builtin.Type.ContainerLayout,
) !Alignment {
    // An explicit `align(N)` always wins.
    if (explicit_alignment != .none)
        return explicit_alignment;
    const mod = sema.mod;
    switch (layout) {
        // Packed struct fields are bit-packed and carry no byte alignment.
        .Packed => return .none,
        // Non-C output formats use plain ABI alignment for auto layout;
        // the C backend falls through to the extern rules below.
        .Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
        .Extern => {},
    }
    // extern
    const ty_abi_align = try sema.typeAbiAlignment(field_ty);
    // Large (>= 128-bit) integers get at least 16-byte alignment under extern layout.
    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
        return ty_abi_align.maxStrict(.@"16");
    }
    return ty_abi_align;
}
|
|
|
|
/// Whether the function type `ty` has a runtime representation.
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    return ty.fnHasRuntimeBitsAdvanced(mod, sema);
}
|
|
|
|
/// Resolves `field_name` to its index within union `union_ty`, emitting a
/// compile error at `field_src` if the union has no such field.
fn unionFieldIndex(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // Field names are only available once the type's fields are resolved.
    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index = union_obj.nameIndex(ip, field_name) orelse
        return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
    return @intCast(field_index);
}
|
|
|
|
/// Resolves `field_name` to its index within struct `struct_ty`, emitting a
/// compile error at `field_src` if the struct has no such field.
/// Handles both named structs and anonymous (tuple-like) structs.
fn structFieldIndex(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    // Field names are only available once the type's fields are resolved.
    try sema.resolveTypeFields(struct_ty);
    if (struct_ty.isAnonStruct(mod)) {
        return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
    } else {
        const struct_type = mod.typeToStruct(struct_ty).?;
        return struct_type.nameIndex(ip, field_name) orelse
            return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
    }
}
|
|
|
|
/// Resolves `field_name` within an anonymous struct type (or a struct type,
/// when reached through coercion paths). Emits a compile error if not found.
fn anonStructFieldIndex(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    switch (ip.indexToKey(struct_ty.toIntern())) {
        // Anonymous structs store their field names as a parallel array;
        // scan it linearly.
        .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
            if (name == field_name) return @intCast(i);
        },
        .struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i,
        else => unreachable,
    }
    return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
        field_name.fmt(ip), struct_ty.fmt(sema.mod),
    });
}
|
|
|
|
/// Records `ty` so that its full resolution happens in a later pass.
fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
    const key = ty.toIntern();
    try sema.types_to_resolve.put(sema.gpa, key, {});
}
|
|
|
|
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
    var overflow: usize = undefined;
    return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
        error.Overflow => {
            const is_vec = ty.isVector(sema.mod);
            overflow_idx.* = if (is_vec) overflow else 0;
            // Retry with comptime_int (element) type, which cannot overflow.
            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
                .len = ty.vectorLen(sema.mod),
                .child = .comptime_int_type,
            }) else Type.comptime_int;
            return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
                // comptime_int arithmetic is arbitrary precision.
                error.Overflow => unreachable,
                else => |e| return e,
            };
        },
        else => |e| return e,
    };
}
|
|
|
|
/// Element-wise addition for vectors, deferring to `intAddScalar` per element;
/// plain scalar addition otherwise. On overflow, `overflow_idx` receives the
/// failing element index and `error.Overflow` is returned.
fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const lhs_elem = try lhs.elemValue(mod, i);
            const rhs_elem = try rhs.elemValue(mod, i);
            const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
                error.Overflow => {
                    // Report which element overflowed, then propagate.
                    overflow_idx.* = i;
                    return error.Overflow;
                },
                else => |e| return e,
            };
            scalar.* = try val.intern(scalar_ty, mod);
        }
        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })));
    }
    return sema.intAddScalar(lhs, rhs, ty);
}
|
|
|
|
/// Adds two scalar integer values. For sized integer types, returns
/// `error.Overflow` when the mathematical result does not fit; for
/// comptime_int, performs arbitrary-precision big-integer addition.
fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
    const mod = sema.mod;
    if (scalar_ty.toIntern() != .comptime_int_type) {
        // Sized integer: detect overflow via the wrapping-add helper.
        const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty);
        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
        return res.wrapped_result;
    }
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    // One extra limb covers any carry out of the addition.
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.add(lhs_bigint, rhs_bigint);
    return mod.intValue_big(scalar_ty, result_bigint.toConst());
}
|
|
|
|
/// Supports both floats and ints; handles undefined.
/// Wrapping addition: sized integers wrap on overflow, comptime_int adds
/// exactly, floats use IEEE addition, undef operands produce undef.
fn numberAddWrapScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty);

    if (ty.zigTypeTag(mod) == .ComptimeInt) {
        // comptime_int never overflows, so the overflow_idx out-param is unused.
        return sema.intAdd(lhs, rhs, ty, undefined);
    }

    if (ty.isAnyFloat()) {
        return Value.floatAdd(lhs, rhs, ty, sema.arena, mod);
    }

    const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty);
    return overflow_result.wrapped_result;
}
|
|
|
|
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
    var overflow: usize = undefined;
    return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
        error.Overflow => {
            const is_vec = ty.isVector(sema.mod);
            overflow_idx.* = if (is_vec) overflow else 0;
            // Retry with comptime_int (element) type, which cannot overflow.
            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
                .len = ty.vectorLen(sema.mod),
                .child = .comptime_int_type,
            }) else Type.comptime_int;
            return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
                // comptime_int arithmetic is arbitrary precision.
                error.Overflow => unreachable,
                else => |e| return e,
            };
        },
        else => |e| return e,
    };
}
|
|
|
|
/// Element-wise subtraction for vectors, deferring to `intSubScalar` per
/// element; plain scalar subtraction otherwise. On overflow, `overflow_idx`
/// receives the failing element index and `error.Overflow` is returned.
/// Mirror of `intAddInner`; keep the two in sync.
fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            // Use the local `mod` alias consistently (matches `intAddInner`).
            const lhs_elem = try lhs.elemValue(mod, i);
            const rhs_elem = try rhs.elemValue(mod, i);
            const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
                error.Overflow => {
                    // Report which element overflowed, then propagate.
                    overflow_idx.* = i;
                    return error.Overflow;
                },
                else => |e| return e,
            };
            scalar.* = try val.intern(scalar_ty, mod);
        }
        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })));
    }
    return sema.intSubScalar(lhs, rhs, ty);
}
|
|
|
|
/// Subtracts two scalar integer values. For sized integer types, returns
/// `error.Overflow` when the mathematical result does not fit; for
/// comptime_int, performs arbitrary-precision big-integer subtraction.
fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
    const mod = sema.mod;
    if (scalar_ty.toIntern() != .comptime_int_type) {
        // Sized integer: detect overflow via the wrapping-sub helper.
        const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty);
        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
        return res.wrapped_result;
    }
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    // One extra limb covers any borrow/sign growth of the subtraction.
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.sub(lhs_bigint, rhs_bigint);
    return mod.intValue_big(scalar_ty, result_bigint.toConst());
}
|
|
|
|
/// Supports both floats and ints; handles undefined.
/// Wrapping subtraction: sized integers wrap on overflow, comptime_int
/// subtracts exactly, floats use IEEE subtraction, undef operands give undef.
fn numberSubWrapScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty);

    if (ty.zigTypeTag(mod) == .ComptimeInt) {
        // comptime_int never overflows, so the overflow_idx out-param is unused.
        return sema.intSub(lhs, rhs, ty, undefined);
    }

    if (ty.isAnyFloat()) {
        return Value.floatSub(lhs, rhs, ty, sema.arena, mod);
    }

    const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty);
    return overflow_result.wrapped_result;
}
|
|
|
|
/// Two's-complement wrapping subtraction with overflow detection.
/// For vectors, returns a vector of u1 overflow bits alongside the
/// element-wise wrapped results; for scalars defers to the scalar helper.
fn intSubWithOverflow(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const vec_len = ty.vectorLen(mod);
        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const scalar_ty = ty.scalarType(mod);
        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
            const lhs_elem = try lhs.elemValue(sema.mod, i);
            const rhs_elem = try rhs.elemValue(sema.mod, i);
            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
            of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
        }
        return Value.OverflowArithmeticResult{
            .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
                .storage = .{ .elems = overflowed_data },
            } }))),
            .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
                .ty = ty.toIntern(),
                .storage = .{ .elems = result_data },
            } }))),
        };
    }
    return sema.intSubWithOverflowScalar(lhs, rhs, ty);
}
|
|
|
|
/// Scalar two's-complement wrapping subtraction for a sized integer type.
/// Returns the wrapped result and a u1 overflow bit.
fn intSubWithOverflowScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    const info = ty.intInfo(mod);

    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    // Exactly enough limbs to hold any value of the target bit width.
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(info.bits),
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    // subWrap reports whether the true result was truncated to fit.
    const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
    const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
    return Value.OverflowArithmeticResult{
        .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
        .wrapped_result = wrapped_result,
    };
}
|
|
|
|
/// `exact` errors on any fractional component; `truncate` discards it.
const IntFromFloatMode = enum { exact, truncate };
|
|
|
|
/// Converts a comptime float value (or vector thereof) to `int_ty`.
/// Vectors are converted element-wise; scalars defer to `intFromFloatScalar`.
/// `mode` selects whether a fractional component is an error or is truncated.
fn intFromFloat(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    float_ty: Type,
    int_ty: Type,
    mode: IntFromFloatMode,
) CompileError!Value {
    const mod = sema.mod;
    if (float_ty.zigTypeTag(mod) == .Vector) {
        const elem_ty = float_ty.scalarType(mod);
        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
        // Hoisted out of the loop; previously `int_ty.scalarType(mod)` was
        // recomputed for every element.
        const scalar_ty = int_ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const elem_val = try val.elemValue(mod, i);
            const converted = try sema.intFromFloatScalar(block, src, elem_val, elem_ty, scalar_ty, mode);
            scalar.* = try converted.intern(scalar_ty, mod);
        }
        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
            .ty = int_ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })));
    }
    return sema.intFromFloatScalar(block, src, val, float_ty, int_ty, mode);
}
|
|
|
|
// float is expected to be finite and non-NaN
/// Extracts the integer part of `float` (truncating toward zero) as an
/// arbitrary-precision big integer allocated from `arena`.
/// Caller owns the returned Managed (arena-backed, so freeing is optional).
fn float128IntPartToBigInt(
    arena: Allocator,
    float: f128,
) !std.math.big.int.Managed {
    const is_negative = std.math.signbit(float);
    // Work on the magnitude; the sign is re-applied at the end.
    const floored = @floor(@abs(float));

    var rational = try std.math.big.Rational.init(arena);
    defer rational.q.deinit();
    rational.setFloat(f128, floored) catch |err| switch (err) {
        // `floored` came from a finite, non-NaN input.
        error.NonFiniteFloat => unreachable,
        error.OutOfMemory => return error.OutOfMemory,
    };

    // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
    const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
    assert(rational.q.toConst().eqlAbs(big_one));

    if (is_negative) {
        rational.negate();
    }
    return rational.p;
}
|
|
|
|
/// Converts one scalar float value to `int_ty`, emitting compile errors for
/// undef, NaN, Inf, out-of-range values, and (in `.exact` mode) fractional
/// components.
fn intFromFloatScalar(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    float_ty: Type,
    int_ty: Type,
    mode: IntFromFloatMode,
) CompileError!Value {
    const mod = sema.mod;

    if (val.isUndef(mod)) return sema.failWithUseOfUndef(block, src);

    if (mode == .exact and val.floatHasFraction(mod)) return sema.fail(
        block,
        src,
        "fractional component prevents float value '{}' from coercion to type '{}'",
        .{ val.fmtValue(float_ty, mod), int_ty.fmt(mod) },
    );

    // Widen to f128 so every float type takes the same path.
    const float = val.toFloat(f128, mod);
    if (std.math.isNan(float)) {
        return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
            int_ty.fmt(sema.mod),
        });
    }
    if (std.math.isInf(float)) {
        return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{
            int_ty.fmt(sema.mod),
        });
    }

    var big_int = try float128IntPartToBigInt(sema.arena, float);
    defer big_int.deinit();

    // First build a comptime_int, then range-check against the target type.
    const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst());

    if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
        return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
            val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
        });
    }
    return mod.getCoerced(cti_result, int_ty);
}
|
|
|
|
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
/// Vectors are also accepted. Vector results are reduced with AND.
///
/// If provided, `vector_index` reports the first element that failed the range check.
fn intFitsInType(
    sema: *Sema,
    val: Value,
    ty: Type,
    vector_index: ?*usize,
) CompileError!bool {
    const mod = sema.mod;
    // comptime_int has unlimited range; everything fits.
    if (ty.toIntern() == .comptime_int_type) return true;
    const info = ty.intInfo(mod);
    switch (val.toIntern()) {
        .zero_usize, .zero_u8 => return true,
        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .undef => return true,
            // Pointer-like values: fits iff the type can hold a full pointer.
            .variable, .extern_func, .func, .ptr => {
                const target = mod.getTarget();
                const ptr_bits = target.ptrBitWidth();
                return switch (info.signedness) {
                    .signed => info.bits > ptr_bits,
                    .unsigned => info.bits >= ptr_bits,
                };
            },
            .int => |int| switch (int.storage) {
                .u64, .i64, .big_int => {
                    var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
                    const big_int = int.storage.toBigInt(&buffer);
                    return big_int.fitsInTwosComp(info.signedness, info.bits);
                },
                .lazy_align => |lazy_ty| {
                    const max_needed_bits = @as(u16, 16) + @intFromBool(info.signedness == .signed);
                    // If it is u16 or bigger we know the alignment fits without resolving it.
                    if (info.bits >= max_needed_bits) return true;
                    const x = try sema.typeAbiAlignment(Type.fromInterned(lazy_ty));
                    if (x == .none) return true;
                    const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed);
                    return info.bits >= actual_needed_bits;
                },
                .lazy_size => |lazy_ty| {
                    const max_needed_bits = @as(u16, 64) + @intFromBool(info.signedness == .signed);
                    // If it is u64 or bigger we know the size fits without resolving it.
                    if (info.bits >= max_needed_bits) return true;
                    const x = try sema.typeAbiSize(Type.fromInterned(lazy_ty));
                    if (x == 0) return true;
                    const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
                    return info.bits >= actual_needed_bits;
                },
            },
            .aggregate => |aggregate| {
                assert(ty.zigTypeTag(mod) == .Vector);
                // All elements must fit; on the first failure report its index.
                return switch (aggregate.storage) {
                    .bytes => |bytes| for (bytes, 0..) |byte, i| {
                        if (byte == 0) continue;
                        const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed);
                        if (info.bits >= actual_needed_bits) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                    .elems, .repeated_elem => for (switch (aggregate.storage) {
                        .bytes => unreachable,
                        .elems => |elems| elems,
                        // Treat the single repeated element as a one-element slice.
                        .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
                    }, 0..) |elem, i| {
                        if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(mod), null)) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                };
            },
            else => unreachable,
        },
    }
}
|
|
|
|
/// Whether `int_val` lies in the half-open range [0, end).
fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
    const mod = sema.mod;
    // Lower bound: 0 <= int_val.
    if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
    // Upper bound: int_val < end.
    const end_val = try mod.intValue(tag_ty, end);
    return try sema.compareAll(int_val, .lt, end_val, tag_ty);
}
|
|
|
|
/// Returns whether `int` is one of the tag values of the enum `ty`.
/// Asserts the type is an exhaustive enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
    assert(enum_type.tag_mode != .nonexhaustive);
    const tag_ty = Type.fromInterned(enum_type.tag_ty);
    // `tagValueIndex` below relies on the value having the integer tag type,
    // and `getCoerced` assumes the value fits the new type, so verify the
    // fit before coercing.
    if (!(try sema.intFitsInType(int, tag_ty, null))) return false;
    const coerced = try mod.getCoerced(int, tag_ty);

    return enum_type.tagValueIndex(ip, coerced.toIntern()) != null;
}
|
|
|
|
/// Adds two integer values (scalars or vectors) of type `ty` with wrapping
/// semantics, reporting per-element overflow bits alongside the wrapped result.
fn intAddWithOverflow(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) != .Vector) {
        return sema.intAddWithOverflowScalar(lhs, rhs, ty);
    }

    const len = ty.vectorLen(mod);
    const scalar_ty = ty.scalarType(mod);
    const overflow_elems = try sema.arena.alloc(InternPool.Index, len);
    const wrapped_elems = try sema.arena.alloc(InternPool.Index, len);
    var i: usize = 0;
    while (i < len) : (i += 1) {
        const lhs_elem = try lhs.elemValue(mod, i);
        const rhs_elem = try rhs.elemValue(mod, i);
        const elem_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
        overflow_elems[i] = try elem_result.overflow_bit.intern(Type.u1, mod);
        wrapped_elems[i] = try elem_result.wrapped_result.intern(scalar_ty, mod);
    }
    // The overflow bits form a vector of u1 with the same length as `ty`.
    const overflow_ty = try mod.vectorType(.{ .len = len, .child = .u1_type });
    return Value.OverflowArithmeticResult{
        .overflow_bit = Value.fromInterned(try mod.intern(.{ .aggregate = .{
            .ty = overflow_ty.toIntern(),
            .storage = .{ .elems = overflow_elems },
        } })),
        .wrapped_result = Value.fromInterned(try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = wrapped_elems },
        } })),
    };
}
|
|
|
|
/// Scalar case of `intAddWithOverflow`: two's-complement wrapping add of two
/// integer values of type `ty`, plus a u1 overflow bit.
fn intAddWithOverflowScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    const info = ty.intInfo(mod);

    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_big = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_big = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    // Enough limbs to hold any `info.bits`-wide two's complement result.
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(info.bits),
    );
    var sum = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    const overflowed = sum.addWrap(lhs_big, rhs_big, info.signedness, info.bits);
    return Value.OverflowArithmeticResult{
        .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
        .wrapped_result = try mod.intValue_big(ty, sum.toConst()),
    };
}
|
|
|
|
/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if the comparison is true for ALL elements.
///
/// Note that `!compareAll(.eq, ...) != compareAll(.neq, ...)`
fn compareAll(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) != .Vector) {
        return sema.compareScalar(lhs, op, rhs, ty);
    }
    const scalar_ty = ty.scalarType(mod);
    for (0..ty.vectorLen(mod)) |i| {
        const lhs_elem = try lhs.elemValue(mod, i);
        const rhs_elem = try rhs.elemValue(mod, i);
        // One failing element is enough to make the whole comparison false.
        if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, scalar_ty))) {
            return false;
        }
    }
    return true;
}
|
|
|
|
/// Asserts the values are comparable. Both operands have type `ty`.
/// Equality and inequality use value equality; ordering comparisons defer to
/// `Value.compareHeteroAdvanced`.
fn compareScalar(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    const mod = sema.mod;
    const l = try mod.getCoerced(lhs, ty);
    const r = try mod.getCoerced(rhs, ty);
    return switch (op) {
        .eq => sema.valuesEqual(l, r, ty),
        .neq => !(try sema.valuesEqual(l, r, ty)),
        else => Value.compareHeteroAdvanced(l, op, r, mod, sema),
    };
}
|
|
|
|
/// Reports whether two values of type `ty` compare equal.
fn valuesEqual(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    const mod = sema.mod;
    return lhs.eql(rhs, ty, mod);
}
|
|
|
|
/// Asserts the values are comparable vectors of type `ty`.
/// Produces a vector of `bool` holding the element-wise comparison results.
fn compareVector(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    assert(ty.zigTypeTag(mod) == .Vector);
    const len = ty.vectorLen(mod);
    const scalar_ty = ty.scalarType(mod);
    const elems = try sema.arena.alloc(InternPool.Index, len);
    for (elems, 0..) |*elem, i| {
        const lhs_elem = try lhs.elemValue(mod, i);
        const rhs_elem = try rhs.elemValue(mod, i);
        const truth = try sema.compareScalar(lhs_elem, op, rhs_elem, scalar_ty);
        elem.* = try Value.makeBool(truth).intern(Type.bool, mod);
    }
    const result_ty = try mod.vectorType(.{ .len = len, .child = .bool_type });
    return Value.fromInterned(try mod.intern(.{ .aggregate = .{
        .ty = result_ty.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}
|
|
|
|
/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// For *[N]T, return *T
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
    const mod = sema.mod;
    const ptr_info = ptr_ty.ptrInfo(mod);
    const elem_ty = ptr_ty.elemType2(mod);
    // `allowzero` is retained only when the comptime-known offset is 0
    // (a runtime-known offset, `offset == null`, counts as 0 here).
    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
    const parent_ty = ptr_ty.childType(mod);

    const VI = InternPool.Key.PtrType.VectorIndex;

    // Pointers to packed vector elements carry host_size/vector_index
    // metadata so that the element can be addressed within its host vector.
    const vector_info: struct {
        host_size: u16 = 0,
        alignment: Alignment = .none,
        vector_index: VI = .none,
    } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: {
        const elem_bits = elem_ty.bitSize(mod);
        if (elem_bits == 0) break :blk .{};
        // Elements narrower than a byte or of non-power-of-two bit width do
        // not occupy whole addressable units, so they are "packed".
        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
        if (!is_packed) break :blk .{};

        break :blk .{
            .host_size = @intCast(parent_ty.arrayLen(mod)),
            .alignment = parent_ty.abiAlignment(mod),
            // A comptime-known offset selects a specific lane; otherwise the
            // lane is only known at runtime.
            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
        };
    } else .{};

    const alignment: Alignment = a: {
        // Calculate the new pointer alignment.
        if (ptr_info.flags.alignment == .none) {
            // In case of an ABI-aligned pointer, any pointer arithmetic
            // maintains the same ABI-alignedness.
            break :a vector_info.alignment;
        }
        // If the addend is not a comptime-known value we can still count on
        // it being a multiple of the type size.
        const elem_size = try sema.typeAbiSize(elem_ty);
        const addend = if (offset) |off| elem_size * off else elem_size;

        // The resulting pointer is aligned to the largest power of two that
        // divides both the byte addend (an arbitrary number, whose largest
        // power-of-two divisor is 1 << @ctz(addend)) and the original
        // alignment factor (always a power of two, non zero).
        const new_align: Alignment = @enumFromInt(@min(
            @ctz(addend),
            ptr_info.flags.alignment.toLog2Units(),
        ));
        assert(new_align != .none);
        break :a new_align;
    };
    return sema.ptrType(.{
        .child = elem_ty.toIntern(),
        .flags = .{
            .alignment = alignment,
            .is_const = ptr_info.flags.is_const,
            .is_volatile = ptr_info.flags.is_volatile,
            .is_allowzero = is_allowzero,
            .address_space = ptr_info.flags.address_space,
            .vector_index = vector_info.vector_index,
        },
        .packed_offset = .{
            .host_size = vector_info.host_size,
            .bit_offset = 0,
        },
    });
}
|
|
|
|
/// Merge lhs with rhs.
/// Asserts that lhs and rhs are both error sets and are resolved.
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const lhs_names = lhs.errorSetNames(mod);
    const rhs_names = rhs.errorSetNames(mod);

    var names: InferredErrorSet.NameMap = .{};
    // lhs names are guaranteed unique, so reserve them up front and insert
    // without clobber checks.
    try names.ensureUnusedCapacity(sema.arena, lhs_names.len);
    for (0..lhs_names.len) |i| {
        names.putAssumeCapacityNoClobber(lhs_names.get(ip)[i], {});
    }
    // rhs names may duplicate lhs names; plain `put` de-duplicates.
    for (0..rhs_names.len) |i| {
        try names.put(sema.arena, rhs_names.get(ip)[i], {});
    }

    return mod.errorSetFromUnsortedNames(names.keys());
}
|
|
|
|
/// Avoids crashing the compiler when asking if inferred allocations are noreturn.
fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool {
    if (ref == .unreachable_value) return true;
    if (ref.toIndex()) |inst| {
        // Inferred allocations have no resolved type yet; they are never noreturn.
        switch (sema.air_instructions.items(.tag)[@intFromEnum(inst)]) {
            .inferred_alloc, .inferred_alloc_comptime => return false,
            else => {},
        }
    }
    return sema.typeOf(ref).isNoReturn(sema.mod);
}
|
|
|
|
/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type.
fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool {
    if (ref.toIndex()) |inst| {
        // Inferred allocations have no resolved type yet, so nothing is known.
        switch (sema.air_instructions.items(.tag)[@intFromEnum(inst)]) {
            .inferred_alloc, .inferred_alloc_comptime => return false,
            else => {},
        }
    }
    return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
}
|
|
|
|
/// Interns a pointer type. When an explicit alignment is requested, the
/// child type's ABI alignment is resolved first (which may emit errors).
fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
    if (info.flags.alignment == .none) return sema.mod.ptrType(info);
    _ = try sema.typeAbiAlignment(Type.fromInterned(info.child));
    return sema.mod.ptrType(info);
}
|
|
|
|
/// Records that the current analysis owner depends on `dependee`, for
/// incremental compilation. No-op unless incremental debugging is enabled.
pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
    if (!sema.mod.comp.debug_incremental) return;
    // The depender is the function being analyzed if there is one,
    // otherwise the owner declaration.
    const depender = if (sema.owner_func_index != .none)
        InternPool.Depender.wrap(.{ .func = sema.owner_func_index })
    else
        InternPool.Depender.wrap(.{ .decl = sema.owner_decl_index });
    try sema.mod.intern_pool.addDependency(sema.gpa, depender, dependee);
}
|