compiler: disallow callconv etc from depending on function parameters

Resolves: #22261
mlugg 2024-12-18 04:01:38 +00:00
parent 242bb44695
commit 7408679234
10 changed files with 327 additions and 849 deletions
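
For context, a minimal hypothetical sketch (not code from this diff) of the pattern reported in #22261 and rejected by this commit — a calling convention computed from a function parameter — alongside what remains legal:

```zig
const std = @import("std");

// Rejected after this change: the callconv expression references a
// function parameter (hypothetical example).
//
//     fn f(comptime cc: std.builtin.CallingConvention) callconv(cc) void {}
//
// Still fine: a comptime-known callconv that does not involve parameters.
fn g() callconv(.c) void {}

test "callconv fixed at comptime, independent of parameters" {
    g();
}
```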

lib/std/zig/AstGen.zig

@ -1420,19 +1420,9 @@ fn fnProtoExpr(
.cc_ref = cc,
.cc_gz = null,
.align_ref = .none,
.align_gz = null,
.ret_ref = ret_ty,
.ret_gz = null,
.section_ref = .none,
.section_gz = null,
.addrspace_ref = .none,
.addrspace_gz = null,
.align_param_refs = &.{},
.addrspace_param_refs = &.{},
.section_param_refs = &.{},
.cc_param_refs = &.{},
.ret_param_refs = &.{},
.param_insts = &.{},
@ -4129,6 +4119,8 @@ fn fnDecl(
const decl_inst = try gz.makeDeclaration(fn_proto.ast.proto_node);
astgen.advanceSourceCursorToNode(decl_node);
const saved_cursor = astgen.saveSourceCursor();
var decl_gz: GenZir = .{
.is_comptime = true,
.decl_node_index = fn_proto.ast.proto_node,
@ -4140,17 +4132,6 @@ fn fnDecl(
};
defer decl_gz.unstack();
var fn_gz: GenZir = .{
.is_comptime = false,
.decl_node_index = fn_proto.ast.proto_node,
.decl_line = decl_gz.decl_line,
.parent = &decl_gz.base,
.astgen = astgen,
.instructions = gz.instructions,
.instructions_top = GenZir.unstacked_top,
};
defer fn_gz.unstack();
const decl_column = astgen.source_column;
// Set this now, since parameter types, return type, etc may be generic.
@ -4182,7 +4163,7 @@ fn fnDecl(
var param_insts: std.ArrayListUnmanaged(Zir.Inst.Index) = try .initCapacity(astgen.arena, fn_proto.ast.params.len);
var noalias_bits: u32 = 0;
var params_scope = &fn_gz.base;
var params_scope = scope;
const is_var_args = is_var_args: {
var param_type_i: usize = 0;
var it = fn_proto.iterate(tree);
@ -4305,47 +4286,26 @@ fn fnDecl(
// instructions inside the expression blocks for align, addrspace, cc, and ret_ty
// to use the function instruction as the "block" to break from.
var align_gz = decl_gz.makeSubBlock(params_scope);
defer align_gz.unstack();
const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
const inst = try expr(&decl_gz, params_scope, coerced_align_ri, fn_proto.ast.align_expr);
if (align_gz.instructionsSlice().len == 0) {
var ret_gz = decl_gz.makeSubBlock(params_scope);
defer ret_gz.unstack();
const ret_ref: Zir.Inst.Ref = inst: {
// Parameters are in scope for the return type, so we use `params_scope` here.
// The calling convention will not have parameters in scope, so we'll just use `scope`.
// See #22263 for a proposal to solve the inconsistency here.
const inst = try fullBodyExpr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type, .normal);
if (ret_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
}
_ = try align_gz.addBreak(.break_inline, @enumFromInt(0), inst);
_ = try ret_gz.addBreak(.break_inline, @enumFromInt(0), inst);
break :inst inst;
};
const align_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
const ret_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
var addrspace_gz = decl_gz.makeSubBlock(params_scope);
defer addrspace_gz.unstack();
const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
const addrspace_ty = try decl_gz.addBuiltinValue(fn_proto.ast.addrspace_expr, .address_space);
const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = addrspace_ty } }, fn_proto.ast.addrspace_expr);
if (addrspace_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
}
_ = try addrspace_gz.addBreak(.break_inline, @enumFromInt(0), inst);
break :inst inst;
};
const addrspace_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
// We're jumping back in source, so restore the cursor.
astgen.restoreSourceCursor(saved_cursor);
var section_gz = decl_gz.makeSubBlock(params_scope);
defer section_gz.unstack();
const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr);
if (section_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
}
_ = try section_gz.addBreak(.break_inline, @enumFromInt(0), inst);
break :inst inst;
};
const section_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
var cc_gz = decl_gz.makeSubBlock(params_scope);
var cc_gz = decl_gz.makeSubBlock(scope);
defer cc_gz.unstack();
const cc_ref: Zir.Inst.Ref = blk: {
if (fn_proto.ast.callconv_expr != 0) {
@ -4358,7 +4318,7 @@ fn fnDecl(
}
const inst = try expr(
&cc_gz,
params_scope,
scope,
.{ .rl = .{ .coerced_ty = try cc_gz.addBuiltinValue(fn_proto.ast.callconv_expr, .calling_convention) } },
fn_proto.ast.callconv_expr,
);
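
A hedged illustration of the scope asymmetry the comment above describes: the return type is lowered with `params_scope`, while callconv (and, per the hunks below, align, linksection, and addrspace) is lowered with the enclosing `scope`. Both functions here are hypothetical examples, not code from the diff:

```zig
const std = @import("std");

// OK: the return type is analyzed with parameters in scope.
fn first(comptime T: type, items: []const T) T {
    return items[0];
}

// Rejected: `align` (like callconv) is now analyzed in the enclosing
// scope, where `n` does not exist.
//
//     fn bad(comptime n: u29) align(n) void {}

test first {
    try std.testing.expectEqual(@as(u8, 3), first(u8, &.{ 3, 4 }));
}
```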
@ -4380,20 +4340,6 @@ fn fnDecl(
break :blk .none;
}
};
const cc_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
var ret_gz = decl_gz.makeSubBlock(params_scope);
defer ret_gz.unstack();
const ret_ref: Zir.Inst.Ref = inst: {
const inst = try fullBodyExpr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type, .normal);
if (ret_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
}
_ = try ret_gz.addBreak(.break_inline, @enumFromInt(0), inst);
break :inst inst;
};
const ret_body_param_refs = try astgen.fetchRemoveRefEntries(param_insts.items);
const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
if (!is_extern) {
@ -4406,19 +4352,9 @@ fn fnDecl(
.src_node = decl_node,
.cc_ref = cc_ref,
.cc_gz = &cc_gz,
.cc_param_refs = cc_body_param_refs,
.align_ref = align_ref,
.align_gz = &align_gz,
.align_param_refs = align_body_param_refs,
.ret_ref = ret_ref,
.ret_gz = &ret_gz,
.ret_param_refs = ret_body_param_refs,
.section_ref = section_ref,
.section_gz = &section_gz,
.section_param_refs = section_body_param_refs,
.addrspace_ref = addrspace_ref,
.addrspace_gz = &addrspace_gz,
.addrspace_param_refs = addrspace_body_param_refs,
.param_block = decl_inst,
.param_insts = param_insts.items,
.body_gz = null,
@ -4432,8 +4368,23 @@ fn fnDecl(
.proto_hash = undefined, // ignored for `body_gz == null`
});
} else func: {
// as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
fn_gz.instructions_top = ret_gz.instructions.items.len;
var body_gz: GenZir = .{
.is_comptime = false,
.decl_node_index = fn_proto.ast.proto_node,
.decl_line = decl_gz.decl_line,
.parent = params_scope,
.astgen = astgen,
.instructions = gz.instructions,
.instructions_top = gz.instructions.items.len,
};
defer body_gz.unstack();
// We want `params_scope` to be stacked like this:
// body_gz (top)
// param2
// param1
// param0
// decl_gz (bottom)
// Construct the prototype hash.
// Leave `astgen.src_hasher` unmodified; this will be used for hashing
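
The stacking discipline sketched in the comment above can be modeled in isolation. A toy sketch — names and simplifications are mine, not the compiler's — of builders sharing one instruction list, each remembering where its own region begins:

```zig
const std = @import("std");

// Greatly simplified model of GenZir "stacking".
const Builder = struct {
    list: *std.ArrayListUnmanaged(u32),
    top: usize,

    fn slice(b: Builder) []u32 {
        return b.list.items[b.top..];
    }

    // Everything in `b`'s region below the point where `stacked` begins.
    fn sliceUpto(b: Builder, stacked: Builder) []u32 {
        return b.list.items[b.top..stacked.top];
    }

    // "Unstacking" truncates the shared list back to this region's base.
    fn unstack(b: Builder) void {
        b.list.items.len = b.top;
    }
};

test "stacked builders share one list" {
    const gpa = std.testing.allocator;
    var list: std.ArrayListUnmanaged(u32) = .empty;
    defer list.deinit(gpa);

    const lower: Builder = .{ .list = &list, .top = list.items.len };
    try list.append(gpa, 1); // instruction owned by `lower`
    const upper: Builder = .{ .list = &list, .top = list.items.len };
    try list.append(gpa, 2); // instruction owned by `upper`

    try std.testing.expectEqualSlices(u32, &.{ 1, 2 }, lower.slice());
    try std.testing.expectEqualSlices(u32, &.{1}, lower.sliceUpto(upper));
    upper.unstack();
    try std.testing.expectEqualSlices(u32, &.{1}, lower.slice());
}
```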
@ -4450,13 +4401,13 @@ fn fnDecl(
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
astgen.fn_block = &fn_gz;
astgen.fn_block = &body_gz;
astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
// We're essentially guaranteed to need the return type at some point,
// since the return type is likely not `void` or `noreturn` so there
// will probably be an explicit return requiring RLS. Fetch this
// return type now so the rest of the function can use it.
break :r try fn_gz.addNode(.ret_type, decl_node);
break :r try body_gz.addNode(.ret_type, decl_node);
} else ret_ref;
const prev_var_args = astgen.fn_var_args;
@ -4467,39 +4418,29 @@ fn fnDecl(
const lbrace_line = astgen.source_line - decl_gz.decl_line;
const lbrace_column = astgen.source_column;
_ = try fullBodyExpr(&fn_gz, params_scope, .{ .rl = .none }, body_node, .allow_branch_hint);
try checkUsed(gz, &fn_gz.base, params_scope);
_ = try fullBodyExpr(&body_gz, &body_gz.base, .{ .rl = .none }, body_node, .allow_branch_hint);
try checkUsed(gz, scope, params_scope);
if (!fn_gz.endsWithNoReturn()) {
if (!body_gz.endsWithNoReturn()) {
// As our last action before the return, "pop" the error trace if needed
_ = try fn_gz.addRestoreErrRetIndex(.ret, .always, decl_node);
_ = try body_gz.addRestoreErrRetIndex(.ret, .always, decl_node);
// Add implicit return at end of function.
_ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
_ = try body_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
}
break :func try decl_gz.addFunc(.{
.src_node = decl_node,
.cc_ref = cc_ref,
.cc_gz = &cc_gz,
.cc_param_refs = cc_body_param_refs,
.align_ref = align_ref,
.align_gz = &align_gz,
.align_param_refs = align_body_param_refs,
.ret_ref = ret_ref,
.ret_gz = &ret_gz,
.ret_param_refs = ret_body_param_refs,
.section_ref = section_ref,
.section_gz = &section_gz,
.section_param_refs = section_body_param_refs,
.addrspace_ref = addrspace_ref,
.addrspace_gz = &addrspace_gz,
.addrspace_param_refs = addrspace_body_param_refs,
.lbrace_line = lbrace_line,
.lbrace_column = lbrace_column,
.param_block = decl_inst,
.param_insts = param_insts.items,
.body_gz = &fn_gz,
.body_gz = &body_gz,
.lib_name = lib_name,
.is_var_args = is_var_args,
.is_inferred_error = is_inferred_error,
@ -4511,13 +4452,39 @@ fn fnDecl(
});
};
// Before we stack more stuff onto `decl_gz`, add its final instruction.
_ = try decl_gz.addBreak(.break_inline, decl_inst, func_inst);
// Now that `cc_gz`, `ret_gz`, and `body_gz` are unstacked, we evaluate align, addrspace, and linksection.
// We're jumping back in source, so restore the cursor.
astgen.restoreSourceCursor(saved_cursor);
var align_gz = decl_gz.makeSubBlock(scope);
defer align_gz.unstack();
if (fn_proto.ast.align_expr != 0) {
const inst = try expr(&decl_gz, &decl_gz.base, coerced_align_ri, fn_proto.ast.align_expr);
_ = try align_gz.addBreak(.break_inline, decl_inst, inst);
}
var section_gz = align_gz.makeSubBlock(scope);
defer section_gz.unstack();
if (fn_proto.ast.section_expr != 0) {
const inst = try expr(&decl_gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr);
_ = try section_gz.addBreak(.break_inline, decl_inst, inst);
}
var addrspace_gz = section_gz.makeSubBlock(scope);
defer addrspace_gz.unstack();
if (fn_proto.ast.addrspace_expr != 0) {
const addrspace_ty = try decl_gz.addBuiltinValue(fn_proto.ast.addrspace_expr, .address_space);
const inst = try expr(&decl_gz, scope, .{ .rl = .{ .coerced_ty = addrspace_ty } }, fn_proto.ast.addrspace_expr);
_ = try addrspace_gz.addBreak(.break_inline, decl_inst, inst);
}
// *Now* we can incorporate the full source code into the hasher.
astgen.src_hasher.update(tree.getNodeSource(decl_node));
// We add this at the end so that its instruction index marks the end range
// of the top level declaration. addFunc already unstacked fn_gz and ret_gz.
_ = try decl_gz.addBreak(.break_inline, decl_inst, func_inst);
var hash: std.zig.SrcHash = undefined;
astgen.src_hasher.final(&hash);
try setDeclaration(
@ -4529,9 +4496,11 @@ fn fnDecl(
is_pub,
is_export,
&decl_gz,
// align, linksection, and addrspace are passed in the func instruction in this case.
// TODO: move them from the function instruction to the declaration instruction?
null,
.{
.align_gz = &align_gz,
.linksection_gz = &section_gz,
.addrspace_gz = &addrspace_gz,
},
);
}
@ -4986,19 +4955,9 @@ fn testDecl(
.cc_ref = .none,
.cc_gz = null,
.align_ref = .none,
.align_gz = null,
.ret_ref = .anyerror_void_error_union_type,
.ret_gz = null,
.section_ref = .none,
.section_gz = null,
.addrspace_ref = .none,
.addrspace_gz = null,
.align_param_refs = &.{},
.addrspace_param_refs = &.{},
.section_param_refs = &.{},
.cc_param_refs = &.{},
.ret_param_refs = &.{},
.param_insts = &.{},
@ -11952,6 +11911,14 @@ const GenZir = struct {
self.instructions.items[self.instructions_top..];
}
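/// Like `instructionsSliceUpto`, but tolerates the absence of a stacked block:
/// when `maybe_stacked_gz` is null, nothing is stacked on top, so the block's
/// entire remaining slice is returned.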
fn instructionsSliceUptoOpt(gz: *const GenZir, maybe_stacked_gz: ?*GenZir) []Zir.Inst.Index {
if (maybe_stacked_gz) |stacked_gz| {
return gz.instructionsSliceUpto(stacked_gz);
} else {
return gz.instructionsSlice();
}
}
fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir {
return .{
.is_comptime = gz.is_comptime,
@ -12088,11 +12055,8 @@ const GenZir = struct {
/// Must be called with the following stack set up:
/// * gz (bottom)
/// * align_gz
/// * addrspace_gz
/// * section_gz
/// * cc_gz
/// * ret_gz
/// * cc_gz
/// * body_gz (top)
/// Unstacks all of those except for `gz`.
fn addFunc(
@ -12103,23 +12067,13 @@ const GenZir = struct {
lbrace_column: u32 = 0,
param_block: Zir.Inst.Index,
align_gz: ?*GenZir,
addrspace_gz: ?*GenZir,
section_gz: ?*GenZir,
cc_gz: ?*GenZir,
ret_gz: ?*GenZir,
body_gz: ?*GenZir,
cc_gz: ?*GenZir,
align_param_refs: []Zir.Inst.Index,
addrspace_param_refs: []Zir.Inst.Index,
section_param_refs: []Zir.Inst.Index,
cc_param_refs: []Zir.Inst.Index,
ret_param_refs: []Zir.Inst.Index,
param_insts: []Zir.Inst.Index, // refs to params in `body_gz` should still be in `astgen.ref_table`
align_ref: Zir.Inst.Ref,
addrspace_ref: Zir.Inst.Ref,
section_ref: Zir.Inst.Ref,
cc_ref: Zir.Inst.Ref,
ret_ref: Zir.Inst.Ref,
@ -12141,13 +12095,31 @@ const GenZir = struct {
const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref;
const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try astgen.instructions.ensureUnusedCapacity(gpa, 1);
var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
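// Walk the stack from the top (body_gz) down: each present sub-block's
// region ends where the block stacked on top of it begins, and absent
// blocks contribute empty bodies.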
const body, const cc_body, const ret_body = bodies: {
var stacked_gz: ?*GenZir = null;
const body: []const Zir.Inst.Index = if (args.body_gz) |body_gz| body: {
const body = body_gz.instructionsSliceUptoOpt(stacked_gz);
stacked_gz = body_gz;
break :body body;
} else &.{};
const cc_body: []const Zir.Inst.Index = if (args.cc_gz) |cc_gz| body: {
const cc_body = cc_gz.instructionsSliceUptoOpt(stacked_gz);
stacked_gz = cc_gz;
break :body cc_body;
} else &.{};
const ret_body: []const Zir.Inst.Index = if (args.ret_gz) |ret_gz| body: {
const ret_body = ret_gz.instructionsSliceUptoOpt(stacked_gz);
stacked_gz = ret_gz;
break :body ret_body;
} else &.{};
break :bodies .{ body, cc_body, ret_body };
};
var src_locs_and_hash_buffer: [7]u32 = undefined;
var src_locs_and_hash: []u32 = src_locs_and_hash_buffer[0..0];
if (args.body_gz) |body_gz| {
const src_locs_and_hash: []const u32 = if (args.body_gz != null) src_locs_and_hash: {
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
@ -12173,39 +12145,19 @@ const GenZir = struct {
proto_hash_arr[2],
proto_hash_arr[3],
};
src_locs_and_hash = &src_locs_and_hash_buffer;
break :src_locs_and_hash &src_locs_and_hash_buffer;
} else &.{};
body = body_gz.instructionsSlice();
if (args.ret_gz) |ret_gz|
ret_body = ret_gz.instructionsSliceUpto(body_gz);
} else {
if (args.ret_gz) |ret_gz|
ret_body = ret_gz.instructionsSlice();
}
const body_len = astgen.countBodyLenAfterFixupsExtraRefs(body, args.param_insts);
if (args.cc_ref != .none or args.lib_name != .empty or args.is_var_args or args.is_test or
args.is_extern or args.align_ref != .none or args.section_ref != .none or
args.addrspace_ref != .none or args.noalias_bits != 0 or args.is_noinline)
{
var align_body: []Zir.Inst.Index = &.{};
var addrspace_body: []Zir.Inst.Index = &.{};
var section_body: []Zir.Inst.Index = &.{};
var cc_body: []Zir.Inst.Index = &.{};
if (args.ret_gz != null) {
align_body = args.align_gz.?.instructionsSliceUpto(args.addrspace_gz.?);
addrspace_body = args.addrspace_gz.?.instructionsSliceUpto(args.section_gz.?);
section_body = args.section_gz.?.instructionsSliceUpto(args.cc_gz.?);
cc_body = args.cc_gz.?.instructionsSliceUpto(args.ret_gz.?);
}
const tag: Zir.Inst.Tag, const payload_index: u32 = if (args.cc_ref != .none or args.lib_name != .empty or
args.is_var_args or args.is_test or args.is_extern or
args.noalias_bits != 0 or args.is_noinline)
inst_info: {
try astgen.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Zir.Inst.FuncFancy).@"struct".fields.len +
fancyFnExprExtraLen(astgen, args.align_param_refs, align_body, args.align_ref) +
fancyFnExprExtraLen(astgen, args.addrspace_param_refs, addrspace_body, args.addrspace_ref) +
fancyFnExprExtraLen(astgen, args.section_param_refs, section_body, args.section_ref) +
fancyFnExprExtraLen(astgen, args.cc_param_refs, cc_body, args.cc_ref) +
fancyFnExprExtraLen(astgen, &.{}, cc_body, args.cc_ref) +
fancyFnExprExtraLen(astgen, args.ret_param_refs, ret_body, ret_ref) +
body_len + src_locs_and_hash.len +
@intFromBool(args.lib_name != .empty) +
@ -12223,15 +12175,9 @@ const GenZir = struct {
.has_lib_name = args.lib_name != .empty,
.has_any_noalias = args.noalias_bits != 0,
.has_align_ref = args.align_ref != .none,
.has_addrspace_ref = args.addrspace_ref != .none,
.has_section_ref = args.section_ref != .none,
.has_cc_ref = args.cc_ref != .none,
.has_ret_ty_ref = ret_ref != .none,
.has_align_body = align_body.len != 0,
.has_addrspace_body = addrspace_body.len != 0,
.has_section_body = section_body.len != 0,
.has_cc_body = cc_body.len != 0,
.has_ret_ty_body = ret_body.len != 0,
},
@ -12241,53 +12187,8 @@ const GenZir = struct {
}
const zir_datas = astgen.instructions.items(.data);
if (align_body.len != 0) {
astgen.extra.appendAssumeCapacity(
astgen.countBodyLenAfterFixups(args.align_param_refs) +
astgen.countBodyLenAfterFixups(align_body),
);
astgen.appendBodyWithFixups(args.align_param_refs);
astgen.appendBodyWithFixups(align_body);
const break_extra = zir_datas[@intFromEnum(align_body[align_body.len - 1])].@"break".payload_index;
astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] =
@intFromEnum(new_index);
} else if (args.align_ref != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_ref));
}
if (addrspace_body.len != 0) {
astgen.extra.appendAssumeCapacity(
astgen.countBodyLenAfterFixups(args.addrspace_param_refs) +
astgen.countBodyLenAfterFixups(addrspace_body),
);
astgen.appendBodyWithFixups(args.addrspace_param_refs);
astgen.appendBodyWithFixups(addrspace_body);
const break_extra =
zir_datas[@intFromEnum(addrspace_body[addrspace_body.len - 1])].@"break".payload_index;
astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] =
@intFromEnum(new_index);
} else if (args.addrspace_ref != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.addrspace_ref));
}
if (section_body.len != 0) {
astgen.extra.appendAssumeCapacity(
astgen.countBodyLenAfterFixups(args.section_param_refs) +
astgen.countBodyLenAfterFixups(section_body),
);
astgen.appendBodyWithFixups(args.section_param_refs);
astgen.appendBodyWithFixups(section_body);
const break_extra =
zir_datas[@intFromEnum(section_body[section_body.len - 1])].@"break".payload_index;
astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] =
@intFromEnum(new_index);
} else if (args.section_ref != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.section_ref));
}
if (cc_body.len != 0) {
astgen.extra.appendAssumeCapacity(
astgen.countBodyLenAfterFixups(args.cc_param_refs) +
astgen.countBodyLenAfterFixups(cc_body),
);
astgen.appendBodyWithFixups(args.cc_param_refs);
astgen.extra.appendAssumeCapacity(astgen.countBodyLenAfterFixups(cc_body));
astgen.appendBodyWithFixups(cc_body);
const break_extra = zir_datas[@intFromEnum(cc_body[cc_body.len - 1])].@"break".payload_index;
astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] =
@ -12316,28 +12217,8 @@ const GenZir = struct {
astgen.appendBodyWithFixupsExtraRefsArrayList(&astgen.extra, body, args.param_insts);
astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash);
// Order is important when unstacking.
if (args.body_gz) |body_gz| body_gz.unstack();
if (args.ret_gz != null) {
args.ret_gz.?.unstack();
args.cc_gz.?.unstack();
args.section_gz.?.unstack();
args.addrspace_gz.?.unstack();
args.align_gz.?.unstack();
}
try gz.instructions.ensureUnusedCapacity(gpa, 1);
astgen.instructions.appendAssumeCapacity(.{
.tag = .func_fancy,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(args.src_node),
.payload_index = payload_index,
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index.toRef();
} else {
break :inst_info .{ .func_fancy, payload_index };
} else inst_info: {
try astgen.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Zir.Inst.Func).@"struct".fields.len + 1 +
@ -12369,30 +12250,29 @@ const GenZir = struct {
astgen.appendBodyWithFixupsExtraRefsArrayList(&astgen.extra, body, args.param_insts);
astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash);
// Order is important when unstacking.
if (args.body_gz) |body_gz| body_gz.unstack();
if (args.ret_gz) |ret_gz| ret_gz.unstack();
if (args.cc_gz) |cc_gz| cc_gz.unstack();
if (args.section_gz) |section_gz| section_gz.unstack();
if (args.addrspace_gz) |addrspace_gz| addrspace_gz.unstack();
if (args.align_gz) |align_gz| align_gz.unstack();
break :inst_info .{
if (args.is_inferred_error) .func_inferred else .func,
payload_index,
};
};
try gz.instructions.ensureUnusedCapacity(gpa, 1);
// Order is important when unstacking.
if (args.body_gz) |body_gz| body_gz.unstack();
if (args.cc_gz) |cc_gz| cc_gz.unstack();
if (args.ret_gz) |ret_gz| ret_gz.unstack();
const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func;
astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(args.src_node),
.payload_index = payload_index,
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index.toRef();
}
astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(args.src_node),
.payload_index = payload_index,
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index.toRef();
}
fn fancyFnExprExtraLen(astgen: *AstGen, param_refs_body: []Zir.Inst.Index, main_body: []Zir.Inst.Index, ref: Zir.Inst.Ref) u32 {
fn fancyFnExprExtraLen(astgen: *AstGen, param_refs_body: []const Zir.Inst.Index, main_body: []const Zir.Inst.Index, ref: Zir.Inst.Ref) u32 {
return countBodyLenAfterFixups(astgen, param_refs_body) +
countBodyLenAfterFixups(astgen, main_body) +
// If there is a body, we need an element for its length; otherwise, if there is a ref, we need to include that.
@ -13576,6 +13456,27 @@ fn advanceSourceCursor(astgen: *AstGen, end: usize) void {
astgen.source_column = column;
}
const SourceCursor = struct {
offset: u32,
line: u32,
column: u32,
};
/// Get the current source cursor, to be restored later with `restoreSourceCursor`.
/// This is useful when analyzing source code out-of-order.
fn saveSourceCursor(astgen: *const AstGen) SourceCursor {
return .{
.offset = astgen.source_offset,
.line = astgen.source_line,
.column = astgen.source_column,
};
}
fn restoreSourceCursor(astgen: *AstGen, cursor: SourceCursor) void {
astgen.source_offset = cursor.offset;
astgen.source_line = cursor.line;
astgen.source_column = cursor.column;
}
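
This pair can be exercised in isolation. A self-contained sketch, under the assumption that the cursor is nothing more than these three fields (the real one also feeds line/column information into error reporting):

```zig
const std = @import("std");

const Cursor = struct { offset: u32, line: u32, column: u32 };

var cursor: Cursor = .{ .offset = 0, .line = 0, .column = 0 };

fn save() Cursor {
    return cursor;
}

fn restore(saved: Cursor) void {
    cursor = saved;
}

test "restore rewinds an advanced cursor" {
    const saved = save();
    cursor = .{ .offset = 100, .line = 4, .column = 2 }; // simulate lowering the body
    restore(saved); // jump back before lowering align/linksection/addrspace
    try std.testing.expectEqual(@as(u32, 0), cursor.offset);
}
```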
/// Detects name conflicts for decls and fields, and populates `namespace.decls` with all named declarations.
/// Returns the number of declarations in the namespace, including unnamed declarations (e.g. `comptime` decls).
fn scanContainer(

lib/std/zig/Zir.zig

@ -2494,46 +2494,25 @@ pub const Inst = struct {
/// Trailing:
/// 0. lib_name: NullTerminatedString, // null terminated string index, if has_lib_name is set
/// if (has_align_ref and !has_align_body) {
/// 1. align: Ref,
/// }
/// if (has_align_body) {
/// 2. align_body_len: u32
/// 3. align_body: u32 // for each align_body_len
/// }
/// if (has_addrspace_ref and !has_addrspace_body) {
/// 4. addrspace: Ref,
/// }
/// if (has_addrspace_body) {
/// 5. addrspace_body_len: u32
/// 6. addrspace_body: u32 // for each addrspace_body_len
/// }
/// if (has_section_ref and !has_section_body) {
/// 7. section: Ref,
/// }
/// if (has_section_body) {
/// 8. section_body_len: u32
/// 9. section_body: u32 // for each section_body_len
/// }
/// if (has_cc_ref and !has_cc_body) {
/// 10. cc: Ref,
/// 1. cc: Ref,
/// }
/// if (has_cc_body) {
/// 11. cc_body_len: u32
/// 12. cc_body: u32 // for each cc_body_len
/// 2. cc_body_len: u32
/// 3. cc_body: u32 // for each cc_body_len
/// }
/// if (has_ret_ty_ref and !has_ret_ty_body) {
/// 13. ret_ty: Ref,
/// 4. ret_ty: Ref,
/// }
/// if (has_ret_ty_body) {
/// 14. ret_ty_body_len: u32
/// 15. ret_ty_body: u32 // for each ret_ty_body_len
/// 5. ret_ty_body_len: u32
/// 6. ret_ty_body: u32 // for each ret_ty_body_len
/// }
/// 16. noalias_bits: u32 // if has_any_noalias
/// - each bit starting with LSB corresponds to parameter indexes
/// 17. body: Index // for each body_len
/// 18. src_locs: Func.SrcLocs // if body_len != 0
/// 19. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype
/// 7. noalias_bits: u32 // if has_any_noalias
/// - each bit starting with LSB corresponds to parameter indexes
/// 8. body: Index // for each body_len
/// 9. src_locs: Func.SrcLocs // if body_len != 0
/// 10. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype
pub const FuncFancy = struct {
/// Points to the block that contains the param instructions for this function.
/// If this is a `declaration`, it refers to the declaration's value body.
@ -2542,29 +2521,20 @@ pub const Inst = struct {
bits: Bits,
/// If both has_cc_ref and has_cc_body are false, it means auto calling convention.
/// If both has_align_ref and has_align_body are false, it means default alignment.
/// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type.
/// If both has_section_ref and has_section_body are false, it means default section.
/// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace.
pub const Bits = packed struct {
is_var_args: bool,
is_inferred_error: bool,
is_test: bool,
is_extern: bool,
is_noinline: bool,
has_align_ref: bool,
has_align_body: bool,
has_addrspace_ref: bool,
has_addrspace_body: bool,
has_section_ref: bool,
has_section_body: bool,
has_cc_ref: bool,
has_cc_body: bool,
has_ret_ty_ref: bool,
has_ret_ty_body: bool,
has_lib_name: bool,
has_any_noalias: bool,
_: u15 = undefined,
_: u21 = undefined,
};
};
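
The padding adjustment above is easy to sanity-check: removing the six align/addrspace/section flags shrinks the flag count from 17 to 11, so the padding grows from `u15` to `u21` and the struct stays exactly 32 bits. A standalone reconstruction of the new layout, copied from the hunk above:

```zig
const std = @import("std");

const Bits = packed struct {
    is_var_args: bool,
    is_inferred_error: bool,
    is_test: bool,
    is_extern: bool,
    is_noinline: bool,
    has_cc_ref: bool,
    has_cc_body: bool,
    has_ret_ty_ref: bool,
    has_ret_ty_body: bool,
    has_lib_name: bool,
    has_any_noalias: bool,
    _: u21 = undefined,
};

comptime {
    // 11 bool flags + 21 padding bits = 32.
    std.debug.assert(@bitSizeOf(Bits) == 32);
}
```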
@ -4269,36 +4239,6 @@ fn findTrackableInner(
var extra_index: usize = extra.end;
extra_index += @intFromBool(extra.data.bits.has_lib_name);
if (extra.data.bits.has_align_body) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findTrackableBody(gpa, contents, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_align_ref) {
extra_index += 1;
}
if (extra.data.bits.has_addrspace_body) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findTrackableBody(gpa, contents, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_addrspace_ref) {
extra_index += 1;
}
if (extra.data.bits.has_section_body) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findTrackableBody(gpa, contents, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_section_ref) {
extra_index += 1;
}
if (extra.data.bits.has_cc_body) {
const body_len = zir.extra[extra_index];
extra_index += 1;
@ -4587,21 +4527,6 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
var ret_ty_body: []const Inst.Index = &.{};
extra_index += @intFromBool(extra.data.bits.has_lib_name);
if (extra.data.bits.has_align_body) {
extra_index += zir.extra[extra_index] + 1;
} else if (extra.data.bits.has_align_ref) {
extra_index += 1;
}
if (extra.data.bits.has_addrspace_body) {
extra_index += zir.extra[extra_index] + 1;
} else if (extra.data.bits.has_addrspace_ref) {
extra_index += 1;
}
if (extra.data.bits.has_section_body) {
extra_index += zir.extra[extra_index] + 1;
} else if (extra.data.bits.has_section_ref) {
extra_index += 1;
}
if (extra.data.bits.has_cc_body) {
extra_index += zir.extra[extra_index] + 1;
} else if (extra.data.bits.has_cc_ref) {
@ -4712,18 +4637,6 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
const bits = extra.data.bits;
var extra_index = extra.end;
extra_index += @intFromBool(bits.has_lib_name);
if (bits.has_align_body) {
const body_len = zir.extra[extra_index];
extra_index += 1 + body_len;
} else extra_index += @intFromBool(bits.has_align_ref);
if (bits.has_addrspace_body) {
const body_len = zir.extra[extra_index];
extra_index += 1 + body_len;
} else extra_index += @intFromBool(bits.has_addrspace_ref);
if (bits.has_section_body) {
const body_len = zir.extra[extra_index];
extra_index += 1 + body_len;
} else extra_index += @intFromBool(bits.has_section_ref);
if (bits.has_cc_body) {
const body_len = zir.extra[extra_index];
extra_index += 1 + body_len;

src/InternPool.zig

@ -1961,9 +1961,6 @@ pub const Key = union(enum) {
is_var_args: bool,
is_generic: bool,
is_noinline: bool,
cc_is_generic: bool,
section_is_generic: bool,
addrspace_is_generic: bool,
pub fn paramIsComptime(self: @This(), i: u5) bool {
assert(i < self.param_types.len);
@ -5456,10 +5453,7 @@ pub const Tag = enum(u8) {
has_comptime_bits: bool,
has_noalias_bits: bool,
is_noinline: bool,
cc_is_generic: bool,
section_is_generic: bool,
addrspace_is_generic: bool,
_: u6 = 0,
_: u9 = 0,
};
};
@ -6885,9 +6879,6 @@ fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke
.cc = type_function.data.flags.cc.unpack(),
.is_var_args = type_function.data.flags.is_var_args,
.is_noinline = type_function.data.flags.is_noinline,
.cc_is_generic = type_function.data.flags.cc_is_generic,
.section_is_generic = type_function.data.flags.section_is_generic,
.addrspace_is_generic = type_function.data.flags.addrspace_is_generic,
.is_generic = type_function.data.flags.is_generic,
};
}
@ -8529,9 +8520,6 @@ pub fn getFuncType(
.has_noalias_bits = key.noalias_bits != 0,
.is_generic = key.is_generic,
.is_noinline = key.is_noinline,
.cc_is_generic = key.cc == null,
.section_is_generic = key.section_is_generic,
.addrspace_is_generic = key.addrspace_is_generic,
},
});
@ -8703,10 +8691,6 @@ pub const GetFuncDeclIesKey = struct {
bare_return_type: Index,
/// null means generic.
cc: ?std.builtin.CallingConvention,
/// null means generic.
alignment: ?Alignment,
section_is_generic: bool,
addrspace_is_generic: bool,
is_var_args: bool,
is_generic: bool,
is_noinline: bool,
@ -8792,9 +8776,6 @@ pub fn getFuncDeclIes(
.has_noalias_bits = key.noalias_bits != 0,
.is_generic = key.is_generic,
.is_noinline = key.is_noinline,
.cc_is_generic = key.cc == null,
.section_is_generic = key.section_is_generic,
.addrspace_is_generic = key.addrspace_is_generic,
},
});
if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits});
@ -8926,9 +8907,6 @@ pub const GetFuncInstanceKey = struct {
comptime_args: []const Index,
noalias_bits: u32,
bare_return_type: Index,
cc: std.builtin.CallingConvention,
alignment: Alignment,
section: OptionalNullTerminatedString,
is_noinline: bool,
generic_owner: Index,
inferred_error_set: bool,
@ -8943,11 +8921,14 @@ pub fn getFuncInstance(
if (arg.inferred_error_set)
return getFuncInstanceIes(ip, gpa, tid, arg);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(generic_owner).ty).func_type;
const func_ty = try ip.getFuncType(gpa, tid, .{
.param_types = arg.param_types,
.return_type = arg.bare_return_type,
.noalias_bits = arg.noalias_bits,
.cc = arg.cc,
.cc = generic_owner_ty.cc,
.is_noinline = arg.is_noinline,
});
@ -8957,8 +8938,6 @@ pub fn getFuncInstance(
try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
arg.comptime_args.len);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner)));
const prev_extra_len = extra.mutate.len;
@ -9005,8 +8984,6 @@ pub fn getFuncInstance(
generic_owner,
func_index,
func_extra_index,
arg.alignment,
arg.section,
);
return gop.put();
}
@ -9031,6 +9008,7 @@ pub fn getFuncInstanceIes(
try items.ensureUnusedCapacity(4);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(arg.generic_owner).ty).func_type;
// The strategy here is to add the function decl unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
@ -9086,15 +9064,12 @@ pub fn getFuncInstanceIes(
.params_len = params_len,
.return_type = error_union_type,
.flags = .{
.cc = .pack(arg.cc),
.cc = .pack(generic_owner_ty.cc),
.is_var_args = false,
.has_comptime_bits = false,
.has_noalias_bits = arg.noalias_bits != 0,
.is_generic = false,
.is_noinline = arg.is_noinline,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
},
});
// no comptime_bits because has_comptime_bits is false
@ -9158,8 +9133,6 @@ pub fn getFuncInstanceIes(
generic_owner,
func_index,
func_extra_index,
arg.alignment,
arg.section,
);
func_gop.putFinal(func_index);
@ -9177,8 +9150,6 @@ fn finishFuncInstance(
generic_owner: Index,
func_index: Index,
func_extra_index: u32,
alignment: Alignment,
section: OptionalNullTerminatedString,
) Allocator.Error!void {
const fn_owner_nav = ip.getNav(ip.funcDeclInfo(generic_owner).owner_nav);
const fn_namespace = ip.getCau(fn_owner_nav.analysis_owner.unwrap().?).namespace;
@ -9191,8 +9162,8 @@ fn finishFuncInstance(
.name = nav_name,
.fqn = try ip.namespacePtr(fn_namespace).internFullyQualifiedName(ip, gpa, tid, nav_name),
.val = func_index,
.alignment = alignment,
.@"linksection" = section,
.alignment = fn_owner_nav.status.resolved.alignment,
.@"linksection" = fn_owner_nav.status.resolved.@"linksection",
.@"addrspace" = fn_owner_nav.status.resolved.@"addrspace",
});

src/Sema.zig

@ -7815,11 +7815,9 @@ fn analyzeCall(
.param_types = new_param_types,
.return_type = owner_info.return_type,
.noalias_bits = owner_info.noalias_bits,
.cc = if (owner_info.cc_is_generic) null else owner_info.cc,
.cc = owner_info.cc,
.is_var_args = owner_info.is_var_args,
.is_noinline = owner_info.is_noinline,
.section_is_generic = owner_info.section_is_generic,
.addrspace_is_generic = owner_info.addrspace_is_generic,
.is_generic = owner_info.is_generic,
};
@ -9555,9 +9553,6 @@ fn zirFunc(
block,
inst_data.src_node,
inst,
.none,
target_util.defaultAddressSpace(target, .function),
.default,
cc,
ret_ty,
false,
@ -9843,13 +9838,7 @@ fn funcCommon(
block: *Block,
src_node_offset: i32,
func_inst: Zir.Inst.Index,
/// null means generic poison
alignment: ?Alignment,
/// null means generic poison
address_space: ?std.builtin.AddressSpace,
section: Section,
/// null means generic poison
cc: ?std.builtin.CallingConvention,
cc: std.builtin.CallingConvention,
/// this might be Type.generic_poison
bare_return_type: Type,
var_args: bool,
@ -9870,26 +9859,17 @@ fn funcCommon(
const cc_src = block.src(.{ .node_offset_fn_type_cc = src_node_offset });
const func_src = block.nodeOffset(src_node_offset);
var is_generic = bare_return_type.isGenericPoison() or
alignment == null or
address_space == null or
section == .generic or
cc == null;
var is_generic = bare_return_type.isGenericPoison();
if (var_args) {
if (is_generic) {
return sema.fail(block, func_src, "generic function cannot be variadic", .{});
}
try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?);
try sema.checkCallConvSupportsVarArgs(block, cc_src, cc);
}
const is_source_decl = sema.generic_owner == .none;
// In the case of generic calling convention, or generic alignment, we use
// default values which are only meaningful for the generic function, *not*
// the instantiation, which can depend on comptime parameters.
// Related proposal: https://github.com/ziglang/zig/issues/11834
const cc_resolved = cc orelse .auto;
var comptime_bits: u32 = 0;
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
const param_ty = Type.fromInterned(param_ty_ip);
@ -9907,11 +9887,11 @@ fn funcCommon(
}
const this_generic = param_ty.isGenericPoison();
is_generic = is_generic or this_generic;
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(cc)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (!param_ty.isValidParamType(zcu)) {
const opaque_str = if (param_ty.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
@ -9919,10 +9899,10 @@ fn funcCommon(
opaque_str, param_ty.fmt(pt),
});
}
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(cc) and !try sema.validateExternType(param_ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
param_ty.fmt(pt), @tagName(cc_resolved),
param_ty.fmt(pt), @tagName(cc),
});
errdefer msg.destroy(sema.gpa);
@ -9952,13 +9932,13 @@ fn funcCommon(
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
switch (cc_resolved) {
switch (cc) {
.x86_64_interrupt, .x86_interrupt => {
const err_code_size = target.ptrBitWidth();
switch (i) {
0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with '{s}' calling convention must be a pointer type", .{@tagName(cc_resolved)}),
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with '{s}' calling convention must be a {d}-bit integer", .{ @tagName(cc_resolved), err_code_size }),
else => return sema.fail(block, param_src, "'{s}' calling convention supports up to 2 parameters, found {d}", .{ @tagName(cc_resolved), i + 1 }),
0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with '{s}' calling convention must be a pointer type", .{@tagName(cc)}),
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with '{s}' calling convention must be a {d}-bit integer", .{ @tagName(cc), err_code_size }),
else => return sema.fail(block, param_src, "'{s}' calling convention supports up to 2 parameters, found {d}", .{ @tagName(cc), i + 1 }),
}
},
.arm_interrupt,
@ -9970,7 +9950,7 @@ fn funcCommon(
.csky_interrupt,
.m68k_interrupt,
.avr_signal,
=> return sema.fail(block, param_src, "parameters are not allowed with '{s}' calling convention", .{@tagName(cc_resolved)}),
=> return sema.fail(block, param_src, "parameters are not allowed with '{s}' calling convention", .{@tagName(cc)}),
else => {},
}
}
@ -9985,9 +9965,6 @@ fn funcCommon(
assert(has_body);
assert(!is_generic);
assert(comptime_bits == 0);
assert(cc != null);
assert(section != .generic);
assert(address_space != null);
assert(!var_args);
if (inferred_error_set) {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
@ -9996,13 +9973,6 @@ fn funcCommon(
.param_types = param_types,
.noalias_bits = noalias_bits,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
.alignment = alignment.?,
.section = switch (section) {
.generic => unreachable,
.default => .none,
.explicit => |x| x.toOptional(),
},
.is_noinline = is_noinline,
.inferred_error_set = inferred_error_set,
.generic_owner = sema.generic_owner,
@ -10016,7 +9986,7 @@ fn funcCommon(
ret_poison,
bare_return_type,
ret_ty_src,
cc_resolved,
cc,
is_source_decl,
ret_ty_requires_comptime,
func_inst,
@ -10027,12 +9997,6 @@ fn funcCommon(
);
}
const section_name: InternPool.OptionalNullTerminatedString = switch (section) {
.generic => .none,
.default => .none,
.explicit => |name| name.toOptional(),
};
if (inferred_error_set) {
assert(!is_extern);
assert(has_body);
@ -10046,9 +10010,6 @@ fn funcCommon(
.comptime_bits = comptime_bits,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc,
.alignment = alignment,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
@ -10059,13 +10020,6 @@ fn funcCommon(
.lbrace_column = @as(u16, @truncate(src_locs.columns)),
.rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
});
// func_decl functions take ownership of the `Nav` of Sema's owner `Cau`.
ip.resolveNavValue(sema.getOwnerCauNav(), .{
.val = func_index,
.alignment = alignment orelse .none,
.@"linksection" = section_name,
.@"addrspace" = address_space orelse .generic,
});
return finishFunc(
sema,
block,
@ -10074,7 +10028,7 @@ fn funcCommon(
ret_poison,
bare_return_type,
ret_ty_src,
cc_resolved,
cc,
is_source_decl,
ret_ty_requires_comptime,
func_inst,
@ -10091,8 +10045,6 @@ fn funcCommon(
.comptime_bits = comptime_bits,
.return_type = bare_return_type.toIntern(),
.cc = cc,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
@ -10100,38 +10052,20 @@ fn funcCommon(
if (is_extern) {
assert(comptime_bits == 0);
assert(cc != null);
assert(alignment != null);
assert(section != .generic);
assert(address_space != null);
assert(!is_generic);
if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, block.src(.{
.node_offset_lib_name = src_node_offset,
}), lib_name);
const func_index = try pt.getExtern(.{
.name = sema.getOwnerCauNavName(),
.ty = func_ty,
.lib_name = try ip.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls),
.is_const = true,
.is_threadlocal = false,
.is_weak_linkage = false,
.is_dll_import = false,
.alignment = alignment orelse .none,
.@"addrspace" = address_space orelse .generic,
.zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction
.owner_nav = undefined, // ignored by `getExtern`
});
// Note that unlike function declaration, extern functions don't touch the
// Sema's owner Cau's owner Nav. The alignment etc were passed above.
const extern_func_index = try sema.resolveExternDecl(block, .fromInterned(func_ty), opt_lib_name, true, false);
return finishFunc(
sema,
block,
func_index,
extern_func_index,
func_ty,
ret_poison,
bare_return_type,
ret_ty_src,
cc_resolved,
cc,
is_source_decl,
ret_ty_requires_comptime,
func_inst,
@ -10154,13 +10088,6 @@ fn funcCommon(
.lbrace_column = @as(u16, @truncate(src_locs.columns)),
.rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
});
// func_decl functions take ownership of the `Nav` of Sema's owner `Cau`.
ip.resolveNavValue(sema.getOwnerCauNav(), .{
.val = func_index,
.alignment = alignment orelse .none,
.@"linksection" = section_name,
.@"addrspace" = address_space orelse .generic,
});
return finishFunc(
sema,
block,
@ -10169,7 +10096,7 @@ fn funcCommon(
ret_poison,
bare_return_type,
ret_ty_src,
cc_resolved,
cc,
is_source_decl,
ret_ty_requires_comptime,
func_inst,
@ -10188,7 +10115,7 @@ fn funcCommon(
ret_poison,
bare_return_type,
ret_ty_src,
cc_resolved,
cc,
is_source_decl,
ret_ty_requires_comptime,
func_inst,
@ -26839,52 +26766,8 @@ fn zirVarExtended(
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
if (small.is_extern) {
// We need to resolve the alignment and addrspace early.
// Keep in sync with logic in `Zcu.PerThread.semaCau`.
const align_src = block.src(.{ .node_offset_var_decl_align = 0 });
const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 });
const decl_inst, const decl_bodies = decl: {
const decl_inst = sema.getOwnerCauDeclInst().resolve(ip) orelse return error.AnalysisFail;
const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst);
break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) };
};
const alignment: InternPool.Alignment = a: {
const align_body = decl_bodies.align_body orelse break :a .none;
const align_ref = try sema.resolveInlineBody(block, align_body, decl_inst);
break :a try sema.analyzeAsAlign(block, align_src, align_ref);
};
const @"addrspace": std.builtin.AddressSpace = as: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(var_ty.toIntern())) {
.func_type => .function,
else => .variable,
};
const target = zcu.getTarget();
const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
.constant => target_util.defaultAddressSpace(target, .global_constant),
else => unreachable,
};
const addrspace_ref = try sema.resolveInlineBody(block, addrspace_body, decl_inst);
break :as try sema.analyzeAsAddressSpace(block, addrspace_src, addrspace_ref, addrspace_ctx);
};
return Air.internedToRef(try pt.getExtern(.{
.name = sema.getOwnerCauNavName(),
.ty = var_ty.toIntern(),
.lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls),
.is_const = small.is_const,
.is_threadlocal = small.is_threadlocal,
.is_weak_linkage = false,
.is_dll_import = false,
.alignment = alignment,
.@"addrspace" = @"addrspace",
.zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction
.owner_nav = undefined, // ignored by `getExtern`
}));
const extern_val = try sema.resolveExternDecl(block, var_ty, lib_name, small.is_const, small.is_threadlocal);
return Air.internedToRef(extern_val);
}
assert(!small.is_const); // non-const non-extern variable is not legal
return Air.internedToRef(try pt.intern(.{ .variable = .{
@ -26897,6 +26780,66 @@ fn zirVarExtended(
} }));
}
fn resolveExternDecl(
sema: *Sema,
block: *Block,
ty: Type,
opt_lib_name: ?[]const u8,
is_const: bool,
is_threadlocal: bool,
) CompileError!InternPool.Index {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
// We need to resolve the alignment and addrspace early.
// Keep in sync with logic in `Zcu.PerThread.semaCau`.
const align_src = block.src(.{ .node_offset_var_decl_align = 0 });
const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 });
const decl_inst, const decl_bodies = decl: {
const decl_inst = sema.getOwnerCauDeclInst().resolve(ip) orelse return error.AnalysisFail;
const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst);
break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) };
};
const alignment: InternPool.Alignment = a: {
const align_body = decl_bodies.align_body orelse break :a .none;
const align_ref = try sema.resolveInlineBody(block, align_body, decl_inst);
break :a try sema.analyzeAsAlign(block, align_src, align_ref);
};
const @"addrspace": std.builtin.AddressSpace = as: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(ty.toIntern())) {
.func_type => .function,
else => .variable,
};
const target = zcu.getTarget();
const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
.constant => target_util.defaultAddressSpace(target, .global_constant),
else => unreachable,
};
const addrspace_ref = try sema.resolveInlineBody(block, addrspace_body, decl_inst);
break :as try sema.analyzeAsAddressSpace(block, addrspace_src, addrspace_ref, addrspace_ctx);
};
return pt.getExtern(.{
.name = sema.getOwnerCauNavName(),
.ty = ty.toIntern(),
.lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, opt_lib_name, .no_embedded_nulls),
.is_const = is_const,
.is_threadlocal = is_threadlocal,
.is_weak_linkage = false,
.is_dll_import = false,
.alignment = alignment,
.@"addrspace" = @"addrspace",
.zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction
.owner_nav = undefined, // ignored by `getExtern`
});
}
fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -26908,9 +26851,6 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
const target = zcu.getTarget();
const align_src = block.src(.{ .node_offset_fn_type_align = inst_data.src_node });
const addrspace_src = block.src(.{ .node_offset_fn_type_addrspace = inst_data.src_node });
const section_src = block.src(.{ .node_offset_fn_type_section = inst_data.src_node });
const cc_src = block.src(.{ .node_offset_fn_type_cc = inst_data.src_node });
const ret_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node });
const has_body = extra.data.body_len != 0;
@ -26924,112 +26864,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :blk lib_name;
} else null;
if (has_body and
(extra.data.bits.has_align_body or extra.data.bits.has_align_ref) and
!target_util.supportsFunctionAlignment(target))
{
return sema.fail(block, align_src, "target does not support function alignment", .{});
}
const @"align": ?Alignment = if (extra.data.bits.has_align_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.bodySlice(extra_index, body_len);
extra_index += body.len;
const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, .{
.needed_comptime_reason = "alignment must be comptime-known",
});
if (val.isGenericPoison()) {
break :blk null;
}
break :blk try sema.validateAlign(block, align_src, try val.toUnsignedIntSema(pt));
} else if (extra.data.bits.has_align_ref) blk: {
const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const uncoerced_align = sema.resolveInst(align_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const coerced_align = sema.coerce(block, Type.u29, uncoerced_align, align_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const align_val = sema.resolveConstDefinedValue(block, align_src, coerced_align, .{
.needed_comptime_reason = "alignment must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk try sema.validateAlign(block, align_src, try align_val.toUnsignedIntSema(pt));
} else .none;
const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.bodySlice(extra_index, body_len);
extra_index += body.len;
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, .{
.needed_comptime_reason = "addrspace must be comptime-known",
});
if (val.isGenericPoison()) {
break :blk null;
}
break :blk zcu.toEnum(std.builtin.AddressSpace, val);
} else if (extra.data.bits.has_addrspace_ref) blk: {
const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const uncoerced_addrspace = sema.resolveInst(addrspace_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const coerced_addrspace = sema.coerce(block, addrspace_ty, uncoerced_addrspace, addrspace_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const addrspace_val = sema.resolveConstDefinedValue(block, addrspace_src, coerced_addrspace, .{
.needed_comptime_reason = "addrspace must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk zcu.toEnum(std.builtin.AddressSpace, addrspace_val);
} else target_util.defaultAddressSpace(target, .function);
const section: Section = if (extra.data.bits.has_section_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.bodySlice(extra_index, body_len);
extra_index += body.len;
const ty = Type.slice_const_u8;
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, .{
.needed_comptime_reason = "linksection must be comptime-known",
});
if (val.isGenericPoison()) {
break :blk .generic;
}
break :blk .{ .explicit = try sema.sliceToIpString(block, section_src, val, .{
.needed_comptime_reason = "linksection must be comptime-known",
}) };
} else if (extra.data.bits.has_section_ref) blk: {
const section_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, .{
.needed_comptime_reason = "linksection must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => {
break :blk .generic;
},
else => |e| return e,
};
break :blk .{ .explicit = section_name };
} else .default;
const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
const cc: std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.bodySlice(extra_index, body_len);
@ -27039,28 +26874,16 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{
.needed_comptime_reason = "calling convention must be comptime-known",
});
if (val.isGenericPoison()) {
break :blk null;
}
break :blk try sema.analyzeValueAsCallconv(block, cc_src, val);
} else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const cc_ty = try sema.getBuiltinType("CallingConvention");
const uncoerced_cc = sema.resolveInst(cc_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const coerced_cc = sema.coerce(block, cc_ty, uncoerced_cc, cc_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const cc_val = sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{
const uncoerced_cc = try sema.resolveInst(cc_ref);
const coerced_cc = try sema.coerce(block, cc_ty, uncoerced_cc, cc_src);
const cc_val = try sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{
.needed_comptime_reason = "calling convention must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
});
break :blk try sema.analyzeValueAsCallconv(block, cc_src, cc_val);
} else cc: {
if (has_body) {
@ -27142,9 +26965,6 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
block,
inst_data.src_node,
inst,
@"align",
@"addrspace",
section,
cc,
ret_ty,
is_var_args,

src/Zcu/PerThread.zig

@ -1314,67 +1314,61 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
};
}
const nav_already_populated, const queue_linker_work = switch (ip.indexToKey(decl_val.toIntern())) {
.func => |f| .{ f.owner_nav == nav_index, true },
.variable => |v| .{ false, v.owner_nav == nav_index },
.@"extern" => .{ false, false },
else => .{ false, true },
const queue_linker_work = switch (ip.indexToKey(decl_val.toIntern())) {
.func => true, // note that this lets function aliases reach codegen
.variable => |v| v.owner_nav == nav_index,
.@"extern" => false,
else => true,
};
if (nav_already_populated) {
// This is a function declaration.
// Logic in `Sema.funcCommon` has already populated the `Nav` for us.
assert(ip.getNav(nav_index).status.resolved.val == decl_val.toIntern());
} else {
// Keep in sync with logic in `Sema.zirVarExtended`.
const alignment: InternPool.Alignment = a: {
const align_body = decl_bodies.align_body orelse break :a .none;
const align_ref = try sema.resolveInlineBody(&block, align_body, inst_info.inst);
break :a try sema.analyzeAsAlign(&block, align_src, align_ref);
};
// Keep in sync with logic in `Sema.zirVarExtended`.
const alignment: InternPool.Alignment = a: {
const align_body = decl_bodies.align_body orelse break :a .none;
const align_ref = try sema.resolveInlineBody(&block, align_body, inst_info.inst);
break :a try sema.analyzeAsAlign(&block, align_src, align_ref);
};
const @"linksection": InternPool.OptionalNullTerminatedString = ls: {
const linksection_body = decl_bodies.linksection_body orelse break :ls .none;
const linksection_ref = try sema.resolveInlineBody(&block, linksection_body, inst_info.inst);
const bytes = try sema.toConstString(&block, section_src, linksection_ref, .{
.needed_comptime_reason = "linksection must be comptime-known",
});
if (std.mem.indexOfScalar(u8, bytes, 0) != null) {
return sema.fail(&block, section_src, "linksection cannot contain null bytes", .{});
} else if (bytes.len == 0) {
return sema.fail(&block, section_src, "linksection cannot be empty", .{});
}
break :ls try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls);
};
const @"addrspace": std.builtin.AddressSpace = as: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
.func => .function,
.variable => .variable,
.@"extern" => |e| if (ip.indexToKey(e.ty) == .func_type)
.function
else
.variable,
else => .constant,
};
const target = zcu.getTarget();
const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
.constant => target_util.defaultAddressSpace(target, .global_constant),
else => unreachable,
};
const addrspace_ref = try sema.resolveInlineBody(&block, addrspace_body, inst_info.inst);
break :as try sema.analyzeAsAddressSpace(&block, addrspace_src, addrspace_ref, addrspace_ctx);
};
ip.resolveNavValue(nav_index, .{
.val = decl_val.toIntern(),
.alignment = alignment,
.@"linksection" = @"linksection",
.@"addrspace" = @"addrspace",
const @"linksection": InternPool.OptionalNullTerminatedString = ls: {
const linksection_body = decl_bodies.linksection_body orelse break :ls .none;
const linksection_ref = try sema.resolveInlineBody(&block, linksection_body, inst_info.inst);
const bytes = try sema.toConstString(&block, section_src, linksection_ref, .{
.needed_comptime_reason = "linksection must be comptime-known",
});
}
if (std.mem.indexOfScalar(u8, bytes, 0) != null) {
return sema.fail(&block, section_src, "linksection cannot contain null bytes", .{});
} else if (bytes.len == 0) {
return sema.fail(&block, section_src, "linksection cannot be empty", .{});
}
break :ls try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls);
};
const @"addrspace": std.builtin.AddressSpace = as: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
.func => .function,
.variable => .variable,
.@"extern" => |e| if (ip.indexToKey(e.ty) == .func_type)
.function
else
.variable,
else => .constant,
};
const target = zcu.getTarget();
const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
.constant => target_util.defaultAddressSpace(target, .global_constant),
else => unreachable,
};
const addrspace_ref = try sema.resolveInlineBody(&block, addrspace_body, inst_info.inst);
break :as try sema.analyzeAsAddressSpace(&block, addrspace_src, addrspace_ref, addrspace_ctx);
};
ip.resolveNavValue(nav_index, .{
.val = decl_val.toIntern(),
.alignment = alignment,
.@"linksection" = @"linksection",
.@"addrspace" = @"addrspace",
});
// Mark the `Cau` as completed before evaluating the export!
assert(zcu.analysis_in_progress.swapRemove(anal_unit));
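
With the `nav_already_populated` special case removed, function declarations now flow through the same alignment/linksection/addrspace resolution as variables. For reference, a hedged sketch of declarations this path handles (identifiers hypothetical); the commented-out lines are exactly the cases the checks above reject:

// Resolved via the align/linksection bodies above.
var counter: u32 align(8) linksection(".counters") = 0;

// Rejected: "linksection cannot be empty"
// var bad_empty: u32 linksection("") = 0;

// Rejected: "linksection cannot contain null bytes"
// var bad_nul: u32 linksection("a\x00b") = 0;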


@@ -2349,12 +2349,6 @@ const Writer = struct {
false,
false,
.none,
&.{},
.none,
&.{},
.none,
&.{},
.none,
&.{},
ret_ty_ref,
@@ -2372,12 +2366,6 @@
const extra = self.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
var extra_index: usize = extra.end;
var align_ref: Zir.Inst.Ref = .none;
var align_body: []const Zir.Inst.Index = &.{};
var addrspace_ref: Zir.Inst.Ref = .none;
var addrspace_body: []const Zir.Inst.Index = &.{};
var section_ref: Zir.Inst.Ref = .none;
var section_body: []const Zir.Inst.Index = &.{};
var cc_ref: Zir.Inst.Ref = .none;
var cc_body: []const Zir.Inst.Index = &.{};
var ret_ty_ref: Zir.Inst.Ref = .none;
@@ -2390,33 +2378,6 @@
}
try self.writeFlag(stream, "test, ", extra.data.bits.is_test);
if (extra.data.bits.has_align_body) {
const body_len = self.code.extra[extra_index];
extra_index += 1;
align_body = self.code.bodySlice(extra_index, body_len);
extra_index += align_body.len;
} else if (extra.data.bits.has_align_ref) {
align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
}
if (extra.data.bits.has_addrspace_body) {
const body_len = self.code.extra[extra_index];
extra_index += 1;
addrspace_body = self.code.bodySlice(extra_index, body_len);
extra_index += addrspace_body.len;
} else if (extra.data.bits.has_addrspace_ref) {
addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
}
if (extra.data.bits.has_section_body) {
const body_len = self.code.extra[extra_index];
extra_index += 1;
section_body = self.code.bodySlice(extra_index, body_len);
extra_index += section_body.len;
} else if (extra.data.bits.has_section_ref) {
section_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
}
if (extra.data.bits.has_cc_body) {
const body_len = self.code.extra[extra_index];
extra_index += 1;
@@ -2455,12 +2416,6 @@
extra.data.bits.is_var_args,
extra.data.bits.is_extern,
extra.data.bits.is_noinline,
align_ref,
align_body,
addrspace_ref,
addrspace_body,
section_ref,
section_body,
cc_ref,
cc_body,
ret_ty_ref,
@@ -2651,12 +2606,6 @@
var_args: bool,
is_extern: bool,
is_noinline: bool,
align_ref: Zir.Inst.Ref,
align_body: []const Zir.Inst.Index,
addrspace_ref: Zir.Inst.Ref,
addrspace_body: []const Zir.Inst.Index,
section_ref: Zir.Inst.Ref,
section_body: []const Zir.Inst.Index,
cc_ref: Zir.Inst.Ref,
cc_body: []const Zir.Inst.Index,
ret_ty_ref: Zir.Inst.Ref,
@@ -2666,9 +2615,6 @@
src_locs: Zir.Inst.Func.SrcLocs,
noalias_bits: u32,
) !void {
try self.writeOptionalInstRefOrBody(stream, "align=", align_ref, align_body);
try self.writeOptionalInstRefOrBody(stream, "addrspace=", addrspace_ref, addrspace_body);
try self.writeOptionalInstRefOrBody(stream, "section=", section_ref, section_body);
try self.writeOptionalInstRefOrBody(stream, "cc=", cc_ref, cc_body);
try self.writeOptionalInstRefOrBody(stream, "ret_ty=", ret_ty_ref, ret_ty_body);
try self.writeFlag(stream, "vargs, ", var_args);


@@ -361,47 +361,6 @@ fn simple4() align(4) i32 {
return 0x19;
}
test "function align expression depends on generic parameter" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
if (native_arch == .thumb) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
try expect(foobar(1) == 2);
try expect(foobar(4) == 5);
try expect(foobar(8) == 9);
}
fn foobar(comptime align_bytes: u8) align(align_bytes) u8 {
return align_bytes + 1;
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "function callconv expression depends on generic parameter" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try expect(foobar(.C, 1) == 2);
try expect(foobar(.Unspecified, 2) == 3);
}
fn foobar(comptime cc: std.builtin.CallingConvention, arg: u8) callconv(cc) u8 {
return arg + 1;
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "runtime-known array index has best alignment possible" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
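
The deleted tests exercised align and callconv expressions that read a generic parameter, which this commit turns into compile errors. A hedged sketch of the replacement pattern (names hypothetical), mirroring the linksection test updated later in this commit: capture a comptime argument of an enclosing generic function instead of a parameter of the annotated function itself.

fn Aligned(comptime align_bytes: u8) type {
    return struct {
        // `align_bytes` is not a parameter of `f`, so the align
        // expression may still refer to it.
        fn f() align(align_bytes) u8 {
            return align_bytes + 1;
        }
    };
}

// usage: Aligned(4).f() == 5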


@@ -637,24 +637,6 @@ test "address of function parameter is consistent in other parameter type" {
S.paramAddrMatch(1, 2);
}
test "address of function parameter is consistent in function align" {
switch (builtin.target.cpu.arch) {
.wasm32, .wasm64 => return, // function alignment not supported
else => {},
}
const S = struct {
fn paramAddrMatch(comptime x: u8) align(if (&x != &x) unreachable else 1) void {}
};
S.paramAddrMatch(1);
}
test "address of function parameter is consistent in function callconv" {
const S = struct {
fn paramAddrMatch(comptime x: u8) callconv(if (&x != &x) unreachable else .auto) void {}
};
S.paramAddrMatch(1);
}
test "address of function parameter is consistent in function return type" {
const S = struct {
fn paramAddrMatch(comptime x: u8) if (&x != &x) unreachable else void {}
@@ -662,13 +644,6 @@ test "address of function parameter is consistent in function return type" {
S.paramAddrMatch(1);
}
test "address of function parameter is consistent in function addrspace" {
const S = struct {
fn paramAddrMatch(comptime x: u8) addrspace(if (&x != &x) unreachable else .generic) void {}
};
S.paramAddrMatch(1);
}
test "function parameter self equality" {
const S = struct {
fn equal(x: u32) bool {
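
A hedged sketch of the resulting split, derived from the kept and deleted tests above: the return-type variant still compiles because parameters stay in scope there, while the align/callconv/addrspace variants can no longer name `x` at all.

// Still compiles: parameters remain in scope in the return type position.
fn retOk(comptime x: u8) if (&x != &x) unreachable else void {}

// No longer compiles: `x` is not in scope for callconv (nor align/addrspace).
// fn ccGone(comptime x: u8) callconv(if (&x != &x) unreachable else .auto) void {}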


@@ -1,13 +1,9 @@
fn foo(...) void {}
fn bar(a: anytype, ...) callconv(a) void {}
inline fn foo2(...) void {}
comptime {
_ = foo;
}
comptime {
_ = bar;
}
comptime {
_ = foo2;
}
@@ -19,4 +15,3 @@ comptime {
// :1:1: note: supported calling conventions: 'x86_64_sysv', 'x86_64_win'
// :1:1: error: variadic function does not support 'inline' calling convention
// :1:1: note: supported calling conventions: 'x86_64_sysv', 'x86_64_win'
// :2:1: error: generic function cannot be variadic


@@ -937,9 +937,13 @@ fn testLinksection(b: *Build, opts: Options) *Step {
const obj = addObject(b, opts, .{ .name = "main", .zig_source_bytes =
\\export var test_global: u32 linksection("__DATA,__TestGlobal") = undefined;
\\export fn testFn() linksection("__TEXT,__TestFn") callconv(.C) void {
\\ testGenericFn("A");
\\ TestGenericFn("A").f();
\\}
\\fn TestGenericFn(comptime suffix: []const u8) type {
\\ return struct {
\\ fn f() linksection("__TEXT,__TestGenFn" ++ suffix) void {}
\\ };
\\}
\\fn testGenericFn(comptime suffix: []const u8) linksection("__TEXT,__TestGenFn" ++ suffix) void {}
});
const check = obj.checkObject();
@@ -950,7 +954,7 @@
if (opts.optimize == .Debug) {
check.checkInSymtab();
check.checkContains("(__TEXT,__TestGenFnA) _main.testGenericFn__anon_");
check.checkContains("(__TEXT,__TestGenFnA) _main.TestGenericFn(");
}
test_step.dependOn(&check.step);
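
The updated test above shows the general migration: a linksection (or align/addrspace/callconv) that used to depend on a comptime parameter moves into a generic type, so the expression captures an enclosing comptime value instead. The same wrapper works for calling conventions; a hedged sketch (names hypothetical):

const std = @import("std");

fn WithCc(comptime cc: std.builtin.CallingConvention) type {
    return struct {
        // `cc` is captured from the enclosing type function rather than
        // taken as a parameter of `f`, so callconv may still use it.
        fn f() callconv(cc) void {}
    };
}

// usage: WithCc(.c).f();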