mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-06 13:54:21 +00:00
regalloc: ensure we only freeze/unfreeze at the outermost scope
This prevents a nasty class of bug where an inner scope accidentally unfreezes a register that was frozen on purpose by an outer scope, risking reallocation of a register that is still in use. Also fixes CF flags spilling in the aarch64 backend.
This commit is contained in:
parent ac1aaec9c3
commit ac954eb539
6 changed files with 735 additions and 413 deletions
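The core of the change, as a minimal sketch (an editor's illustration distilled from the diff below, not part of the commit; `self`, `reg`, and the register_manager calls follow the API introduced in register_manager.zig):

    // Before: freezing by raw register. Any scope could unfreeze any
    // register, so an inner helper could unfreeze a register that an
    // outer scope had frozen on purpose.
    self.register_manager.freezeRegs(&.{reg});
    defer self.register_manager.unfreezeRegs(&.{reg});

    // After: freezeReg returns a ?RegisterLock. A register already
    // frozen by an outer scope yields null, and only the lock holder
    // may unfreeze it, so the freeze is owned by the outermost scope.
    const lock = self.register_manager.freezeReg(reg);
    defer if (lock) |locked| self.register_manager.unfreezeReg(locked);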
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
+const RegisterLock = RegisterManager.RegisterLock;

 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
 const FnResult = @import("../../codegen.zig").FnResult;
@@ -910,16 +911,16 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
     if (self.compare_flags_inst) |inst_to_save| {
         const mcv = self.getResolvedInstValue(inst_to_save);
-        switch (mcv) {
+        const new_mcv = switch (mcv) {
             .compare_flags_signed,
             .compare_flags_unsigned,
-            => {},
+            => try self.allocRegOrMem(inst_to_save, true),
+            .register_c_flag,
+            .register_v_flag,
+            => try self.allocRegOrMem(inst_to_save, false),
             else => unreachable, // mcv doesn't occupy the compare flags
-        }
+        };

-        const new_mcv = try self.allocRegOrMem(inst_to_save, true);
         try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
         log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
@@ -927,6 +928,15 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
         try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);

         self.compare_flags_inst = null;
+
+        // TODO consolidate with register manager and spillInstruction
+        // this call should really belong in the register manager!
+        switch (mcv) {
+            .register_c_flag,
+            .register_v_flag,
+            => |reg| self.register_manager.freeReg(reg),
+            else => {},
+        }
     }
 }
@@ -1048,8 +1058,8 @@ fn trunc(
             }
         },
     };
-    self.register_manager.freezeRegs(&.{operand_reg});
-    defer self.register_manager.unfreezeRegs(&.{operand_reg});
+    const lock = self.register_manager.freezeReg(operand_reg);
+    defer if (lock) |reg| self.register_manager.unfreezeReg(reg);

    const dest_reg = if (maybe_inst) |inst| blk: {
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -1135,8 +1145,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
         .register => |r| r,
         else => try self.copyToTmpRegister(operand_ty, operand),
     };
-    self.register_manager.freezeRegs(&.{op_reg});
-    defer self.register_manager.unfreezeRegs(&.{op_reg});
+    const reg_lock = self.register_manager.freezeRegAssumeUnused(op_reg);
+    defer self.register_manager.unfreezeReg(reg_lock);

     const dest_reg = blk: {
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1168,8 +1178,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
         .register => |r| r,
         else => try self.copyToTmpRegister(operand_ty, operand),
     };
-    self.register_manager.freezeRegs(&.{op_reg});
-    defer self.register_manager.unfreezeRegs(&.{op_reg});
+    const reg_lock = self.register_manager.freezeRegAssumeUnused(op_reg);
+    defer self.register_manager.unfreezeReg(reg_lock);

     const dest_reg = blk: {
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1257,8 +1267,17 @@ fn binOpRegister(
     const lhs_is_register = lhs == .register;
     const rhs_is_register = rhs == .register;

-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-    if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.freezeReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);
+
+    const rhs_lock: ?RegisterLock = if (rhs_is_register)
+        self.register_manager.freezeReg(rhs.register)
+    else
+        null;
+    defer if (rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1270,13 +1289,13 @@ fn binOpRegister(

         const raw_reg = try self.register_manager.allocReg(track_inst);
         const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const rhs_reg = if (rhs_is_register) rhs.register else blk: {
         const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
@@ -1286,13 +1305,13 @@ fn binOpRegister(

         const raw_reg = try self.register_manager.allocReg(track_inst);
         const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+    const new_rhs_lock = self.register_manager.freezeReg(rhs_reg);
+    defer if (new_rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = switch (mir_tag) {
         .cmp_shifted_register => undefined, // cmp has no destination register
@@ -1394,7 +1413,11 @@ fn binOpImmediate(
 ) !MCValue {
     const lhs_is_register = lhs == .register;

-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.freezeReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1408,13 +1431,13 @@ fn binOpImmediate(

         const raw_reg = try self.register_manager.allocReg(track_inst);
         const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = switch (mir_tag) {
         .cmp_immediate => undefined, // cmp has no destination register
@@ -1758,7 +1781,10 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const lhs_ty = self.air.typeOf(bin_op.lhs);
     const rhs_ty = self.air.typeOf(bin_op.rhs);

-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+    const result: MCValue = if (self.liveness.isUnused(inst))
+        .dead
+    else
+        try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
@@ -1815,13 +1841,13 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
             };
             const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
             const dest_reg = dest.register;
-            self.register_manager.freezeRegs(&.{dest_reg});
-            defer self.register_manager.unfreezeRegs(&.{dest_reg});
+            const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+            defer self.register_manager.unfreezeReg(dest_reg_lock);

             const raw_truncated_reg = try self.register_manager.allocReg(null);
             const truncated_reg = registerAlias(raw_truncated_reg, lhs_ty.abiSize(self.target.*));
-            self.register_manager.freezeRegs(&.{truncated_reg});
-            defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+            const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+            defer self.register_manager.unfreezeReg(truncated_reg_lock);

             // sbfx/ubfx truncated, dest, #0, #bits
             try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1922,12 +1948,12 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

                 const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
                 const dest_reg = dest.register;
-                self.register_manager.freezeRegs(&.{dest_reg});
-                defer self.register_manager.unfreezeRegs(&.{dest_reg});
+                const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+                defer self.register_manager.unfreezeReg(dest_reg_lock);

                 const truncated_reg = try self.register_manager.allocReg(null);
-                self.register_manager.freezeRegs(&.{truncated_reg});
-                defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+                const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+                defer self.register_manager.unfreezeReg(truncated_reg_lock);

                 try self.truncRegister(
                     dest_reg.to32(),
@@ -1977,36 +2003,44 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 const lhs_is_register = lhs == .register;
                 const rhs_is_register = rhs == .register;

-                if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-                if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+                const lhs_lock: ?RegisterLock = if (lhs_is_register)
+                    self.register_manager.freezeRegAssumeUnused(lhs.register)
+                else
+                    null;
+                defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);
+
+                const rhs_lock: ?RegisterLock = if (rhs_is_register)
+                    self.register_manager.freezeRegAssumeUnused(rhs.register)
+                else
+                    null;
+                defer if (rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

                 const lhs_reg = if (lhs_is_register) lhs.register else blk: {
                     const raw_reg = try self.register_manager.allocReg(null);
                     const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-                    self.register_manager.freezeRegs(&.{reg});
                     break :blk reg;
                 };
-                defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+                const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+                defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

                 const rhs_reg = if (rhs_is_register) rhs.register else blk: {
                     const raw_reg = try self.register_manager.allocReg(null);
                     const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
-                    self.register_manager.freezeRegs(&.{reg});
                     break :blk reg;
                 };
-                defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+                const new_rhs_lock = self.register_manager.freezeReg(rhs_reg);
+                defer if (new_rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

                 if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
                 if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);

                 // TODO reuse operands
                 const dest_reg = blk: {
                     const raw_reg = try self.register_manager.allocReg(null);
                     const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-                    self.register_manager.freezeRegs(&.{reg});
                     break :blk reg;
                 };
-                defer self.register_manager.unfreezeRegs(&.{dest_reg});
+                const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+                defer self.register_manager.unfreezeReg(dest_reg_lock);

                 switch (int_info.signedness) {
                     .signed => {
@@ -2021,8 +2055,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         });

                         const dest_high_reg = try self.register_manager.allocReg(null);
-                        self.register_manager.freezeRegs(&.{dest_high_reg});
-                        defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
+                        const dest_high_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_high_reg);
+                        defer self.register_manager.unfreezeReg(dest_high_reg_lock);

                         // smulh dest_high, lhs, rhs
                         _ = try self.addInst(.{
@@ -2071,8 +2105,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .unsigned => {
                         const dest_high_reg = try self.register_manager.allocReg(null);
-                        self.register_manager.freezeRegs(&.{dest_high_reg});
-                        defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
+                        const dest_high_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_high_reg);
+                        defer self.register_manager.unfreezeReg(dest_high_reg_lock);

                         // umulh dest_high, lhs, rhs
                         _ = try self.addInst(.{
@@ -2127,8 +2161,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }

                 const truncated_reg = try self.register_manager.allocReg(null);
-                self.register_manager.freezeRegs(&.{truncated_reg});
-                defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+                const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+                defer self.register_manager.unfreezeReg(truncated_reg_lock);

                 try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -2168,14 +2202,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             if (int_info.bits <= 64) {
                 const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);

-                if (lhs == .register) self.register_manager.freezeRegs(&.{lhs.register});
-                defer if (lhs == .register) self.register_manager.unfreezeRegs(&.{lhs.register});
+                const lhs_lock: ?RegisterLock = if (lhs == .register)
+                    self.register_manager.freezeRegAssumeUnused(lhs.register)
+                else
+                    null;
+                defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);
+
                 try self.spillCompareFlagsIfOccupied();
                 self.compare_flags_inst = null;

                 // lsl dest, lhs, rhs
                 const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
                 const dest_reg = dest.register;
+                const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+                defer self.register_manager.unfreezeReg(dest_reg_lock);

                 // asr/lsr reconstructed, dest, rhs
                 const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
@@ -2184,7 +2224,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 _ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);

                 try self.genSetStack(lhs_ty, stack_offset, dest);
-                try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
+                try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
+                    .compare_flags_unsigned = .neq,
+                });

                 break :result MCValue{ .stack_offset = stack_offset };
             } else {
@@ -2411,14 +2453,18 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);

-    if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
-    defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
+    const index_lock: ?RegisterLock = if (index_is_register)
+        self.register_manager.freezeRegAssumeUnused(index_mcv.register)
+    else
+        null;
+    defer if (index_lock) |reg| self.register_manager.unfreezeReg(reg);

     const base_mcv: MCValue = switch (slice_mcv) {
         .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) },
         else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
     };
-    self.register_manager.freezeRegs(&.{base_mcv.register});
+    const base_lock = self.register_manager.freezeRegAssumeUnused(base_mcv.register);
+    defer self.register_manager.unfreezeReg(base_lock);

     switch (elem_size) {
         else => {
@@ -2559,8 +2605,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
         .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
         .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
         .register => |addr_reg| {
-            self.register_manager.freezeRegs(&.{addr_reg});
-            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+            const addr_reg_lock = self.register_manager.freezeReg(addr_reg);
+            defer if (addr_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

             switch (dst_mcv) {
                 .dead => unreachable,
@@ -2573,16 +2619,19 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
                     if (elem_size <= 8) {
                         const raw_tmp_reg = try self.register_manager.allocReg(null);
                         const tmp_reg = registerAlias(raw_tmp_reg, elem_size);
-                        self.register_manager.freezeRegs(&.{tmp_reg});
-                        defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+                        const tmp_reg_lock = self.register_manager.freezeRegAssumeUnused(tmp_reg);
+                        defer self.register_manager.unfreezeReg(tmp_reg_lock);

                         try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
                         try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
                     } else {
                         // TODO optimize the register allocation
                         const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
-                        self.register_manager.freezeRegs(&regs);
-                        defer self.register_manager.unfreezeRegs(&regs);
+                        var regs_locks: [4]RegisterLock = undefined;
+                        self.register_manager.freezeRegsAssumeUnused(4, regs, &regs_locks);
+                        defer for (regs_locks) |reg| {
+                            self.register_manager.unfreezeReg(reg);
+                        };

                         const src_reg = addr_reg;
                         const dst_reg = regs[0];
@@ -2784,8 +2833,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
             try self.genSetStack(value_ty, off, value);
         },
         .register => |addr_reg| {
-            self.register_manager.freezeRegs(&.{addr_reg});
-            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+            const addr_reg_lock = self.register_manager.freezeReg(addr_reg);
+            defer if (addr_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

             switch (value) {
                 .register => |value_reg| {
@@ -2795,8 +2844,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                     if (abi_size <= 8) {
                         const raw_tmp_reg = try self.register_manager.allocReg(null);
                         const tmp_reg = registerAlias(raw_tmp_reg, abi_size);
-                        self.register_manager.freezeRegs(&.{tmp_reg});
-                        defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+                        const tmp_reg_lock = self.register_manager.freezeRegAssumeUnused(tmp_reg);
+                        defer self.register_manager.unfreezeReg(tmp_reg_lock);

                         try self.genSetReg(value_ty, tmp_reg, value);
                         try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
@@ -2856,12 +2905,12 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
             const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
                 .immediate = struct_field_offset,
             });
-            self.register_manager.freezeRegs(&.{offset_reg});
-            defer self.register_manager.unfreezeRegs(&.{offset_reg});
+            const offset_reg_lock = self.register_manager.freezeRegAssumeUnused(offset_reg);
+            defer self.register_manager.unfreezeReg(offset_reg_lock);

             const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
-            self.register_manager.freezeRegs(&.{addr_reg});
-            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+            const addr_reg_lock = self.register_manager.freezeRegAssumeUnused(addr_reg);
+            defer self.register_manager.unfreezeReg(addr_reg_lock);

             const dest = try self.binOp(
                 .add,
@@ -3369,6 +3418,9 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     const parent_compare_flags_inst = self.compare_flags_inst;

     try self.branch_stack.append(.{});
+    errdefer {
+        _ = self.branch_stack.pop();
+    }

     try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
     for (liveness_condbr.then_deaths) |operand| {
@@ -3955,8 +4007,38 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
         },
         .register_c_flag,
         .register_v_flag,
-        => {
-            return self.fail("TODO implement genSetStack {}", .{mcv});
+        => |reg| {
+            const reg_lock = self.register_manager.freezeReg(reg);
+            defer if (reg_lock) |locked_reg| self.register_manager.unfreezeReg(locked_reg);
+
+            const wrapped_ty = ty.structFieldType(0);
+            try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
+
+            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+            const raw_cond_reg = try self.register_manager.allocReg(null);
+            const cond_reg = registerAlias(
+                raw_cond_reg,
+                @intCast(u32, overflow_bit_ty.abiSize(self.target.*)),
+            );
+
+            // C flag: cset reg, cs
+            // V flag: cset reg, vs
+            _ = try self.addInst(.{
+                .tag = .cset,
+                .data = .{ .r_cond = .{
+                    .rd = cond_reg,
+                    .cond = switch (mcv) {
+                        .register_c_flag => .cs,
+                        .register_v_flag => .vs,
+                        else => unreachable,
+                    },
+                } },
+            });
+
+            try self.genSetStack(overflow_bit_ty, stack_offset - overflow_bit_offset, .{
+                .register = cond_reg,
+            });
         },
         .got_load,
         .direct_load,
@@ -3983,8 +4065,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro

             // TODO call extern memcpy
             const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
-            self.register_manager.freezeRegs(&regs);
-            defer self.register_manager.unfreezeRegs(&regs);
+            var regs_locks: [5]RegisterLock = undefined;
+            self.register_manager.freezeRegsAssumeUnused(5, regs, &regs_locks);
+            defer for (regs_locks) |reg| {
+                self.register_manager.unfreezeReg(reg);
+            };

             const src_reg = regs[0];
             const dst_reg = regs[1];
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &allocatable_registers);
+const RegisterLock = RegisterManager.RegisterLock;

 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -1038,8 +1039,8 @@ fn trunc(
             }
         },
     };
-    self.register_manager.freezeRegs(&.{operand_reg});
-    defer self.register_manager.unfreezeRegs(&.{operand_reg});
+    const operand_reg_lock = self.register_manager.freezeReg(operand_reg);
+    defer if (operand_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = if (maybe_inst) |inst| blk: {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -1127,8 +1128,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
         .register => |r| r,
         else => try self.copyToTmpRegister(operand_ty, operand),
     };
-    self.register_manager.freezeRegs(&.{op_reg});
-    defer self.register_manager.unfreezeRegs(&.{op_reg});
+    const op_reg_lock = self.register_manager.freezeRegAssumeUnused(op_reg);
+    defer self.register_manager.unfreezeReg(op_reg_lock);

     const dest_reg = blk: {
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1157,8 +1158,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
         .register => |r| r,
         else => try self.copyToTmpRegister(operand_ty, operand),
     };
-    self.register_manager.freezeRegs(&.{op_reg});
-    defer self.register_manager.unfreezeRegs(&.{op_reg});
+    const op_reg_lock = self.register_manager.freezeRegAssumeUnused(op_reg);
+    defer self.register_manager.unfreezeReg(op_reg_lock);

     const dest_reg = blk: {
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1218,15 +1219,15 @@ fn minMax(
         .register => |r| r,
         else => try self.copyToTmpRegister(lhs_ty, lhs),
     };
-    self.register_manager.freezeRegs(&.{lhs_reg});
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const lhs_reg_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (lhs_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

     const rhs_reg = switch (rhs) {
         .register => |r| r,
         else => try self.copyToTmpRegister(rhs_ty, rhs),
     };
-    self.register_manager.freezeRegs(&.{rhs_reg});
-    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+    const rhs_reg_lock = self.register_manager.freezeReg(rhs_reg);
+    defer if (rhs_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = if (maybe_inst) |inst| blk: {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1392,12 +1393,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
             };
             const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
             const dest_reg = dest.register;
-            self.register_manager.freezeRegs(&.{dest_reg});
-            defer self.register_manager.unfreezeRegs(&.{dest_reg});
+            const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+            defer self.register_manager.unfreezeReg(dest_reg_lock);

             const truncated_reg = try self.register_manager.allocReg(null);
-            self.register_manager.freezeRegs(&.{truncated_reg});
-            defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+            const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+            defer self.register_manager.unfreezeReg(truncated_reg_lock);

             // sbfx/ubfx truncated, dest, #0, #bits
             try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1493,12 +1494,12 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

             const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
             const dest_reg = dest.register;
-            self.register_manager.freezeRegs(&.{dest_reg});
-            defer self.register_manager.unfreezeRegs(&.{dest_reg});
+            const dest_reg_lock = self.register_manager.freezeRegAssumeUnused(dest_reg);
+            defer self.register_manager.unfreezeReg(dest_reg_lock);

             const truncated_reg = try self.register_manager.allocReg(null);
-            self.register_manager.freezeRegs(&.{truncated_reg});
-            defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+            const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+            defer self.register_manager.unfreezeReg(truncated_reg_lock);

             // sbfx/ubfx truncated, dest, #0, #bits
             try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1526,28 +1527,32 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const lhs_is_register = lhs == .register;
    const rhs_is_register = rhs == .register;

-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-    if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.freezeReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

-    const lhs_reg = if (lhs_is_register) lhs.register else blk: {
-        const reg = try self.register_manager.allocReg(null);
-        self.register_manager.freezeRegs(&.{reg});
-
-        break :blk reg;
-    };
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const lhs_reg = if (lhs_is_register)
+        lhs.register
+    else
+        try self.register_manager.allocReg(null);
+    const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

-    const rhs_reg = if (rhs_is_register) rhs.register else blk: {
-        const reg = try self.register_manager.allocReg(null);
-        self.register_manager.freezeRegs(&.{reg});
-
-        break :blk reg;
-    };
-    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+    const rhs_reg = if (rhs_is_register)
+        rhs.register
+    else
+        try self.register_manager.allocReg(null);
+    const new_rhs_lock = self.register_manager.freezeReg(rhs_reg);
+    defer if (new_rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_regs = try self.register_manager.allocRegs(2, .{ null, null });
-    self.register_manager.freezeRegs(&dest_regs);
-    defer self.register_manager.unfreezeRegs(&dest_regs);
+    var dest_regs_locks: [2]RegisterLock = undefined;
+    self.register_manager.freezeRegsAssumeUnused(2, dest_regs, &dest_regs_locks);
+    defer for (dest_regs_locks) |reg| {
+        self.register_manager.unfreezeReg(reg);
+    };
     const rdlo = dest_regs[0];
     const rdhi = dest_regs[1];
@@ -1555,8 +1560,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);

     const truncated_reg = try self.register_manager.allocReg(null);
-    self.register_manager.freezeRegs(&.{truncated_reg});
-    defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+    const truncated_reg_lock = self.register_manager.freezeRegAssumeUnused(truncated_reg);
+    defer self.register_manager.unfreezeReg(truncated_reg_lock);

     _ = try self.addInst(.{
         .tag = base_tag,
@@ -1648,8 +1653,11 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             if (int_info.bits <= 32) {
                 const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);

-                if (lhs == .register) self.register_manager.freezeRegs(&.{lhs.register});
-                defer if (lhs == .register) self.register_manager.unfreezeRegs(&.{lhs.register});
+                const lhs_lock: ?RegisterLock = if (lhs == .register)
+                    self.register_manager.freezeRegAssumeUnused(lhs.register)
+                else
+                    null;
+                defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

                 try self.spillCompareFlagsIfOccupied();
                 self.compare_flags_inst = null;
@@ -1939,8 +1947,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);

-    if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
-    defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
+    const index_lock: ?RegisterLock = if (index_is_register)
+        self.register_manager.freezeRegAssumeUnused(index_mcv.register)
+    else
+        null;
+    defer if (index_lock) |reg| self.register_manager.unfreezeReg(reg);

     const base_mcv = slicePtr(slice_mcv);
|
@ -1950,20 +1961,20 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
|||
.register => |r| r,
|
||||
else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv),
|
||||
};
|
||||
self.register_manager.freezeRegs(&.{base_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{base_reg});
|
||||
const base_reg_lock = self.register_manager.freezeRegAssumeUnused(base_reg);
|
||||
defer self.register_manager.unfreezeReg(base_reg_lock);
|
||||
|
||||
const dst_reg = try self.register_manager.allocReg(inst);
|
||||
const dst_mcv = MCValue{ .register = dst_reg };
|
||||
self.register_manager.freezeRegs(&.{dst_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{dst_reg});
|
||||
const dst_reg_lock = self.register_manager.freezeRegAssumeUnused(dst_reg);
|
||||
defer self.register_manager.unfreezeReg(dst_reg_lock);
|
||||
|
||||
const index_reg: Register = switch (index_mcv) {
|
||||
.register => |reg| reg,
|
||||
else => try self.copyToTmpRegister(Type.usize, index_mcv),
|
||||
};
|
||||
self.register_manager.freezeRegs(&.{index_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{index_reg});
|
||||
const index_reg_lock = self.register_manager.freezeRegAssumeUnused(index_reg);
|
||||
defer self.register_manager.unfreezeReg(index_reg_lock);
|
||||
|
||||
const tag: Mir.Inst.Tag = switch (elem_size) {
|
||||
1 => .ldrb,
|
||||
|
|
@ -2149,8 +2160,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
|
|||
.immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
|
||||
.ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
|
||||
.register => |reg| {
|
||||
self.register_manager.freezeRegs(&.{reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{reg});
|
||||
const reg_lock = self.register_manager.freezeReg(reg);
|
||||
defer if (reg_lock) |reg_locked| self.register_manager.unfreezeReg(reg_locked);
|
||||
|
||||
switch (dst_mcv) {
|
||||
.dead => unreachable,
|
||||
|
|
@ -2162,16 +2173,19 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
|
|||
.stack_offset => |off| {
|
||||
if (elem_size <= 4) {
|
||||
const tmp_reg = try self.register_manager.allocReg(null);
|
||||
self.register_manager.freezeRegs(&.{tmp_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{tmp_reg});
|
||||
const tmp_reg_lock = self.register_manager.freezeRegAssumeUnused(tmp_reg);
|
||||
defer self.register_manager.unfreezeReg(tmp_reg_lock);
|
||||
|
||||
try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
|
||||
try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
|
||||
} else {
|
||||
// TODO optimize the register allocation
|
||||
const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
|
||||
self.register_manager.freezeRegs(®s);
|
||||
defer self.register_manager.unfreezeRegs(®s);
|
||||
var regs_locks: [4]RegisterLock = undefined;
|
||||
self.register_manager.freezeRegsAssumeUnused(4, regs, ®s_locks);
|
||||
defer for (regs_locks) |reg_locked| {
|
||||
self.register_manager.unfreezeReg(reg_locked);
|
||||
};
|
||||
|
||||
const src_reg = reg;
|
||||
const dst_reg = regs[0];
|
||||
|
|
@@ -2197,8 +2211,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
         .stack_argument_offset,
         => {
             const reg = try self.register_manager.allocReg(null);
-            self.register_manager.freezeRegs(&.{reg});
-            defer self.register_manager.unfreezeRegs(&.{reg});
+            const reg_lock = self.register_manager.freezeRegAssumeUnused(reg);
+            defer self.register_manager.unfreezeReg(reg_lock);

             try self.genSetReg(ptr_ty, reg, ptr);
             try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
@@ -2252,8 +2266,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
             try self.genSetStack(value_ty, off, value);
         },
         .register => |addr_reg| {
-            self.register_manager.freezeRegs(&.{addr_reg});
-            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+            const addr_reg_lock = self.register_manager.freezeReg(addr_reg);
+            defer if (addr_reg_lock) |reg| self.register_manager.unfreezeReg(reg);

             switch (value) {
                 .dead => unreachable,
@@ -2264,15 +2278,18 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                 else => {
                     if (elem_size <= 4) {
                         const tmp_reg = try self.register_manager.allocReg(null);
-                        self.register_manager.freezeRegs(&.{tmp_reg});
-                        defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+                        const tmp_reg_lock = self.register_manager.freezeRegAssumeUnused(tmp_reg);
+                        defer self.register_manager.unfreezeReg(tmp_reg_lock);

                         try self.genSetReg(value_ty, tmp_reg, value);
                         try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
                     } else {
                         const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
-                        self.register_manager.freezeRegs(&regs);
-                        defer self.register_manager.unfreezeRegs(&regs);
+                        var regs_locks: [4]RegisterLock = undefined;
+                        self.register_manager.freezeRegsAssumeUnused(4, regs, &regs_locks);
+                        defer for (regs_locks) |reg| {
+                            self.register_manager.unfreezeReg(reg);
+                        };

                         const src_reg = regs[0];
                         const dst_reg = addr_reg;
@@ -2356,12 +2373,12 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
             const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
                 .immediate = struct_field_offset,
             });
-            self.register_manager.freezeRegs(&.{offset_reg});
-            defer self.register_manager.unfreezeRegs(&.{offset_reg});
+            const offset_reg_lock = self.register_manager.freezeRegAssumeUnused(offset_reg);
+            defer self.register_manager.unfreezeReg(offset_reg_lock);

             const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
-            self.register_manager.freezeRegs(&.{addr_reg});
-            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+            const addr_reg_lock = self.register_manager.freezeRegAssumeUnused(addr_reg);
+            defer self.register_manager.unfreezeReg(addr_reg_lock);

             const dest = try self.binOp(
                 .add,
|
@ -2477,8 +2494,11 @@ fn binOpRegister(
|
|||
const lhs_is_register = lhs == .register;
|
||||
const rhs_is_register = rhs == .register;
|
||||
|
||||
if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
|
||||
if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
|
||||
const lhs_lock: ?RegisterLock = if (lhs_is_register)
|
||||
self.register_manager.freezeReg(lhs.register)
|
||||
else
|
||||
null;
|
||||
defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);
|
||||
|
||||
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
|
||||
|
||||
|
|
@ -2489,13 +2509,13 @@ fn binOpRegister(
|
|||
} else null;
|
||||
|
||||
const reg = try self.register_manager.allocReg(track_inst);
|
||||
self.register_manager.freezeRegs(&.{reg});
|
||||
|
||||
if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
|
||||
|
||||
break :blk reg;
|
||||
};
|
||||
defer self.register_manager.unfreezeRegs(&.{lhs_reg});
|
||||
const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
|
||||
defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);
|
||||
|
||||
const rhs_reg = if (rhs_is_register) rhs.register else blk: {
|
||||
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
|
||||
|
|
@@ -2504,13 +2524,13 @@ fn binOpRegister(
         } else null;

         const reg = try self.register_manager.allocReg(track_inst);
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+    const new_rhs_lock = self.register_manager.freezeReg(rhs_reg);
+    defer if (new_rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = switch (mir_tag) {
         .cmp => .r0, // cmp has no destination regardless
@@ -2593,7 +2613,11 @@ fn binOpImmediate(
 ) !MCValue {
     const lhs_is_register = lhs == .register;

-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.freezeReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -2606,13 +2630,13 @@ fn binOpImmediate(
         } else null;

         const reg = try self.register_manager.allocReg(track_inst);
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = switch (mir_tag) {
         .cmp => .r0, // cmp has no destination reg
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
+const RegisterLock = RegisterManager.RegisterLock;

 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -937,8 +938,11 @@ fn binOpRegister(
     const lhs_is_register = lhs == .register;
     const rhs_is_register = rhs == .register;

-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-    if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.freezeReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -949,13 +953,13 @@ fn binOpRegister(
         } else null;

         const reg = try self.register_manager.allocReg(track_inst);
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+    const new_lhs_lock = self.register_manager.freezeReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const rhs_reg = if (rhs_is_register) rhs.register else blk: {
         const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
@@ -964,13 +968,13 @@ fn binOpRegister(
         } else null;

         const reg = try self.register_manager.allocReg(track_inst);
-        self.register_manager.freezeRegs(&.{reg});

         if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });

         break :blk reg;
     };
-    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+    const new_rhs_lock = self.register_manager.freezeReg(rhs_reg);
+    defer if (new_rhs_lock) |reg| self.register_manager.unfreezeReg(reg);

     const dest_reg = if (maybe_inst) |inst| blk: {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1448,8 +1452,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
         .stack_offset,
         => {
             const reg = try self.register_manager.allocReg(null);
-            self.register_manager.freezeRegs(&.{reg});
-            defer self.register_manager.unfreezeRegs(&.{reg});
+            const reg_lock = self.register_manager.freezeRegAssumeUnused(reg);
+            defer self.register_manager.unfreezeReg(reg_lock);

             try self.genSetReg(ptr_ty, reg, ptr);
             try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
File diff suppressed because it is too large
@@ -116,22 +116,51 @@ pub fn RegisterManager(
             return self.frozen_registers & mask != 0;
         }

-        /// Prevents the registers from being allocated until they are
-        /// unfrozen again
-        pub fn freezeRegs(self: *Self, regs: []const Register) void {
-            for (regs) |reg| {
-                const mask = getRegisterMask(reg) orelse continue;
-                self.frozen_registers |= mask;
-            }
-        }
+        pub const RegisterLock = struct {
+            register: Register,
+        };
+
+        /// Prevents the register from being allocated until they are
+        /// unfrozen again.
+        /// Returns `RegisterLock` if the register was not already
+        /// frozen, or `null` otherwise.
+        /// Only the owner of the `RegisterLock` can unfreeze the
+        /// register later.
+        pub fn freezeReg(self: *Self, reg: Register) ?RegisterLock {
+            if (self.isRegFrozen(reg)) return null;
+            const mask = getRegisterMask(reg) orelse return null;
+            self.frozen_registers |= mask;
+            return RegisterLock{ .register = reg };
+        }
+
+        /// Like `freezeReg` but asserts the register was unused always
+        /// returning a valid lock.
+        pub fn freezeRegAssumeUnused(self: *Self, reg: Register) RegisterLock {
+            assert(!self.isRegFrozen(reg));
+            const mask = getRegisterMask(reg) orelse unreachable;
+            self.frozen_registers |= mask;
+            return RegisterLock{ .register = reg };
+        }
+
+        /// Like `freezeRegAssumeUnused` but locks multiple registers.
+        pub fn freezeRegsAssumeUnused(
+            self: *Self,
+            comptime count: comptime_int,
+            regs: [count]Register,
+            buf: *[count]RegisterLock,
+        ) void {
+            for (&regs) |reg, i| {
+                buf[i] = self.freezeRegAssumeUnused(reg);
+            }
+        }

-        /// Enables the allocation of the registers
-        pub fn unfreezeRegs(self: *Self, regs: []const Register) void {
-            for (regs) |reg| {
-                const mask = getRegisterMask(reg) orelse continue;
-                self.frozen_registers &= ~mask;
-            }
-        }
+        /// Unfreezes the register allowing its re-allocation and re-use.
+        /// Requires `RegisterLock` to unfreeze a register.
+        /// Call `freezeReg` to obtain the lock first.
+        pub fn unfreezeReg(self: *Self, lock: RegisterLock) void {
+            const mask = getRegisterMask(lock.register) orelse return;
+            self.frozen_registers &= ~mask;
+        }

         /// Returns true when at least one register is frozen
         pub fn frozenRegsExist(self: Self) bool {
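Call sites pair each variant with a defer; a typical pattern from the backend hunks above (a sketch using only the API defined in this hunk):

    // Operand register that may already be frozen by an outer scope:
    // freezeReg returns null in that case, and the defer is a no-op.
    const lock = self.register_manager.freezeReg(operand_reg);
    defer if (lock) |reg| self.register_manager.unfreezeReg(reg);

    // Freshly allocated register, known to be unfrozen, so the
    // asserting variant always yields a valid lock.
    const tmp_reg = try self.register_manager.allocReg(null);
    const tmp_lock = self.register_manager.freezeRegAssumeUnused(tmp_reg);
    defer self.register_manager.unfreezeReg(tmp_lock);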
@@ -419,8 +448,8 @@ test "allocReg: spilling" {
     // Frozen registers
     function.register_manager.freeReg(.r3);
     {
-        function.register_manager.freezeRegs(&.{.r2});
-        defer function.register_manager.unfreezeRegs(&.{.r2});
+        const lock = function.register_manager.freezeReg(.r2);
+        defer if (lock) |reg| function.register_manager.unfreezeReg(reg);

         try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
     }
@@ -447,8 +476,8 @@ test "tryAllocRegs" {
     function.register_manager.freeReg(.r2);
     function.register_manager.freeReg(.r3);
     {
-        function.register_manager.freezeRegs(&.{.r1});
-        defer function.register_manager.unfreezeRegs(&.{.r1});
+        const lock = function.register_manager.freezeReg(.r1);
+        defer if (lock) |reg| function.register_manager.unfreezeReg(reg);

         try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?);
     }
@@ -486,8 +515,8 @@ test "allocRegs: normal usage" {
         // contain any valuable data anymore and can be reused. For an
         // example of that, see `selectively reducing register
         // pressure`.
-        function.register_manager.freezeRegs(&.{result_reg});
-        defer function.register_manager.unfreezeRegs(&.{result_reg});
+        const lock = function.register_manager.freezeReg(result_reg);
+        defer if (lock) |reg| function.register_manager.unfreezeReg(reg);

         const regs = try function.register_manager.allocRegs(2, .{ null, null });
         try function.genAdd(result_reg, regs[0], regs[1]);
@@ -507,16 +536,14 @@ test "allocRegs: selectively reducing register pressure" {
     {
         const result_reg: MockRegister2 = .r1;

-        function.register_manager.freezeRegs(&.{result_reg});
-        defer function.register_manager.unfreezeRegs(&.{result_reg});
+        const lock = function.register_manager.freezeReg(result_reg);

         // Here, we don't defer unfreeze because we manually unfreeze
         // after genAdd
         const regs = try function.register_manager.allocRegs(2, .{ null, null });
-        function.register_manager.freezeRegs(&.{result_reg});

         try function.genAdd(result_reg, regs[0], regs[1]);
-        function.register_manager.unfreezeRegs(&regs);
+        function.register_manager.unfreezeReg(lock.?);

         const extra_summand_reg = try function.register_manager.allocReg(null);
         try function.genAdd(result_reg, result_reg, extra_summand_reg);
@@ -7,6 +7,7 @@ var foo: u8 align(4) = 100;

 test "global variable alignment" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

     comptime try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
     comptime try expect(@TypeOf(&foo) == *align(4) u8);