diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index b68f214dfa..6202d2e74f 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -774,7 +774,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, &.{})) |reg| { + if (self.register_manager.tryAllocReg(inst)) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -797,7 +797,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); try self.genSetReg(ty, reg, mcv); return reg; } @@ -806,7 +806,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, &.{}); + const reg = try self.register_manager.allocReg(reg_owner); try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1270,7 +1270,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .memory, .stack_offset, => { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{reg}); defer self.register_manager.unfreezeRegs(&.{reg}); @@ -1729,15 +1729,15 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { if (!lhs_is_register and !rhs_is_register) { const regs = try self.register_manager.allocRegs(2, .{ Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?, - }, &.{}); + }); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?) 
}; } // Move the operands to the newly allocated registers diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index dfb08a4ed1..c7e80dbe24 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -750,7 +750,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, &.{})) |reg| { + if (self.register_manager.tryAllocReg(inst)) |reg| { return MCValue{ .register = reg }; } } @@ -791,7 +791,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); try self.genSetReg(ty, reg, mcv); return reg; } @@ -800,7 +800,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, &.{}); + const reg = try self.register_manager.allocReg(reg_owner); try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1247,7 +1247,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unfreezeRegs(&.{base_mcv.register}); if (elem_size <= 4) { - const dst_reg = try self.register_manager.allocReg(inst, &.{}); + const dst_reg = try self.register_manager.allocReg(inst); self.register_manager.freezeRegs(&.{dst_reg}); defer self.register_manager.unfreezeRegs(&.{dst_reg}); @@ -1285,7 +1285,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { } else { const dst_mcv = try self.allocRegOrMem(inst, false); - const addr_reg = try self.register_manager.allocReg(null, &.{}); + const addr_reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{addr_reg}); defer self.register_manager.unfreezeRegs(&.{addr_reg}); @@ -1437,7 +1437,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo }, .stack_offset => |off| { if (elem_ty.abiSize(self.target.*) <= 4) { - const tmp_reg = try self.register_manager.allocReg(null, &.{}); + const tmp_reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{tmp_reg}); defer self.register_manager.unfreezeRegs(&.{tmp_reg}); @@ -1451,7 +1451,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo // larger const usize_ty = Type.initTag(.usize); - const tmp_regs = try self.register_manager.allocRegs(2, .{ null, null }, &.{}); + const tmp_regs = try self.register_manager.allocRegs(2, .{ null, null }); self.register_manager.freezeRegs(&tmp_regs); defer self.register_manager.unfreezeRegs(&tmp_regs); @@ -1475,7 +1475,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) 
InnerError!vo try self.genSetStack(usize_ty, off + 4, MCValue{ .register = tmp_regs[1] }); } else { // TODO optimize the register allocation - const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, &.{}); + const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }); self.register_manager.freezeRegs(&regs); defer self.register_manager.unfreezeRegs(&regs); @@ -1524,7 +1524,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .stack_offset, .stack_argument_offset, => { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{reg}); defer self.register_manager.unfreezeRegs(&.{reg}); @@ -1597,7 +1597,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type }, else => { if (value_ty.abiSize(self.target.*) <= 4) { - const tmp_reg = try self.register_manager.allocReg(null, &.{}); + const tmp_reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{tmp_reg}); defer self.register_manager.unfreezeRegs(&.{tmp_reg}); @@ -1774,14 +1774,14 @@ fn genArmBinIntOp( if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs and can_swap_lhs_and_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?
}; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1791,18 +1791,18 @@ fn genArmBinIntOp( // Allocate 1 or 2 registers if (lhs_should_be_register and rhs_should_be_register) { if (lhs_is_register and rhs_is_register) { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; } else if (lhs_is_register) { // Move RHS to register - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; rhs_mcv = dst_mcv; } else if (rhs_is_register) { // Move LHS to register - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1812,17 +1812,17 @@ fn genArmBinIntOp( } else if (lhs_should_be_register) { // RHS is immediate if (lhs_is_register) { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; } else { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; lhs_mcv = dst_mcv; } } else if (rhs_should_be_register and can_swap_lhs_and_rhs) { // LHS is immediate if (rhs_is_register) { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; } else { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; rhs_mcv = dst_mcv; } @@ -1983,32 +1983,32 @@ fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Ai if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?) 
}; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; } else { // Allocate 1 or 2 registers if (lhs_is_register and rhs_is_register) { - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; } else if (lhs_is_register) { // Move RHS to register - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; rhs_mcv = dst_mcv; } else if (rhs_is_register) { // Move LHS to register - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) }; lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -2056,17 +2056,17 @@ fn genArmMulConstant(self: *Self, inst: Air.Inst.Index, op: Air.Inst.Ref, op_ind // Allocate registers for operands and/or destination if (reuse_lhs) { // Allocate 1 register - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(null, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(null) }; dst_mcv = lhs; } else { // Allocate 1 or 2 registers if (lhs_is_register) { // Move RHS to register - dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null, &.{}) }; + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) }; rhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ null, null }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ null, null }); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -2432,20 +2432,20 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { if (!lhs_is_register and !rhs_is_register) { const regs = try self.register_manager.allocRegs(2, .{ Air.refToIndex(bin_op.lhs).?, Air.refToIndex(bin_op.rhs).?, - }, &.{}); + }); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { const track_inst = if (self.liveness.operandDies(inst, 1)) null else Air.refToIndex(bin_op.rhs).?; - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst) }; } else if (!lhs_is_register) { const track_inst = if (self.liveness.operandDies(inst, 0)) null else Air.refToIndex(bin_op.lhs).?; - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst, &.{}) }; + lhs_mcv = MCValue{ .register = try 
self.register_manager.allocReg(track_inst) }; } } else { if (!lhs_is_register) { const track_inst = if (self.liveness.operandDies(inst, 0)) null else Air.refToIndex(bin_op.lhs).?; - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst) }; } } @@ -3185,7 +3185,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { // TODO optimize the register allocation - const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, &.{}); + const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }); const src_reg = regs[0]; const dst_reg = regs[1]; const len_reg = regs[2]; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 980d6ee9b9..9e850bd751 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -749,7 +749,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, &.{})) |reg| { + if (self.register_manager.tryAllocReg(inst)) |reg| { return MCValue{ .register = reg }; } } @@ -772,7 +772,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); try self.genSetReg(ty, reg, mcv); return reg; } @@ -781,7 +781,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, &.{}); + const reg = try self.register_manager.allocReg(reg_owner); try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1211,7 +1211,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .memory, .stack_offset, => { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); self.register_manager.freezeRegs(&.{reg}); defer self.register_manager.unfreezeRegs(&.{reg}); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 14aebc3724..e05a66228b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -21,7 +21,7 @@ const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); -const RegisterManager = @import("../../register_manager.zig").RegisterManager; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -33,6 +33,8 @@ const InnerError = error{ CodegenFail, }; +const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs); + gpa: Allocator, air: Air, liveness: 
Liveness, @@ -73,7 +75,7 @@ branch_stack: *std.ArrayList(Branch), // Key is the block instruction blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, -register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{}, +register_manager: RegisterManager = .{}, /// Maps offset to what is stored there. stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, @@ -169,6 +171,24 @@ pub const MCValue = union(enum) { else => false, }; } + + fn freezeIfRegister(mcv: MCValue, mgr: *RegisterManager) void { + switch (mcv) { + .register => |reg| { + mgr.freezeRegs(&.{reg}); + }, + else => {}, + } + } + + fn unfreezeIfRegister(mcv: MCValue, mgr: *RegisterManager) void { + switch (mcv) { + .register => |reg| { + mgr.unfreezeRegs(&.{reg}); + }, + else => {}, + } + } }; const Branch = struct { @@ -799,7 +819,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, &.{})) |reg| { + if (self.register_manager.tryAllocReg(inst)) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -822,7 +842,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, &.{}); + const reg = try self.register_manager.allocReg(null); try self.genSetReg(ty, reg, mcv); return reg; } @@ -831,21 +851,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// `reg_owner` is the instruction that gets associated with the register in the register table. 
/// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, ty: Type, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(ty, reg, mcv); - return MCValue{ .register = reg }; -} - -/// Like `copyToNewRegister` but allows to specify a list of excluded registers which -/// will not be selected for allocation. This can be done via `exceptions` slice. -fn copyToNewRegisterWithExceptions( - self: *Self, - reg_owner: Air.Inst.Index, - ty: Type, - mcv: MCValue, - exceptions: []const Register, -) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, exceptions); + const reg = try self.register_manager.allocReg(reg_owner); try self.genSetReg(ty, reg, mcv); return MCValue{ .register = reg }; } @@ -897,8 +903,9 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement intCast for abi sizes larger than 8", .{}); } - if (operand.isRegister()) self.register_manager.freezeRegs(&.{operand.register}); - defer if (operand.isRegister()) self.register_manager.unfreezeRegs(&.{operand.register}); + operand.freezeIfRegister(&self.register_manager); + defer operand.unfreezeIfRegister(&self.register_manager); + break :blk try self.copyToNewRegister(inst, dest_ty, operand); }; @@ -925,7 +932,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const reg = switch (operand) { .register => |reg| reg, else => inner: { - const reg = try self.register_manager.allocReg(inst, &.{}); + const reg = try self.register_manager.allocReg(inst); try self.genSetReg(src_ty, reg, operand); break :inner reg; }, @@ -1366,8 +1373,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { - const reg = try self.register_manager.allocReg(null, &.{}); - try self.genSetReg(index_ty, reg, index); + const reg = 
try self.copyToTmpRegister(index_ty, index); try self.genIMulOpMir(index_ty, .{ .register = reg }, .{ .immediate = elem_size }); return reg; } @@ -1376,16 +1382,26 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: { - const slice_mcv = try self.resolveInst(bin_op.lhs); const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_mcv = try self.resolveInst(bin_op.lhs); + slice_mcv.freezeIfRegister(&self.register_manager); + defer slice_mcv.unfreezeIfRegister(&self.register_manager); + const elem_ty = slice_ty.childType(); const elem_size = elem_ty.abiSize(self.target.*); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const index_ty = self.air.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); + index_mcv.freezeIfRegister(&self.register_manager); + defer index_mcv.unfreezeIfRegister(&self.register_manager); + const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_size); - const addr_reg = try self.register_manager.allocReg(null, &.{offset_reg}); + self.register_manager.freezeRegs(&.{offset_reg}); + defer self.register_manager.unfreezeRegs(&.{offset_reg}); + + const addr_reg = try self.register_manager.allocReg(null); switch (slice_mcv) { .stack_offset => |off| { // mov reg, [rbp - 8] @@ -1401,7 +1417,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { }, else => return self.fail("TODO implement slice_elem_val when slice is {}", .{slice_mcv}), } - // TODO we could allocate register here, but need to except addr register and potentially + // TODO we could allocate register here, but need to expect addr register and potentially // offset register. 
const dst_mcv = try self.allocRegOrMem(inst, false); try self.genBinMathOpMir(.add, slice_ptr_field_type, .{ .register = addr_reg.to64() }, .{ @@ -1427,14 +1443,23 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const array_ty = self.air.typeOf(bin_op.lhs); - const array = try self.resolveInst(bin_op.lhs); const array_abi_size = array_ty.abiSize(self.target.*); + const array = try self.resolveInst(bin_op.lhs); + array.freezeIfRegister(&self.register_manager); + defer array.unfreezeIfRegister(&self.register_manager); + const elem_ty = array_ty.childType(); const elem_abi_size = elem_ty.abiSize(self.target.*); const index_ty = self.air.typeOf(bin_op.rhs); const index = try self.resolveInst(bin_op.rhs); + index.freezeIfRegister(&self.register_manager); + defer index.unfreezeIfRegister(&self.register_manager); + const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); - const addr_reg = try self.register_manager.allocReg(null, &.{offset_reg}); + self.register_manager.freezeRegs(&.{offset_reg}); + defer self.register_manager.unfreezeRegs(&.{offset_reg}); + + const addr_reg = try self.register_manager.allocReg(null); switch (array) { .stack_offset => |off| { // lea reg, [rbp] @@ -1449,7 +1474,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { }, else => return self.fail("TODO implement array_elem_val when array is {}", .{array}), } - // TODO we could allocate register here, but need to except addr register and potentially + // TODO we could allocate register here, but need to expect addr register and potentially // offset register. 
const dst_mcv = try self.allocRegOrMem(inst, false); try self.genBinMathOpMir(.add, array_ty, .{ .register = addr_reg.to64() }, .{ .register = offset_reg.to64() }); @@ -1475,12 +1500,17 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.air.typeOf(extra.lhs); const ptr = try self.resolveInst(extra.lhs); + ptr.freezeIfRegister(&self.register_manager); + defer ptr.unfreezeIfRegister(&self.register_manager); + const elem_ty = ptr_ty.elemType2(); const elem_abi_size = elem_ty.abiSize(self.target.*); const index_ty = self.air.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); - const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); + index.freezeIfRegister(&self.register_manager); + defer index.unfreezeIfRegister(&self.register_manager); + const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); self.register_manager.freezeRegs(&.{offset_reg}); defer self.register_manager.unfreezeRegs(&.{offset_reg}); @@ -1587,6 +1617,9 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo return self.fail("TODO implement loading from MCValue.embedded_in_code", .{}); }, .register => |reg| { + self.register_manager.freezeRegs(&.{reg}); + defer self.register_manager.unfreezeRegs(&.{reg}); + switch (dst_mcv) { .dead => unreachable, .undef => unreachable, @@ -1607,16 +1640,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo }, .stack_offset => |off| { if (abi_size <= 8) { - const tmp_reg = try self.register_manager.allocReg(null, &.{reg}); + const tmp_reg = try self.register_manager.allocReg(null); try self.load(.{ .register = tmp_reg }, ptr, ptr_ty); return self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg }); } - const regs = try self.register_manager.allocRegs( - 3, - .{ null, null, null }, - &.{ reg, .rax, .rcx }, - ); + self.register_manager.freezeRegs(&.{ 
.rax, .rcx }); + defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx }); + + const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); const addr_reg = regs[0]; const count_reg = regs[1]; const tmp_reg = regs[2]; @@ -1634,7 +1666,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo try self.register_manager.getReg(.rcx, null); // TODO allow for abi size to be u64 - try self.genSetReg(Type.initTag(.u32), count_reg, .{ .immediate = @intCast(u32, abi_size) }); + try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); return self.genInlineMemcpy( -(off + @intCast(i32, abi_size)), @@ -1786,8 +1818,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type // TODO: in case the address fits in an imm32 we can use [ds:imm32] // instead of wasting an instruction copying the address to a register - if (value.isRegister()) self.register_manager.freezeRegs(&.{value.register}); - defer if (value.isRegister()) self.register_manager.unfreezeRegs(&.{value.register}); + value.freezeIfRegister(&self.register_manager); + defer value.unfreezeIfRegister(&self.register_manager); const addr_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr }); // to get the actual address of the value we want to modify we have to go through the GOT @@ -1925,7 +1957,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde break :blk reg; } else { self.register_manager.freezeRegs(&.{reg}); - const result_reg = try self.register_manager.allocReg(inst, &.{}); + const result_reg = try self.register_manager.allocReg(inst); try self.genSetReg(ptr_ty, result_reg, mcv); break :blk result_reg; } @@ -2012,22 +2044,16 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: } } else { if (lhs.isMemory()) { - dst_mcv = if (rhs.isRegister()) - // If the allocated register is the same as the rhs register, don't allocate that one - // and 
instead spill a subsequent one. Otherwise, this can result in a miscompilation - // in the presence of several binary operations performed in a single block. - try self.copyToNewRegisterWithExceptions(inst, dst_ty, lhs, &.{rhs.register}) - else - try self.copyToNewRegister(inst, dst_ty, lhs); + rhs.freezeIfRegister(&self.register_manager); + defer rhs.unfreezeIfRegister(&self.register_manager); + + dst_mcv = try self.copyToNewRegister(inst, dst_ty, lhs); src_mcv = rhs; } else { - dst_mcv = if (lhs.isRegister()) - // If the allocated register is the same as the rhs register, don't allocate that one - // and instead spill a subsequent one. Otherwise, this can result in a miscompilation - // in the presence of several binary operations performed in a single block. - try self.copyToNewRegisterWithExceptions(inst, dst_ty, rhs, &.{lhs.register}) - else - try self.copyToNewRegister(inst, dst_ty, rhs); + lhs.freezeIfRegister(&self.register_manager); + defer lhs.unfreezeIfRegister(&self.register_manager); + + dst_mcv = try self.copyToNewRegister(inst, dst_ty, rhs); src_mcv = lhs; } } @@ -2039,7 +2065,11 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: switch (src_mcv) { .immediate => |imm| { if (imm > math.maxInt(u31)) { - src_mcv = MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.u64), src_mcv) }; + dst_mcv.freezeIfRegister(&self.register_manager); + defer dst_mcv.unfreezeIfRegister(&self.register_manager); + + const tmp_reg = try self.copyToTmpRegister(Type.u64, src_mcv); + src_mcv = MCValue{ .register = tmp_reg }; } }, else => {}, @@ -2901,6 +2931,8 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + operand_ptr.freezeIfRegister(&self.register_manager); + defer operand_ptr.unfreezeIfRegister(&self.register_manager); const 
operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -2930,6 +2962,8 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + operand_ptr.freezeIfRegister(&self.register_manager); + defer operand_ptr.unfreezeIfRegister(&self.register_manager); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -2959,6 +2993,8 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + operand_ptr.freezeIfRegister(&self.register_manager); + defer operand_ptr.unfreezeIfRegister(&self.register_manager); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -2988,6 +3024,8 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + operand_ptr.freezeIfRegister(&self.register_manager); + defer operand_ptr.unfreezeIfRegister(&self.register_manager); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
@@ -3345,7 +3383,10 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); } - const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, &.{ .rax, .rcx }); + self.register_manager.freezeRegs(&.{ .rax, .rcx }); + defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx }); + + const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); const addr_reg = regs[0]; const count_reg = regs[1]; const tmp_reg = regs[2]; @@ -3363,7 +3404,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE }); // TODO allow for abi_size to be u64 - try self.genSetReg(Type.initTag(.u32), count_reg, .{ .immediate = @intCast(u32, abi_size) }); + try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); try self.genInlineMemcpy( -(stack_offset + @intCast(i32, abi_size)), .rsp, @@ -3510,7 +3551,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } - const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, &.{ .rax, .rcx, .rbp }); + self.register_manager.freezeRegs(&.{ .rax, .rcx, .rbp }); + defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx, .rbp }); + + const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); const addr_reg = regs[0]; const count_reg = regs[1]; const tmp_reg = regs[2]; @@ -3528,7 +3572,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro }); // TODO allow for abi_size to be u64 - try self.genSetReg(Type.initTag(.u32), count_reg, .{ .immediate = @intCast(u32, abi_size) }); + try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); return self.genInlineMemcpy( -(stack_offset + @intCast(i32, abi_size)), diff --git a/src/register_manager.zig b/src/register_manager.zig index 
ec8f7478d6..63a0efbad8 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -118,17 +118,12 @@ pub fn RegisterManager( /// Allocates a specified number of registers, optionally /// tracking them. Returns `null` if not enough registers are /// free. - /// - /// Exceptions are deprecated, use freezeRegs and unfreezeRegs - /// instead. pub fn tryAllocRegs( self: *Self, comptime count: comptime_int, insts: [count]?Air.Inst.Index, - exceptions: []const Register, ) ?[count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); - assert(count + exceptions.len <= callee_preserved_regs.len); const free_registers = @popCount(FreeRegInt, self.free_registers); if (free_registers < count) return null; @@ -137,7 +132,6 @@ pub fn RegisterManager( var i: usize = 0; for (callee_preserved_regs) |reg| { if (i >= count) break; - if (mem.indexOfScalar(Register, exceptions, reg) != null) continue; if (self.isRegFrozen(reg)) continue; if (self.isRegFree(reg)) { regs[i] = reg; @@ -163,29 +157,21 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. Returns `null` if all registers /// are allocated. - /// - /// Exceptions are deprecated, use freezeRegs and unfreezeRegs - /// instead. - pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) ?Register { - return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null; + pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index) ?Register { + return if (tryAllocRegs(self, 1, .{inst})) |regs| regs[0] else null; } /// Allocates a specified number of registers, optionally - /// tracking them. Asserts that count + exceptions.len is not + /// tracking them. Asserts that count is not /// larger than the total number of registers available. - /// - /// Exceptions are deprecated, use freezeRegs and unfreezeRegs - /// instead. 
pub fn allocRegs( self: *Self, comptime count: comptime_int, insts: [count]?Air.Inst.Index, - exceptions: []const Register, ) ![count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); - assert(count + exceptions.len <= callee_preserved_regs.len); - const result = self.tryAllocRegs(count, insts, exceptions) orelse blk: { + const result = self.tryAllocRegs(count, insts) orelse blk: { // We'll take over the first count registers. Spill // the instructions that were previously there to a // stack allocations. @@ -193,7 +179,6 @@ pub fn RegisterManager( var i: usize = 0; for (callee_preserved_regs) |reg| { if (i >= count) break; - if (mem.indexOfScalar(Register, exceptions, reg) != null) continue; if (self.isRegFrozen(reg)) continue; regs[i] = reg; @@ -229,11 +214,8 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. - /// - /// Exceptions are deprecated, use freezeRegs and unfreezeRegs - /// instead. - pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) !Register { - return (try self.allocRegs(1, .{inst}, exceptions))[0]; + pub fn allocReg(self: *Self, inst: ?Air.Inst.Index) !Register { + return (try self.allocRegs(1, .{inst}))[0]; } /// Spills the register if it is currently allocated. 
If a @@ -365,9 +347,9 @@ test "tryAllocReg: no spilling" { const mock_instruction: Air.Inst.Index = 1; - try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(mock_instruction)); + try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(mock_instruction)); + try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(mock_instruction)); try expect(function.register_manager.isRegAllocated(.r2)); try expect(function.register_manager.isRegAllocated(.r3)); @@ -393,33 +375,25 @@ test "allocReg: spilling" { const mock_instruction: Air.Inst.Index = 1; - try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction)); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction)); // Spill a register - try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction)); try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items); // No spilling necessary function.register_manager.freeReg(.r3); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction)); try 
expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items); - // Exceptions - // - // TODO deprecated, remove test once no backend uses exceptions - // anymore - function.register_manager.freeReg(.r2); - function.register_manager.freeReg(.r3); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{.r2})); - // Frozen registers function.register_manager.freeReg(.r3); { function.register_manager.freezeRegs(&.{.r2}); defer function.register_manager.unfreezeRegs(&.{.r2}); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction)); } try expect(!function.register_manager.frozenRegsExist()); } @@ -432,22 +406,13 @@ test "tryAllocRegs" { }; defer function.deinit(); - try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{}).?); + try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?); try expect(function.register_manager.isRegAllocated(.r0)); try expect(function.register_manager.isRegAllocated(.r1)); try expect(function.register_manager.isRegAllocated(.r2)); try expect(!function.register_manager.isRegAllocated(.r3)); - // Exceptions - // - // TODO deprecated, remove test once no backend uses exceptions - // anymore - function.register_manager.freeReg(.r0); - function.register_manager.freeReg(.r1); - function.register_manager.freeReg(.r2); - try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{.r1}).?); - // Frozen registers function.register_manager.freeReg(.r0); function.register_manager.freeReg(.r2); @@ -456,7 +421,7 @@ test "tryAllocRegs" { function.register_manager.freezeRegs(&.{.r1}); defer function.register_manager.unfreezeRegs(&.{.r1}); - try 
expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{}).?); + try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?); } try expect(!function.register_manager.frozenRegsExist()); @@ -480,20 +445,13 @@ test "allocRegs" { mock_instruction, mock_instruction, mock_instruction, - }, &.{})); + })); try expect(function.register_manager.isRegAllocated(.r0)); try expect(function.register_manager.isRegAllocated(.r1)); try expect(function.register_manager.isRegAllocated(.r2)); try expect(!function.register_manager.isRegAllocated(.r3)); - // Exceptions - // - // TODO deprecated, remove test once no backend uses exceptions - // anymore - try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null }, &.{.r1})); - try expectEqualSlices(MockRegister2, &[_]MockRegister2{ .r0, .r2 }, function.spilled.items); - // Frozen registers function.register_manager.freeReg(.r0); function.register_manager.freeReg(.r2); @@ -502,7 +460,7 @@ test "allocRegs" { function.register_manager.freezeRegs(&.{.r1}); defer function.register_manager.unfreezeRegs(&.{.r1}); - try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null }, &.{})); + try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null })); } try expect(!function.register_manager.frozenRegsExist());