Merge pull request #11462 from joachimschmidt557/stage2-aarch64

stage2 AArch64: truncation support
Joachim Schmidt committed on 2022-04-20 09:44:43 +02:00 (via GitHub)
commit 1a1b5ee264
7 changed files with 619 additions and 372 deletions


@ -376,7 +376,7 @@ fn gen(self: *Self) !void {
// mov fp, sp
_ = try self.addInst(.{
.tag = .mov_to_from_sp,
.data = .{ .rr = .{ .rd = .x29, .rn = .xzr } },
.data = .{ .rr = .{ .rd = .x29, .rn = .sp } },
});
// sub sp, sp, #reloc
@ -421,7 +421,7 @@ fn gen(self: *Self) !void {
if (math.cast(u12, stack_size)) |size| {
self.mir_instructions.set(backpatch_reloc, .{
.tag = .sub_immediate,
.data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = size } },
.data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = size } },
});
} else |_| {
return self.failSymbol("TODO AArch64: allow larger stacks", .{});
@ -453,7 +453,7 @@ fn gen(self: *Self) !void {
// add sp, sp, #stack_size
_ = try self.addInst(.{
.tag = .add_immediate,
.data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = @intCast(u12, stack_size) } },
.data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @intCast(u12, stack_size) } },
});
// <load other registers>
@ -512,13 +512,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airAddWrap(inst),
.addwrap => try self.airBinOp(inst),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airSubWrap(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airBinOp(inst),
.mulwrap => try self.airMulWrap(inst),
.mulwrap => try self.airBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
@ -882,7 +882,8 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
const reg = try self.register_manager.allocReg(null);
const raw_reg = try self.register_manager.allocReg(null);
const reg = registerAlias(raw_reg, ty.abiSize(self.target.*));
try self.genSetReg(ty, reg, mcv);
return reg;
}
@ -891,7 +892,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner);
const raw_reg = try self.register_manager.allocReg(reg_owner);
const ty = self.air.typeOfIndex(reg_owner);
const reg = registerAlias(raw_reg, ty.abiSize(self.target.*));
try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
return MCValue{ .register = reg };
}
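// Note: both helpers above now allocate a raw register and immediately narrow
// it with registerAlias to the width of the value's type. registerAlias itself
// is not shown in these hunks; judging from its call sites, it behaves roughly
// like this sketch (an assumption, not part of this commit):
//
//     fn registerAlias(reg: Register, abi_size: u64) Register {
//         if (abi_size <= 4) return reg.to32(); // up to 4 bytes: w alias
//         if (abi_size <= 8) return reg.to64(); // up to 8 bytes: full x register
//         unreachable; // wider values never live in a single GP register
//     }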
@ -936,14 +939,99 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch});
}
fn truncRegister(
self: *Self,
operand_reg: Register,
dest_reg: Register,
int_signedness: std.builtin.Signedness,
int_bits: u16,
) !void {
switch (int_bits) {
1...31, 33...63 => {
_ = try self.addInst(.{
.tag = switch (int_signedness) {
.signed => .sbfx,
.unsigned => .ubfx,
},
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
.lsb = 0,
.width = @intCast(u6, int_bits),
} },
});
},
32, 64 => {
_ = try self.addInst(.{
.tag = .mov_register,
.data = .{ .rr = .{
.rd = dest_reg,
.rn = operand_reg,
} },
});
},
else => unreachable,
}
}
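// In short: truncation to any width below 32/64 bits is a single bitfield
// extract at bit 0, and truncation to exactly 32 or 64 bits is a plain move.
// Illustrative lowerings (register names assumed for concreteness):
//
//     trunc to u5  -> ubfx w0, w1, #0, #5    // zero-extend the low 5 bits
//     trunc to i12 -> sbfx w0, w1, #0, #12   // sign-extend the low 12 bits
//     trunc to u32 -> mov  w0, w1            // no masking needed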
fn trunc(
self: *Self,
maybe_inst: ?Air.Inst.Index,
operand: MCValue,
operand_ty: Type,
dest_ty: Type,
) !MCValue {
const info_a = operand_ty.intInfo(self.target.*);
const info_b = dest_ty.intInfo(self.target.*);
if (info_b.bits <= 64) {
const operand_reg = switch (operand) {
.register => |r| r,
else => operand_reg: {
if (info_a.bits <= 64) {
const raw_reg = try self.copyToTmpRegister(operand_ty, operand);
break :operand_reg registerAlias(raw_reg, operand_ty.abiSize(self.target.*));
} else {
return self.fail("TODO load least significant word into register", .{});
}
},
};
self.register_manager.freezeRegs(&.{operand_reg});
defer self.register_manager.unfreezeRegs(&.{operand_reg});
const dest_reg = if (maybe_inst) |inst| blk: {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :blk registerAlias(operand_reg, dest_ty.abiSize(self.target.*));
} else {
const raw_reg = try self.register_manager.allocReg(inst);
break :blk registerAlias(raw_reg, dest_ty.abiSize(self.target.*));
}
} else blk: {
const raw_reg = try self.register_manager.allocReg(null);
break :blk registerAlias(raw_reg, dest_ty.abiSize(self.target.*));
};
try self.truncRegister(operand_reg, dest_reg, info_b.signedness, info_b.bits);
return MCValue{ .register = dest_reg };
} else {
return self.fail("TODO: truncate to ints > 32 bits", .{});
}
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const operand = try self.resolveInst(ty_op.operand);
_ = operand;
return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch});
const operand_ty = self.air.typeOf(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
@ -1003,7 +1091,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :blk op_reg;
}
break :blk try self.register_manager.allocReg(null);
const raw_reg = try self.register_manager.allocReg(null);
break :blk raw_reg.to32();
};
_ = try self.addInst(.{
@ -1013,7 +1102,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.rn = op_reg,
.imms = 0b000000,
.immr = 0b000000,
.n = 0b1,
.n = 0b0,
} },
});
@ -1035,7 +1124,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :blk op_reg;
}
break :blk try self.register_manager.allocReg(null);
const raw_reg = try self.register_manager.allocReg(null);
break :blk registerAlias(raw_reg, operand_ty.abiSize(self.target.*));
};
_ = try self.addInst(.{
@ -1048,6 +1138,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
} },
});
try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits);
break :result MCValue{ .register = dest_reg };
} else {
return self.fail("TODO AArch64 not on integers > u64/i64", .{});
@ -1103,7 +1195,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
/// Asserts that generating an instruction of that form is possible.
fn binOpRegister(
self: *Self,
tag: Air.Inst.Tag,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
@ -1124,7 +1216,8 @@ fn binOpRegister(
break :inst Air.refToIndex(bin_op.lhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
const raw_reg = try self.register_manager.allocReg(track_inst);
const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
self.register_manager.freezeRegs(&.{reg});
if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@ -1139,7 +1232,8 @@ fn binOpRegister(
break :inst Air.refToIndex(bin_op.rhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
const raw_reg = try self.register_manager.allocReg(track_inst);
const reg = registerAlias(raw_reg, rhs_ty.abiSize(self.target.*));
self.register_manager.freezeRegs(&.{reg});
if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@ -1156,45 +1250,17 @@ fn binOpRegister(
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else try self.register_manager.allocReg(null);
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add,
.ptr_add,
=> .add_shifted_register,
.sub,
.ptr_sub,
=> .sub_shifted_register,
.cmp_eq => .cmp_shifted_register,
.mul => .mul,
.bit_and,
.bool_and,
=> .and_shifted_register,
.bit_or,
.bool_or,
=> .orr_shifted_register,
.shl,
.shl_exact,
=> .lsl_register,
.shr,
.shr_exact,
=> switch (lhs_ty.intInfo(self.target.*).signedness) {
.signed => Mir.Inst.Tag.asr_register,
.unsigned => Mir.Inst.Tag.lsr_register,
},
.xor => .eor_shifted_register,
else => unreachable,
};
const mir_data: Mir.Inst.Data = switch (tag) {
.add,
.sub,
.ptr_add,
.ptr_sub,
const mir_data: Mir.Inst.Data = switch (mir_tag) {
.add_shifted_register,
.sub_shifted_register,
=> .{ .rrr_imm6_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
@ -1202,27 +1268,24 @@ fn binOpRegister(
.imm6 = 0,
.shift = .lsl,
} },
.cmp_eq => .{ .rr_imm6_shift = .{
.cmp_shifted_register => .{ .rr_imm6_shift = .{
.rn = lhs_reg,
.rm = rhs_reg,
.imm6 = 0,
.shift = .lsl,
} },
.mul,
.shl,
.shl_exact,
.shr,
.shr_exact,
.lsl_register,
.asr_register,
.lsr_register,
=> .{ .rrr = .{
.rd = dest_reg,
.rn = lhs_reg,
.rm = rhs_reg,
} },
.bit_and,
.bool_and,
.bit_or,
.bool_or,
.xor,
.and_shifted_register,
.orr_shifted_register,
.eor_shifted_register,
=> .{ .rrr_imm6_logical_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
@ -1255,7 +1318,7 @@ fn binOpRegister(
/// Asserts that generating an instruction of that form is possible.
fn binOpImmediate(
self: *Self,
tag: Air.Inst.Tag,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
@ -1276,7 +1339,8 @@ fn binOpImmediate(
).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
const raw_reg = try self.register_manager.allocReg(track_inst);
const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
self.register_manager.freezeRegs(&.{reg});
if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@ -1285,8 +1349,8 @@ fn binOpImmediate(
};
defer self.register_manager.unfreezeRegs(&.{lhs_reg});
const dest_reg = switch (tag) {
.cmp_eq => undefined, // cmp has no destination register
const dest_reg = switch (mir_tag) {
.cmp_immediate => undefined, // cmp has no destination register
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -1298,46 +1362,31 @@ fn binOpImmediate(
)) {
break :blk lhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else try self.register_manager.allocReg(null),
};
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add => .add_immediate,
.sub => .sub_immediate,
.shl,
.shl_exact,
=> .lsl_immediate,
.shr,
.shr_exact,
=> switch (lhs_ty.intInfo(self.target.*).signedness) {
.signed => Mir.Inst.Tag.asr_immediate,
.unsigned => Mir.Inst.Tag.lsr_immediate,
},
.cmp_eq => .cmp_immediate,
else => unreachable,
};
const mir_data: Mir.Inst.Data = switch (tag) {
.add,
.sub,
const mir_data: Mir.Inst.Data = switch (mir_tag) {
.add_immediate,
.sub_immediate,
=> .{ .rr_imm12_sh = .{
.rd = dest_reg,
.rn = lhs_reg,
.imm12 = @intCast(u12, rhs.immediate),
} },
.shl,
.shl_exact,
.shr,
.shr_exact,
.lsl_immediate,
.asr_immediate,
.lsr_immediate,
=> .{ .rr_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
.shift = @intCast(u6, rhs.immediate),
} },
.cmp_eq => .{ .r_imm12_sh = .{
.cmp_immediate => .{ .r_imm12_sh = .{
.rn = lhs_reg,
.imm12 = @intCast(u12, rhs.immediate),
} },
@ -1375,7 +1424,6 @@ fn binOp(
) InnerError!MCValue {
const target = self.target.*;
switch (tag) {
// Arithmetic operations on integers and floats
.add,
.sub,
.cmp_eq,
@ -1403,13 +1451,26 @@ fn binOp(
else => unreachable,
};
const mir_tag_register: Mir.Inst.Tag = switch (tag) {
.add => .add_shifted_register,
.sub => .sub_shifted_register,
.cmp_eq => .cmp_shifted_register,
else => unreachable,
};
const mir_tag_immediate: Mir.Inst.Tag = switch (tag) {
.add => .add_immediate,
.sub => .sub_immediate,
.cmp_eq => .cmp_immediate,
else => unreachable,
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, rhs, lhs, rhs_ty, true);
} else {
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1428,7 +1489,7 @@ fn binOp(
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
// lowered to a << 1
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(.mul, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@ -1436,7 +1497,36 @@ fn binOp(
else => unreachable,
}
},
// Bitwise operations on integers
.addwrap,
.subwrap,
.mulwrap,
=> {
const base_tag: Air.Inst.Tag = switch (tag) {
.addwrap => .add,
.subwrap => .sub,
.mulwrap => .mul,
else => unreachable,
};
// Generate an add/sub/mul
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
// Truncate if necessary
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
const result_reg = result.register;
try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
return result;
} else {
return self.fail("TODO binary operations on integers > u64/i64", .{});
}
},
else => unreachable,
}
},
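// A wrapping operation thus reuses the plain lowering and then masks the
// result back to the operand width. For `a +% b` with a, b: u8 this presumably
// emits something like (illustrative, registers assumed):
//
//     add  w0, w0, w1        // full-width add; may set bits 8 and above
//     ubfx w0, w0, #0, #8    // truncRegister: keep the low 8 bits => wrap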
.bit_and,
.bit_or,
.xor,
@ -1448,7 +1538,53 @@ fn binOp(
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO implement bitwise operations with immediates
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const mir_tag: Mir.Inst.Tag = switch (tag) {
.bit_and => .and_shifted_register,
.bit_or => .orr_shifted_register,
.xor => .eor_shifted_register,
else => unreachable,
};
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
},
else => unreachable,
}
},
.shl_exact,
.shr_exact,
=> {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate;
const mir_tag_register: Mir.Inst.Tag = switch (tag) {
.shl_exact => .lsl_register,
.shr_exact => switch (int_info.signedness) {
.signed => Mir.Inst.Tag.asr_register,
.unsigned => Mir.Inst.Tag.lsr_register,
},
else => unreachable,
};
const mir_tag_immediate: Mir.Inst.Tag = switch (tag) {
.shl_exact => .lsl_immediate,
.shr_exact => switch (int_info.signedness) {
.signed => Mir.Inst.Tag.asr_immediate,
.unsigned => Mir.Inst.Tag.lsr_immediate,
},
else => unreachable,
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
} else {
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@ -1459,23 +1595,33 @@ fn binOp(
.shl,
.shr,
=> {
switch (lhs_ty.zigTypeTag()) {
const base_tag: Air.Inst.Tag = switch (tag) {
.shl => .shl_exact,
.shr => .shr_exact,
else => unreachable,
};
// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
// Truncate if necessary
switch (tag) {
.shr => return result,
.shl => switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate;
if (rhs_immediate_ok) {
return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false);
const result_reg = result.register;
try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
return result;
} else {
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
return self.fail("TODO binary operations on integers > u64/i64", .{});
}
},
else => unreachable,
},
else => unreachable,
}
},
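// Note the asymmetry handled above: shl can move set bits past the type's
// width, so its result is truncated, while shr never produces bits above the
// operand width and is returned as-is. For `x << 3` with x: u16 (illustrative):
//
//     lsl  w0, w0, #3        // shl_exact
//     ubfx w0, w0, #0, #16   // truncate back to 16 bits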
.bool_and,
@ -1486,7 +1632,13 @@ fn binOp(
assert(lhs != .immediate); // should have been handled by Sema
assert(rhs != .immediate); // should have been handled by Sema
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const mir_tag_register: Mir.Inst.Tag = switch (tag) {
.bool_and => .and_shifted_register,
.bool_or => .orr_shifted_register,
else => unreachable,
};
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
},
else => unreachable,
}
@ -1504,9 +1656,9 @@ fn binOp(
const elem_size = elem_ty.abiSize(self.target.*);
if (elem_size == 1) {
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
.ptr_sub => .sub,
const base_tag: Mir.Inst.Tag = switch (tag) {
.ptr_add => .add_shifted_register,
.ptr_sub => .sub_shifted_register,
else => unreachable,
};
@ -1538,36 +1690,18 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
@ -1961,11 +2095,12 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
.undef => unreachable,
.compare_flags_signed, .compare_flags_unsigned => unreachable,
.register => |dst_reg| {
try self.genLdrRegister(dst_reg, addr_reg, elem_size);
try self.genLdrRegister(dst_reg, addr_reg, elem_ty);
},
.stack_offset => |off| {
if (elem_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null);
const raw_tmp_reg = try self.register_manager.allocReg(null);
const tmp_reg = registerAlias(raw_tmp_reg, elem_size);
self.register_manager.freezeRegs(&.{tmp_reg});
defer self.register_manager.unfreezeRegs(&.{tmp_reg});
@ -2001,12 +2136,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
.got_load,
.direct_load,
=> {
const reg = try self.register_manager.allocReg(null);
self.register_manager.freezeRegs(&.{reg});
defer self.register_manager.unfreezeRegs(&.{reg});
try self.genSetReg(ptr_ty, reg, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.load(dst_mcv, .{ .register = addr_reg }, ptr_ty);
},
}
}
@ -2091,6 +2222,7 @@ fn genInlineMemcpy(
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const elem_size = elem_ty.abiSize(self.target.*);
const result: MCValue = result: {
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
@ -2101,9 +2233,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue.dead;
const dst_mcv: MCValue = blk: {
if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
break :blk ptr;
break :blk switch (ptr) {
.register => |r| MCValue{ .register = registerAlias(r, elem_size) },
else => ptr,
};
} else {
break :blk try self.allocRegOrMem(inst, true);
}
@ -2114,101 +2249,52 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, abi_size: u64) !void {
switch (abi_size) {
1 => {
_ = try self.addInst(.{
.tag = .ldrb_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
2 => {
_ = try self.addInst(.{
.tag = .ldrh_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
4 => {
_ = try self.addInst(.{
.tag = .ldr_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
8 => {
_ = try self.addInst(.{
.tag = .ldr_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to64(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
const abi_size = ty.abiSize(self.target.*);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
4 => .ldr_immediate,
8 => .ldr_immediate,
3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
else => unreachable,
}
};
_ = try self.addInst(.{
.tag = tag,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg,
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
}
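// The practical effect: loads of signed element types now sign-extend instead
// of zero-extending. Illustrative forms (register width chosen by the caller):
//
//     ldrb  w0, [x1]   // u8:  zero-extend
//     ldrsb w0, [x1]   // i8:  sign-extend into a 32-bit register
//     ldrsb x0, [x1]   // i8:  sign-extend into a 64-bit register (different opc)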
fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, abi_size: u64) !void {
switch (abi_size) {
1 => {
_ = try self.addInst(.{
.tag = .strb_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
2 => {
_ = try self.addInst(.{
.tag = .strh_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
4 => {
_ = try self.addInst(.{
.tag = .str_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to32(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
8 => {
_ = try self.addInst(.{
.tag = .str_immediate,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg.to64(),
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
},
fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
const abi_size = ty.abiSize(self.target.*);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb_immediate,
2 => .strh_immediate,
4, 8 => .str_immediate,
3, 5, 6, 7 => return self.fail("TODO: genStrRegister for more abi_sizes", .{}),
else => unreachable,
}
};
_ = try self.addInst(.{
.tag = tag,
.data = .{ .load_store_register_immediate = .{
.rt = value_reg,
.rn = addr_reg,
.offset = Instruction.LoadStoreOffset.none.immediate,
} },
});
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
const abi_size = value_ty.abiSize(self.target.*);
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@ -2226,14 +2312,14 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
self.register_manager.freezeRegs(&.{addr_reg});
defer self.register_manager.unfreezeRegs(&.{addr_reg});
const abi_size = value_ty.abiSize(self.target.*);
switch (value) {
.register => |value_reg| {
try self.genStrRegister(value_reg, addr_reg, abi_size);
try self.genStrRegister(value_reg, addr_reg, value_ty);
},
else => {
if (abi_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null);
const raw_tmp_reg = try self.register_manager.allocReg(null);
const tmp_reg = registerAlias(raw_tmp_reg, abi_size);
self.register_manager.freezeRegs(&.{tmp_reg});
defer self.register_manager.unfreezeRegs(&.{tmp_reg});
@ -3470,24 +3556,27 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } },
});
if (x > math.maxInt(u16)) {
if (x & 0x0000_0000_ffff_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } },
});
}
if (x > math.maxInt(u32)) {
if (reg.size() == 64) {
if (x & 0x0000_ffff_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } },
});
}
if (x > math.maxInt(u48)) {
if (x & 0xffff_0000_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } },
});
}
}
},
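// The movz/movk sequence now keys on which 16-bit halfwords are nonzero rather
// than on the constant's magnitude, so values with zero halfwords in the middle
// need fewer instructions. E.g. materializing 0x0000_1234_0000_abcd
// (hypothetical value) into x0:
//
//     movz x0, #0xabcd            // halfword 0
//     movk x0, #0x1234, lsl #32   // halfword 2; halfwords 1 and 3 are zero, skipped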
.register => |src_reg| {
// If the registers are the same, nothing to do.
@ -3522,8 +3611,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.memory => |addr| {
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr });
try self.genLdrRegister(reg, reg, ty.abiSize(self.target.*));
try self.genSetReg(ty, reg.to64(), .{ .immediate = addr });
try self.genLdrRegister(reg, reg.to64(), ty);
},
.stack_offset => |off| {
const abi_size = ty.abiSize(self.target.*);
@ -3531,21 +3620,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
switch (abi_size) {
1, 2, 4, 8 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .ldrb_stack,
2 => .ldrh_stack,
1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
4, 8 => .ldr_stack,
else => unreachable, // unexpected abi size
};
const rt: Register = switch (abi_size) {
1, 2, 4 => reg.to32(),
8 => reg.to64(),
else => unreachable, // unexpected abi size
};
_ = try self.addInst(.{
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = rt,
.rt = reg,
.offset = @intCast(u32, off),
} },
});
@ -3998,6 +4082,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var nsaa: u32 = 0; // Next stacked argument address
for (param_types) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
continue;
}
// We round up NCRN only for non-Apple platforms, since Apple's ABI allows
// 16-byte-aligned values to start at an odd-numbered register.
if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
@ -4005,10 +4095,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn += ncrn % 2;
}
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
if (param_size <= 8) {
result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
result.args[i] = .{ .register = registerAlias(c_abi_int_param_regs[ncrn], param_size) };
ncrn += 1;
} else {
return self.fail("TODO MCValues with multiple registers", .{});
@ -4045,7 +4134,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.Unspecified, .C => {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
if (ret_ty_size <= 8) {
result.return_value = .{ .register = c_abi_int_return_regs[0] };
result.return_value = .{ .register = registerAlias(c_abi_int_return_regs[0], ret_ty_size) };
} else {
return self.fail("TODO support more return types for ARM backend", .{});
}


@ -131,6 +131,8 @@ pub fn emitMir(
.ldr_stack => try emit.mirLoadStoreStack(inst),
.ldrb_stack => try emit.mirLoadStoreStack(inst),
.ldrh_stack => try emit.mirLoadStoreStack(inst),
.ldrsb_stack => try emit.mirLoadStoreStack(inst),
.ldrsh_stack => try emit.mirLoadStoreStack(inst),
.str_stack => try emit.mirLoadStoreStack(inst),
.strb_stack => try emit.mirLoadStoreStack(inst),
.strh_stack => try emit.mirLoadStoreStack(inst),
@ -145,6 +147,9 @@ pub fn emitMir(
.ldr_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.ldrb_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.ldrh_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.ldrsb_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.ldrsh_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.ldrsw_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.str_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.strb_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
.strh_immediate => try emit.mirLoadStoreRegisterImmediate(inst),
@ -162,6 +167,17 @@ pub fn emitMir(
.push_regs => try emit.mirPushPopRegs(inst),
.pop_regs => try emit.mirPushPopRegs(inst),
.sbfx,
.ubfx,
=> try emit.mirBitfieldExtract(inst),
.sxtb,
.sxth,
.sxtw,
.uxtb,
.uxth,
=> try emit.mirExtend(inst),
}
}
}
@ -457,8 +473,13 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const rn = r_imm12_sh.rn;
const imm12 = r_imm12_sh.imm12;
const sh = r_imm12_sh.sh == 1;
const zr: Register = switch (rn.size()) {
32 => .wzr,
64 => .xzr,
else => unreachable,
};
try emit.writeInstruction(Instruction.subs(.xzr, rn, imm12, sh));
try emit.writeInstruction(Instruction.subs(zr, rn, imm12, sh));
},
else => unreachable,
}
@ -674,8 +695,13 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const rm = rr_imm6_shift.rm;
const shift = rr_imm6_shift.shift;
const imm6 = rr_imm6_shift.imm6;
const zr: Register = switch (rn.size()) {
32 => .wzr,
64 => .xzr,
else => unreachable,
};
try emit.writeInstruction(Instruction.subsShiftedRegister(.xzr, rn, rm, shift, imm6));
try emit.writeInstruction(Instruction.subsShiftedRegister(zr, rn, rm, shift, imm6));
},
else => unreachable,
}
@ -686,7 +712,12 @@ fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.cset => {
const r_cond = emit.mir.instructions.items(.data)[inst].r_cond;
try emit.writeInstruction(Instruction.csinc(r_cond.rd, .xzr, .xzr, r_cond.cond));
const zr: Register = switch (r_cond.rd.size()) {
32 => .wzr,
64 => .xzr,
else => unreachable,
};
try emit.writeInstruction(Instruction.csinc(r_cond.rd, zr, zr, r_cond.cond));
},
else => unreachable,
}
@ -718,14 +749,14 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
// PC-relative displacement to the entry in memory.
// adrp
const offset = @intCast(u32, emit.code.items.len);
try emit.writeInstruction(Instruction.adrp(reg, 0));
try emit.writeInstruction(Instruction.adrp(reg.to64(), 0));
switch (tag) {
.load_memory_got => {
// ldr reg, reg, offset
try emit.writeInstruction(Instruction.ldr(
reg,
reg,
reg.to64(),
Instruction.LoadStoreOffset.imm(0),
));
},
@ -739,11 +770,11 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
// Note that this can potentially be optimised out by the codegen/linker if the
// target address is appropriately aligned.
// add reg, reg, offset
try emit.writeInstruction(Instruction.add(reg, reg, 0, false));
try emit.writeInstruction(Instruction.add(reg.to64(), reg.to64(), 0, false));
// ldr reg, reg, offset
try emit.writeInstruction(Instruction.ldr(
reg,
reg,
reg.to64(),
Instruction.LoadStoreOffset.imm(0),
));
},
@ -821,14 +852,14 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
const raw_offset = emit.stack_size - load_store_stack.offset;
const offset = switch (tag) {
.ldrb_stack, .strb_stack => blk: {
.ldrb_stack, .ldrsb_stack, .strb_stack => blk: {
if (math.cast(u12, raw_offset)) |imm| {
break :blk Instruction.LoadStoreOffset.imm(imm);
} else |_| {
return emit.fail("TODO load/store stack byte with larger offset", .{});
}
},
.ldrh_stack, .strh_stack => blk: {
.ldrh_stack, .ldrsh_stack, .strh_stack => blk: {
assert(std.mem.isAlignedGeneric(u32, raw_offset, 2)); // misaligned stack entry
if (math.cast(u12, @divExact(raw_offset, 2))) |imm| {
break :blk Instruction.LoadStoreOffset.imm(imm);
@ -857,6 +888,8 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
.ldr_stack => try emit.writeInstruction(Instruction.ldr(rt, .sp, offset)),
.ldrb_stack => try emit.writeInstruction(Instruction.ldrb(rt, .sp, offset)),
.ldrh_stack => try emit.writeInstruction(Instruction.ldrh(rt, .sp, offset)),
.ldrsb_stack => try emit.writeInstruction(Instruction.ldrsb(rt, .sp, offset)),
.ldrsh_stack => try emit.writeInstruction(Instruction.ldrsh(rt, .sp, offset)),
.str_stack => try emit.writeInstruction(Instruction.str(rt, .sp, offset)),
.strb_stack => try emit.writeInstruction(Instruction.strb(rt, .sp, offset)),
.strh_stack => try emit.writeInstruction(Instruction.strh(rt, .sp, offset)),
@ -875,6 +908,9 @@ fn mirLoadStoreRegisterImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
.ldr_immediate => try emit.writeInstruction(Instruction.ldr(rt, rn, offset)),
.ldrb_immediate => try emit.writeInstruction(Instruction.ldrb(rt, rn, offset)),
.ldrh_immediate => try emit.writeInstruction(Instruction.ldrh(rt, rn, offset)),
.ldrsb_immediate => try emit.writeInstruction(Instruction.ldrsb(rt, rn, offset)),
.ldrsh_immediate => try emit.writeInstruction(Instruction.ldrsh(rt, rn, offset)),
.ldrsw_immediate => try emit.writeInstruction(Instruction.ldrsw(rt, rn, offset)),
.str_immediate => try emit.writeInstruction(Instruction.str(rt, rn, offset)),
.strb_immediate => try emit.writeInstruction(Instruction.strb(rt, rn, offset)),
.strh_immediate => try emit.writeInstruction(Instruction.strh(rt, rn, offset)),
@ -905,7 +941,13 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.mov_register => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
try emit.writeInstruction(Instruction.orrShiftedRegister(rr.rd, .xzr, rr.rn, .lsl, 0));
const zr: Register = switch (rr.rd.size()) {
32 => .wzr,
64 => .xzr,
else => unreachable,
};
try emit.writeInstruction(Instruction.orrShiftedRegister(rr.rd, zr, rr.rn, .lsl, 0));
},
.mov_to_from_sp => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
@ -917,8 +959,13 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const rm = rr_imm6_logical_shift.rm;
const shift = rr_imm6_logical_shift.shift;
const imm6 = rr_imm6_logical_shift.imm6;
const zr: Register = switch (rd.size()) {
32 => .wzr,
64 => .xzr,
else => unreachable,
};
try emit.writeInstruction(Instruction.ornShiftedRegister(rd, .xzr, rm, shift, imm6));
try emit.writeInstruction(Instruction.ornShiftedRegister(rd, zr, rm, shift, imm6));
},
else => unreachable,
}
@ -1024,3 +1071,32 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
}
}
fn mirBitfieldExtract(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr_lsb_width = emit.mir.instructions.items(.data)[inst].rr_lsb_width;
const rd = rr_lsb_width.rd;
const rn = rr_lsb_width.rn;
const lsb = rr_lsb_width.lsb;
const width = rr_lsb_width.width;
switch (tag) {
.sbfx => try emit.writeInstruction(Instruction.sbfx(rd, rn, lsb, width)),
.ubfx => try emit.writeInstruction(Instruction.ubfx(rd, rn, lsb, width)),
else => unreachable,
}
}
fn mirExtend(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr = emit.mir.instructions.items(.data)[inst].rr;
switch (tag) {
.sxtb => try emit.writeInstruction(Instruction.sxtb(rr.rd, rr.rn)),
.sxth => try emit.writeInstruction(Instruction.sxth(rr.rd, rr.rn)),
.sxtw => try emit.writeInstruction(Instruction.sxtw(rr.rd, rr.rn)),
.uxtb => try emit.writeInstruction(Instruction.uxtb(rr.rd, rr.rn)),
.uxth => try emit.writeInstruction(Instruction.uxth(rr.rd, rr.rn)),
else => unreachable,
}
}


@ -100,6 +100,16 @@ pub const Inst = struct {
ldrh_immediate,
/// Load Register Halfword (register)
ldrh_register,
/// Load Register Signed Byte (immediate)
ldrsb_immediate,
/// Pseudo-instruction: Load signed byte from stack
ldrsb_stack,
/// Load Register Signed Halfword (immediate)
ldrsh_immediate,
/// Pseudo-instruction: Load signed halfword from stack
ldrsh_stack,
/// Load Register Signed Word (immediate)
ldrsw_immediate,
/// Logical Shift Left (immediate)
lsl_immediate,
/// Logical Shift Left (register)
@ -130,6 +140,14 @@ pub const Inst = struct {
push_regs,
/// Return from subroutine
ret,
/// Signed bitfield extract
sbfx,
/// Signed extend byte
sxtb,
/// Signed extend halfword
sxth,
/// Signed extend word
sxtw,
/// Store Pair of Registers
stp,
/// Pseudo-instruction: Store to stack
@ -156,6 +174,12 @@ pub const Inst = struct {
sub_shifted_register,
/// Supervisor Call
svc,
/// Unsigned bitfield extract
ubfx,
/// Unsigned extend byte
uxtb,
/// Unsigned extend halfword
uxth,
};
/// The position of an MIR instruction within the `Mir` instructions array.
@ -225,13 +249,6 @@ pub const Inst = struct {
rt: Register,
inst: Index,
},
/// Two registers
///
/// Used by e.g. mov_register
rr: struct {
rd: Register,
rn: Register,
},
/// A register, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. cmp_immediate
@ -240,6 +257,13 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
/// Two registers
///
/// Used by e.g. mov_register
rr: struct {
rd: Register,
rn: Register,
},
/// Two registers, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. sub_immediate
@ -268,6 +292,16 @@ pub const Inst = struct {
imm6: u6,
shift: bits.Instruction.LogicalShiftedRegisterShift,
},
/// Two registers, an lsb (range 0-63), and a width (range 1-64)
///
/// Used by e.g. ubfx
rr_lsb_width: struct {
rd: Register,
rn: Register,
lsb: u6,
width: u7,
},
/// Two registers and a bitmask immediate
///
/// Used by e.g. eor_immediate


@ -510,33 +510,23 @@ pub const Instruction = union(enum) {
imm16: u16,
shift: u6,
) Instruction {
switch (rd.size()) {
32 => {
assert(shift % 16 == 0 and shift <= 16);
assert(shift % 16 == 0);
assert(!(rd.size() == 32 and shift > 16));
assert(!(rd.size() == 64 and shift > 48));
return Instruction{
.move_wide_immediate = .{
.rd = rd.enc(),
.imm16 = imm16,
.hw = @intCast(u2, shift / 16),
.opc = opc,
.sf = 0,
},
};
},
64 => {
assert(shift % 16 == 0 and shift <= 48);
return Instruction{
.move_wide_immediate = .{
.rd = rd.enc(),
.imm16 = imm16,
.hw = @intCast(u2, shift / 16),
.opc = opc,
.sf = 1,
},
};
},
.sf = switch (rd.size()) {
32 => 0,
64 => 1,
else => unreachable, // unexpected register size
}
},
},
};
}
fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
@ -675,18 +665,24 @@ pub const Instruction = union(enum) {
/// Which kind of load/store to perform
const LoadStoreVariant = enum {
/// 32-bit or 64-bit
/// 32 bits or 64 bits
str,
/// 16-bit, zero-extended
strh,
/// 8-bit, zero-extended
/// 8 bits, zero-extended
strb,
/// 32-bit or 64-bit
/// 16 bits, zero-extended
strh,
/// 32 bits or 64 bits
ldr,
/// 16-bit, zero-extended
ldrh,
/// 8-bit, zero-extended
/// 8 bits, zero-extended
ldrb,
/// 16 bits, zero-extended
ldrh,
/// 8 bits, sign extended
ldrsb,
/// 16 bits, sign extended
ldrsh,
/// 32 bits, sign extended
ldrsw,
};
fn loadStoreRegister(
@ -695,7 +691,11 @@ pub const Instruction = union(enum) {
offset: LoadStoreOffset,
variant: LoadStoreVariant,
) Instruction {
assert(rn.size() == 64);
assert(rn.id() != Register.xzr.id());
const off = offset.toU12();
const op1: u2 = blk: {
switch (offset) {
.immediate => |imm| switch (imm) {
@ -706,10 +706,35 @@ pub const Instruction = union(enum) {
}
break :blk 0b00;
};
const opc: u2 = switch (variant) {
.ldr, .ldrh, .ldrb => 0b01,
.str, .strh, .strb => 0b00,
const opc: u2 = blk: {
switch (variant) {
.ldr, .ldrh, .ldrb => break :blk 0b01,
.str, .strh, .strb => break :blk 0b00,
.ldrsb,
.ldrsh,
=> switch (rt.size()) {
32 => break :blk 0b11,
64 => break :blk 0b10,
else => unreachable, // unexpected register size
},
.ldrsw => break :blk 0b10,
}
};
const size: u2 = blk: {
switch (variant) {
.ldr, .str => switch (rt.size()) {
32 => break :blk 0b10,
64 => break :blk 0b11,
else => unreachable, // unexpected register size
},
.ldrsw => break :blk 0b10,
.ldrh, .ldrsh, .strh => break :blk 0b01,
.ldrb, .ldrsb, .strb => break :blk 0b00,
}
};
return Instruction{
.load_store_register = .{
.rt = rt.enc(),
@ -718,17 +743,7 @@ pub const Instruction = union(enum) {
.opc = opc,
.op1 = op1,
.v = 0,
.size = blk: {
switch (variant) {
.ldr, .str => switch (rt.size()) {
32 => break :blk 0b10,
64 => break :blk 0b11,
else => unreachable, // unexpected register size
},
.ldrh, .strh => break :blk 0b01,
.ldrb, .strb => break :blk 0b00,
}
},
.size = size,
},
};
}
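// For orientation, the size/opc pairs the switches above produce, following
// the A64 load/store register (immediate) encoding (a summary, not new logic):
//
//     strb:  size=00 opc=00    ldrb:  size=00 opc=01
//     ldrsb: size=00 opc=10 (64-bit rt) or 11 (32-bit rt)
//     strh:  size=01 opc=00    ldrh:  size=01 opc=01
//     ldrsh: size=01 opc=10 (64-bit rt) or 11 (32-bit rt)
//     str:   size=10 (w) / 11 (x), opc=00    ldr: same sizes, opc=01
//     ldrsw: size=10 opc=10 (rt is always 64-bit)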
@ -741,6 +756,9 @@ pub const Instruction = union(enum) {
encoding: u2,
load: bool,
) Instruction {
assert(rn.size() == 64);
assert(rn.id() != Register.xzr.id());
switch (rt1.size()) {
32 => {
assert(-256 <= offset and offset <= 252);
@ -849,9 +867,10 @@ pub const Instruction = union(enum) {
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
switch (rd.size()) {
32 => {
assert(amount < 32);
assert(rd.size() == rn.size());
assert(rd.size() == rm.size());
if (rd.size() == 32) assert(amount < 32);
return Instruction{
.logical_shifted_register = .{
.rd = rd.enc(),
@ -861,26 +880,13 @@ pub const Instruction = union(enum) {
.n = n,
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b0,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable,
},
},
};
},
64 => {
return Instruction{
.logical_shifted_register = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imm6 = amount,
.rm = rm.enc(),
.n = n,
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b1,
},
};
},
else => unreachable, // unexpected register size
}
}
fn addSubtractImmediate(
@ -891,6 +897,9 @@ pub const Instruction = union(enum) {
imm12: u12,
shift: bool,
) Instruction {
assert(rd.size() == rn.size());
assert(rn.id() != Register.xzr.id());
return Instruction{
.add_subtract_immediate = .{
.rd = rd.enc(),
@ -916,6 +925,9 @@ pub const Instruction = union(enum) {
immr: u6,
n: u1,
) Instruction {
assert(rd.size() == rn.size());
assert(!(rd.size() == 32 and n != 0));
return Instruction{
.logical_immediate = .{
.rd = rd.enc(),
@ -941,6 +953,10 @@ pub const Instruction = union(enum) {
immr: u6,
imms: u6,
) Instruction {
assert(rd.size() == rn.size());
assert(!(rd.size() == 64 and n != 1));
assert(!(rd.size() == 32 and (n != 0 or immr >> 5 != 0 or imms >> 5 != 0)));
return Instruction{
.bitfield = .{
.rd = rd.enc(),
@ -969,6 +985,9 @@ pub const Instruction = union(enum) {
rm: Register,
imm6: u6,
) Instruction {
assert(rd.size() == rn.size());
assert(rd.size() == rm.size());
return Instruction{
.add_subtract_shifted_register = .{
.rd = rd.enc(),
@ -994,6 +1013,7 @@ pub const Instruction = union(enum) {
offset: i21,
) Instruction {
assert(offset & 0b11 == 0b00);
return Instruction{
.conditional_branch = .{
.cond = @enumToInt(cond),
@ -1010,6 +1030,7 @@ pub const Instruction = union(enum) {
offset: i21,
) Instruction {
assert(offset & 0b11 == 0b00);
return Instruction{
.compare_and_branch = .{
.rt = rt.enc(),
@ -1033,6 +1054,9 @@ pub const Instruction = union(enum) {
rm: Register,
cond: Condition,
) Instruction {
assert(rd.size() == rn.size());
assert(rd.size() == rm.size());
return Instruction{
.conditional_select = .{
.rd = rd.enc(),
@ -1085,6 +1109,9 @@ pub const Instruction = union(enum) {
rn: Register,
rm: Register,
) Instruction {
assert(rd.size() == rn.size());
assert(rd.size() == rm.size());
return Instruction{
.data_processing_2_source = .{
.rd = rd.enc(),
@ -1145,6 +1172,18 @@ pub const Instruction = union(enum) {
return loadStoreRegister(rt, rn, offset, .ldrb);
}
pub fn ldrsb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
return loadStoreRegister(rt, rn, offset, .ldrsb);
}
pub fn ldrsh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
return loadStoreRegister(rt, rn, offset, .ldrsh);
}
pub fn ldrsw(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
return loadStoreRegister(rt, rn, offset, .ldrsw);
}
pub fn str(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
return loadStoreRegister(rt, rn, offset, .str);
}
@ -1404,6 +1443,23 @@ pub const Instruction = union(enum) {
return sbfm(rd, rn, shift, imms);
}
pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
return sbfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
}
pub fn sxtb(rd: Register, rn: Register) Instruction {
return sbfm(rd, rn, 0, 7);
}
pub fn sxth(rd: Register, rn: Register) Instruction {
return sbfm(rd, rn, 0, 15);
}
pub fn sxtw(rd: Register, rn: Register) Instruction {
assert(rd.size() == 64);
return sbfm(rd, rn, 0, 31);
}
pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
const size = @intCast(u6, rd.size() - 1);
return ubfm(rd, rn, size - shift + 1, size - shift);
@ -1414,6 +1470,18 @@ pub const Instruction = union(enum) {
return ubfm(rd, rn, shift, imms);
}
pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
return ubfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
}
pub fn uxtb(rd: Register, rn: Register) Instruction {
return ubfm(rd, rn, 0, 7);
}
pub fn uxth(rd: Register, rn: Register) Instruction {
return ubfm(rd, rn, 0, 15);
}
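// These aliases are the standard bitfield-move reductions: sbfx/ubfx encode
// immr = lsb and imms = lsb + width - 1, and the fixed-width extends are
// extracts starting at bit 0. Concrete reductions (illustrative):
//
//     ubfx x0, x1, #8, #4  ==  ubfm x0, x1, immr=8, imms=11
//     uxtb w0, w1          ==  ubfm w0, w1, immr=0, imms=7
//     sxtw x0, w1          ==  sbfm x0, x1, immr=0, imms=31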
// Add/subtract (shifted register)
pub fn addShiftedRegister(


@ -15,8 +15,6 @@ test "empty function with comments" {
}
test "truncate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(testTruncate(0x10fd) == 0xfd);
comptime try expect(testTruncate(0x10fd) == 0xfd);
}
@ -25,8 +23,6 @@ fn testTruncate(x: u32) u8 {
}
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);
try testTrunc(u32, u2, 0b10101, 0b01);


@ -363,7 +363,6 @@ fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int
test "binary not" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try expect(comptime x: {
break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
@ -499,8 +498,6 @@ fn mod(comptime T: type, a: T, b: T) T {
}
test "unsigned wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testUnsignedWrappingEval(maxInt(u32));
comptime try testUnsignedWrappingEval(maxInt(u32));
}
@ -512,8 +509,6 @@ fn testUnsignedWrappingEval(x: u32) !void {
}
test "signed wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testSignedWrappingEval(maxInt(i32));
comptime try testSignedWrappingEval(maxInt(i32));
}
@ -525,8 +520,6 @@ fn testSignedWrappingEval(x: i32) !void {
}
test "signed negation wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testSignedNegationWrappingEval(minInt(i16));
comptime try testSignedNegationWrappingEval(minInt(i16));
}
@ -537,8 +530,6 @@ fn testSignedNegationWrappingEval(x: i16) !void {
}
test "unsigned negation wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testUnsignedNegationWrappingEval(1);
comptime try testUnsignedNegationWrappingEval(1);
}
@ -859,8 +850,6 @@ test "quad hex float literal parsing accurate" {
}
test "truncating shift left" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testShlTrunc(maxInt(u16));
comptime try testShlTrunc(maxInt(u16));
}
@ -871,7 +860,6 @@ fn testShlTrunc(x: u16) !void {
test "exact shift left" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testShlExact(0b00110101);
comptime try testShlExact(0b00110101);
@ -883,7 +871,6 @@ fn testShlExact(x: u8) !void {
test "exact shift right" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testShrExact(0b10110100);
comptime try testShrExact(0b10110100);
@ -895,7 +882,6 @@ fn testShrExact(x: u8) !void {
test "shift left/right on u0 operand" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {


@ -49,8 +49,6 @@ test "truncate.i0.var" {
}
test "truncate on comptime integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x = @truncate(u16, 9999);
try expect(x == 9999);
var y = @truncate(u16, -21555);