std.Target adjustments

* move `ptrBitWidth` from Arch to Target since it needs to know about the ABI
* double isn't always 8 bytes
* AVR uses 1-byte alignment for everything in GCC
Authored by Veikka Tuominen, 2023-05-26 23:29:05 +03:00; committed by Andrew Kelley
parent dbd44658ff
commit ca16f1e8a7
28 changed files with 180 additions and 168 deletions
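
The heart of the change is that pointer width is now a property of the whole target, not the architecture alone: an ILP32 ABI such as gnux32 yields 32-bit pointers on 64-bit hardware, which `Arch.ptrBitWidth` could never express. A minimal sketch of the new resolution order (the helper name is illustrative; the real method is `std.Target.ptrBitWidth` shown in the diff below):

const std = @import("std");

// Sketch of the lookup order introduced by this commit: consult the ABI
// first, then fall back to the architecture's natural width.
fn ptrBitWidthSketch(abi: std.Target.Abi, arch_width: u16) u16 {
    return switch (abi) {
        // ILP32 ABIs on 64-bit hardware: pointers are 32 bits.
        .gnux32, .muslx32, .gnuabin32, .gnuilp32 => 32,
        // The MIPS n64 ABI forces 64-bit pointers.
        .gnuabi64 => 64,
        else => arch_width,
    };
}

test "ABI overrides architecture width" {
    // x86_64 hardware, but the x32 ABI gives 32-bit pointers.
    try std.testing.expectEqual(@as(u16, 32), ptrBitWidthSketch(.gnux32, 64));
    try std.testing.expectEqual(@as(u16, 64), ptrBitWidthSketch(.gnu, 64));
}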

View file

@@ -322,7 +322,7 @@ pub const RTLD = struct {
 pub const dl_phdr_info = extern struct {
     /// Module relocation base.
-    dlpi_addr: if (builtin.cpu.arch.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr,
+    dlpi_addr: if (builtin.target.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr,
     /// Module name.
     dlpi_name: ?[*:0]const u8,
     /// Pointer to module's phdr.

View file

@@ -5847,8 +5847,18 @@ pub const AUDIT = struct {
     fn toAudit(arch: std.Target.Cpu.Arch) u32 {
         var res: u32 = @enumToInt(arch.toElfMachine());
         if (arch.endian() == .Little) res |= LE;
-        if (arch.ptrBitWidth() == 64) res |= @"64BIT";
+        switch (arch) {
+            .aarch64,
+            .mips64,
+            .mips64el,
+            .powerpc64,
+            .powerpc64le,
+            .riscv64,
+            .sparc64,
+            .x86_64,
+            => res |= @"64BIT",
+            else => {},
+        }
         return res;
     }
 };
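
Since `ptrBitWidth` now honors the ABI, deriving the AUDIT `64BIT` flag from it would misclassify ILP32-on-64 processes (an x86_64 machine running the x32 ABI is still a 64-bit ELF machine), hence the explicit list of 64-bit architectures. A minimal sketch of that predicate (the helper name is illustrative):

const std = @import("std");

// Sketch mirroring the new switch: whether an architecture is a 64-bit
// ELF machine, independent of any ILP32 ABI layered on top of it.
fn isElf64Machine(arch: std.Target.Cpu.Arch) bool {
    return switch (arch) {
        .aarch64, .mips64, .mips64el, .powerpc64, .powerpc64le, .riscv64, .sparc64, .x86_64 => true,
        else => false,
    };
}

test "x86_64 audits as 64-bit regardless of ABI" {
    try std.testing.expect(isElf64Machine(.x86_64));
    try std.testing.expect(!isElf64Machine(.x86));
}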

View file

@@ -1189,77 +1189,6 @@ pub const Target = struct {
         };
     }
-    pub fn ptrBitWidth(arch: Arch) u16 {
-        switch (arch) {
-            .avr,
-            .msp430,
-            .spu_2,
-            => return 16,
-            .arc,
-            .arm,
-            .armeb,
-            .csky,
-            .hexagon,
-            .m68k,
-            .le32,
-            .mips,
-            .mipsel,
-            .powerpc,
-            .powerpcle,
-            .r600,
-            .riscv32,
-            .sparc,
-            .sparcel,
-            .tce,
-            .tcele,
-            .thumb,
-            .thumbeb,
-            .x86,
-            .xcore,
-            .nvptx,
-            .amdil,
-            .hsail,
-            .spir,
-            .kalimba,
-            .shave,
-            .lanai,
-            .wasm32,
-            .renderscript32,
-            .aarch64_32,
-            .spirv32,
-            .loongarch32,
-            .dxil,
-            .xtensa,
-            => return 32,
-            .aarch64,
-            .aarch64_be,
-            .mips64,
-            .mips64el,
-            .powerpc64,
-            .powerpc64le,
-            .riscv64,
-            .x86_64,
-            .nvptx64,
-            .le64,
-            .amdil64,
-            .hsail64,
-            .spir64,
-            .wasm64,
-            .renderscript64,
-            .amdgcn,
-            .bpfel,
-            .bpfeb,
-            .sparc64,
-            .s390x,
-            .ve,
-            .spirv64,
-            .loongarch64,
-            => return 64,
-        }
-    }
     /// Returns a name that matches the lib/std/target/* source file name.
     pub fn genericName(arch: Arch) []const u8 {
         return switch (arch) {
@@ -1621,7 +1550,7 @@ pub const Target = struct {
         const copy = S.copy;
         if (self.abi == .android) {
-            const suffix = if (self.cpu.arch.ptrBitWidth() == 64) "64" else "";
+            const suffix = if (self.ptrBitWidth() == 64) "64" else "";
             return print(&result, "/system/bin/linker{s}", .{suffix});
         }
@@ -1904,6 +1833,83 @@ pub const Target = struct {
         };
     }
+    pub fn ptrBitWidth(target: std.Target) u16 {
+        switch (target.abi) {
+            .gnux32, .muslx32, .gnuabin32, .gnuilp32 => return 32,
+            .gnuabi64 => return 64,
+            else => {},
+        }
+        switch (target.cpu.arch) {
+            .avr,
+            .msp430,
+            .spu_2,
+            => return 16,
+            .arc,
+            .arm,
+            .armeb,
+            .csky,
+            .hexagon,
+            .m68k,
+            .le32,
+            .mips,
+            .mipsel,
+            .powerpc,
+            .powerpcle,
+            .r600,
+            .riscv32,
+            .sparcel,
+            .tce,
+            .tcele,
+            .thumb,
+            .thumbeb,
+            .x86,
+            .xcore,
+            .nvptx,
+            .amdil,
+            .hsail,
+            .spir,
+            .kalimba,
+            .shave,
+            .lanai,
+            .wasm32,
+            .renderscript32,
+            .aarch64_32,
+            .spirv32,
+            .loongarch32,
+            .dxil,
+            .xtensa,
+            => return 32,
+            .aarch64,
+            .aarch64_be,
+            .mips64,
+            .mips64el,
+            .powerpc64,
+            .powerpc64le,
+            .riscv64,
+            .x86_64,
+            .nvptx64,
+            .le64,
+            .amdil64,
+            .hsail64,
+            .spir64,
+            .wasm64,
+            .renderscript64,
+            .amdgcn,
+            .bpfel,
+            .bpfeb,
+            .sparc64,
+            .s390x,
+            .ve,
+            .spirv64,
+            .loongarch64,
+            => return 64,
+            .sparc => return if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) 64 else 32,
+        }
+    }
     pub const CType = enum {
         char,
         short,
@@ -1930,11 +1936,10 @@
             .ulong,
             .longlong,
             .ulonglong,
+            .float,
+            .double,
             => @divExact(c_type_bit_size(t, c_type), 8),
-            .float => 4,
-            .double => 8,
             .longdouble => switch (c_type_bit_size(t, c_type)) {
                 16 => 2,
                 32 => 4,
@@ -1990,7 +1995,7 @@
             .char => return 8,
             .short, .ushort => return 16,
             .int, .uint, .float => return 32,
-            .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+            .long, .ulong => return target.ptrBitWidth(),
             .longlong, .ulonglong, .double => return 64,
             .longdouble => switch (target.cpu.arch) {
                 .x86 => switch (target.abi) {
@@ -2084,7 +2089,7 @@
             .char => return 8,
             .short, .ushort => return 16,
             .int, .uint, .float => return 32,
-            .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+            .long, .ulong => return target.ptrBitWidth(),
             .longlong, .ulonglong, .double => return 64,
             .longdouble => switch (target.cpu.arch) {
                 .x86 => switch (target.abi) {
@@ -2256,10 +2261,7 @@
     pub fn c_type_alignment(target: Target, c_type: CType) u16 {
         // Overrides for unusual alignments
         switch (target.cpu.arch) {
-            .avr => switch (c_type) {
-                .short, .ushort => return 2,
-                else => return 1,
-            },
+            .avr => return 1,
             .x86 => switch (target.os.tag) {
                 .windows, .uefi => switch (c_type) {
                     .longlong, .ulonglong, .double => return 8,
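
With float and double folded into the `@divExact` group, C type byte sizes now track whatever `c_type_bit_size` reports instead of hardcoded 4 and 8; together with the `.avr => return 1` alignment override this matches avr-gcc, where every C type is byte-aligned and double defaults to 32 bits. A sketch of the byte-size rule (the helper name is illustrative):

const std = @import("std");

// Sketch: float and double byte sizes are now derived from the target's
// reported bit size instead of being hardcoded to 4 and 8.
fn cTypeByteSizeSketch(bit_size: u16) u16 {
    return @divExact(bit_size, 8);
}

test "byte size follows bit size" {
    // A target whose double is 32 bits wide reports 4 bytes, not 8.
    try std.testing.expectEqual(@as(u16, 4), cTypeByteSizeSketch(32));
    try std.testing.expectEqual(@as(u16, 8), cTypeByteSizeSketch(64));
}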

View file

@@ -117,7 +117,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
     const triple = try native_target.linuxTriple(allocator);
     defer allocator.free(triple);
-    const qual = native_target.cpu.arch.ptrBitWidth();
+    const qual = native_target.ptrBitWidth();
     // TODO: $ ld --verbose | grep SEARCH_DIR
     // the output contains some paths that end with lib64, maybe include them too?

View file

@@ -1095,7 +1095,7 @@ pub fn getExternalExecutor(
             if (candidate.target.cpu.arch != builtin.cpu.arch) {
                 return bad_result;
             }
-            switch (candidate.target.cpu.arch.ptrBitWidth()) {
+            switch (candidate.target.ptrBitWidth()) {
                 32 => return Executor{ .wine = "wine" },
                 64 => return Executor{ .wine = "wine64" },
                 else => return bad_result,
@@ -1105,7 +1105,7 @@
         },
         .wasi => {
             if (options.allow_wasmtime) {
-                switch (candidate.target.cpu.arch.ptrBitWidth()) {
+                switch (candidate.target.ptrBitWidth()) {
                     32 => return Executor{ .wasmtime = "wasmtime" },
                     else => return bad_result,
                 }
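
The emulator binary is now chosen by the target's effective pointer width, so the ABI is accounted for when picking, say, 32-bit wine. A sketch of the selection rule above (the helper is illustrative):

const std = @import("std");

// Sketch: pick the wine binary from the ABI-aware pointer width.
fn wineBinary(ptr_bit_width: u16) ?[]const u8 {
    return switch (ptr_bit_width) {
        32 => "wine",
        64 => "wine64",
        else => null, // bad_result in the real code
    };
}

test "wine binary by pointer width" {
    try std.testing.expectEqualStrings("wine", wineBinary(32).?);
    try std.testing.expectEqualStrings("wine64", wineBinary(64).?);
}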

View file

@@ -34002,7 +34002,7 @@ fn intFitsInType(
     => switch (ty.zigTypeTag()) {
         .Int => {
             const info = ty.intInfo(target);
-            const ptr_bits = target.cpu.arch.ptrBitWidth();
+            const ptr_bits = target.ptrBitWidth();
             return switch (info.signedness) {
                 .signed => info.bits > ptr_bits,
                 .unsigned => info.bits >= ptr_bits,

View file

@@ -501,7 +501,7 @@ fn gen(self: *Self) !void {
     // (or w0 when pointer size is 32 bits). As this register
     // might get overwritten along the way, save the address
     // to the stack.
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -1512,7 +1512,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len = try self.resolveInst(bin_op.rhs);
     const len_ty = self.air.typeOf(bin_op.rhs);
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
@@ -3362,7 +3362,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -3386,7 +3386,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -4321,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
         const decl_block_index = try p9.seeDecl(func.owner_decl);
         const decl_block = p9.getDeclBlock(decl_block_index);
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_addr = p9.bases.data;
         const got_index = decl_block.got_index.?;
@@ -5929,7 +5929,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const array_ty = ptr_ty.childType();
     const array_len = @intCast(u32, array_ty.arrayLen());
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);

View file

@@ -1035,7 +1035,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         if (abi_size <= ptr_bytes) {
             if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| {

View file

@@ -826,7 +826,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         if (abi_size <= ptr_bytes) {
             if (self.register_manager.tryAllocReg(inst, gp)) |reg| {

View file

@@ -9,7 +9,7 @@ pub const Class = enum { memory, byval, integer, double_integer };
 pub fn classifyType(ty: Type, target: std.Target) Class {
     std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
-    const max_byval_size = target.cpu.arch.ptrBitWidth() * 2;
+    const max_byval_size = target.ptrBitWidth() * 2;
     switch (ty.zigTypeTag()) {
         .Struct => {
             const bit_size = ty.bitSize(target);
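
The byval cutoff here is two pointer-sized words, so the ABI-aware width changes how aggregates are classified on ILP32 ABIs. A sketch of the threshold (the helper name is illustrative):

const std = @import("std");

// Sketch: values up to two pointer-sized words may be passed byval;
// an ILP32 ABI lowers the cutoff from 128 to 64 bits.
fn maxByvalSizeSketch(ptr_bit_width: u32) u32 {
    return ptr_bit_width * 2;
}

test "byval cutoff scales with pointer width" {
    try std.testing.expectEqual(@as(u32, 64), maxByvalSizeSketch(32));
    try std.testing.expectEqual(@as(u32, 128), maxByvalSizeSketch(64));
}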

View file

@@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const array_ty = ptr_ty.childType();
     const array_len = @intCast(u32, array_ty.arrayLen());
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
@@ -2241,7 +2241,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -2427,7 +2427,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len = try self.resolveInst(bin_op.rhs);
     const len_ty = self.air.typeOf(bin_op.rhs);
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
@@ -2485,7 +2485,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {

View file

@@ -1684,7 +1684,7 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
 }
 fn ptrSize(func: *const CodeGen) u16 {
-    return @divExact(func.target.cpu.arch.ptrBitWidth(), 8);
+    return @divExact(func.target.ptrBitWidth(), 8);
 }
 fn arch(func: *const CodeGen) std.Target.Cpu.Arch {

View file

@@ -4000,7 +4000,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
         registerAlias(dst_reg, dst_abi_size),
         Memory.sib(.qword, .{
             .base = .{ .reg = src_reg },
-            .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
+            .disp = @divExact(self.target.ptrBitWidth(), 8),
         }),
     );
@@ -8131,7 +8131,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
         const decl_block_index = try p9.seeDecl(owner_decl);
         const decl_block = p9.getDeclBlock(decl_block_index);
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_addr = p9.bases.data;
         const got_index = decl_block.got_index.?;

View file

@@ -314,7 +314,7 @@ pub fn generateSymbol(
     },
     .Pointer => switch (typed_value.val.tag()) {
         .null_value => {
-            switch (target.cpu.arch.ptrBitWidth()) {
+            switch (target.ptrBitWidth()) {
                 32 => {
                     mem.writeInt(u32, try code.addManyAsArray(4), 0, endian);
                     if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4);
@@ -328,7 +328,7 @@ pub fn generateSymbol(
             return Result.ok;
         },
         .zero, .one, .int_u64, .int_big_positive => {
-            switch (target.cpu.arch.ptrBitWidth()) {
+            switch (target.ptrBitWidth()) {
                 32 => {
                     const x = typed_value.val.toUnsignedInt(target);
                     mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
@@ -970,7 +970,7 @@ fn lowerDeclRef(
         return Result.ok;
     }
-    const ptr_width = target.cpu.arch.ptrBitWidth();
+    const ptr_width = target.ptrBitWidth();
     const decl = module.declPtr(decl_index);
     const is_fn_body = decl.ty.zigTypeTag() == .Fn;
     if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
@@ -1059,7 +1059,7 @@ fn genDeclRef(
     log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) });
     const target = bin_file.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     const decl = module.declPtr(decl_index);
@@ -1137,7 +1137,7 @@ fn genUnnamedConst(
     } else if (bin_file.cast(link.File.Coff)) |_| {
         return GenResult.mcv(.{ .load_direct = local_sym_index });
     } else if (bin_file.cast(link.File.Plan9)) |p9| {
-        const ptr_bits = target.cpu.arch.ptrBitWidth();
+        const ptr_bits = target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_index = local_sym_index; // the plan9 backend returns the got_index
         const got_addr = p9.bases.data + got_index * ptr_bytes;
@@ -1168,7 +1168,7 @@ pub fn genTypedValue(
         return GenResult.mcv(.undef);
     const target = bin_file.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();
     if (!typed_value.ty.isSlice()) {
         if (typed_value.val.castTag(.variable)) |payload| {

View file

@@ -879,7 +879,7 @@ pub const CType = extern union {
         .pointer_const,
         .pointer_volatile,
         .pointer_const_volatile,
-        => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+        => @divExact(target.ptrBitWidth(), 8),
         .uint16_t, .int16_t, .zig_f16 => 2,
         .uint32_t, .int32_t, .zig_f32 => 4,
         .uint64_t, .int64_t, .zig_f64 => 8,

View file

@@ -591,7 +591,7 @@ pub const Object = struct {
     const target = mod.getTarget();
     const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space
-    const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+    const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
     const type_fields = [_]*llvm.Type{
         llvm_ptr_ty,
         llvm_usize_ty,
@@ -1114,7 +1114,7 @@ pub const Object = struct {
         llvm_arg_i += 1;
         const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
         const store_inst = builder.buildStore(param, field_ptr);
-        store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+        store_inst.setAlignment(target.ptrBitWidth() / 8);
     }
     const is_by_ref = isByRef(param_ty);
@@ -1718,7 +1718,7 @@ pub const Object = struct {
     defer gpa.free(name);
     const ptr_di_ty = dib.createPointerType(
         elem_di_ty,
-        target.cpu.arch.ptrBitWidth(),
+        target.ptrBitWidth(),
         ty.ptrAlignment(target) * 8,
         name,
     );
@@ -4071,7 +4071,7 @@ pub const DeclGen = struct {
     .Struct => {
         if (parent_ty.containerLayout() == .Packed) {
             if (!byte_aligned) return parent_llvm_ptr;
-            const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
+            const llvm_usize = dg.context.intType(target.ptrBitWidth());
             const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
             // count bits of fields before this one
             const prev_bits = b: {
@@ -4261,7 +4261,7 @@ pub const DeclGen = struct {
     // instruction is followed by a `wrap_optional`, it will return this value
     // verbatim, and the result should test as non-null.
     const target = dg.module.getTarget();
-    const int = switch (target.cpu.arch.ptrBitWidth()) {
+    const int = switch (target.ptrBitWidth()) {
         16 => llvm_usize.constInt(0xaaaa, .False),
         32 => llvm_usize.constInt(0xaaaaaaaa, .False),
        64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False),
@@ -4910,7 +4910,7 @@ pub const FuncGen = struct {
     const i = @intCast(c_uint, i_usize);
     const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
     const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
-    load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+    load_inst.setAlignment(target.ptrBitWidth() / 8);
     llvm_args.appendAssumeCapacity(load_inst);
 }
 },
@@ -5579,7 +5579,7 @@ pub const FuncGen = struct {
     const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
     const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
     const target = self.dg.module.getTarget();
-    const llvm_usize = self.context.intType(target.cpu.arch.ptrBitWidth());
+    const llvm_usize = self.context.intType(target.ptrBitWidth());
     const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
         self.builder.buildPtrToInt(cond, llvm_usize, "")
     else
@@ -5787,7 +5787,7 @@ pub const FuncGen = struct {
 fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
     const target = fg.dg.module.getTarget();
-    const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+    const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
     switch (ty.ptrSize()) {
         .Slice => {
             const len = fg.builder.buildExtractValue(ptr, 1, "");
@@ -6085,7 +6085,7 @@ pub const FuncGen = struct {
     if (field_offset == 0) {
         return field_ptr;
     }
-    const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+    const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
     const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, "");
     const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), "");
@@ -8534,7 +8534,7 @@ pub const FuncGen = struct {
     const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody");
     const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");
-    const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+    const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
     const len = switch (ptr_ty.ptrSize()) {
         .Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
         .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
@@ -10013,7 +10013,7 @@ pub const FuncGen = struct {
 fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void {
     const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
     const target = fg.dg.module.getTarget();
-    const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+    const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
     const zero = usize_llvm_ty.constInt(0, .False);
     const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False);
     const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, "");
@@ -10033,7 +10033,7 @@ pub const FuncGen = struct {
     const target = fg.dg.module.getTarget();
     if (!target_util.hasValgrindSupport(target)) return default_value;
-    const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+    const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
     const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));
     const array_llvm_ty = usize_llvm_ty.arrayType(6);
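
The LLVM backend now sizes its usize integer type and the non-null placeholder constant from the ABI-aware width, so the 16-bit case matters for targets like AVR and MSP430. A sketch of the placeholder rule above (the helper name is illustrative):

const std = @import("std");

// Sketch: a recognizable 0xaa byte pattern sized to the effective
// pointer width, including the 16-bit case.
fn nonNullPlaceholder(ptr_bit_width: u16) u64 {
    return switch (ptr_bit_width) {
        16 => 0xaaaa,
        32 => 0xaaaaaaaa,
        64 => 0xaaaaaaaa_aaaaaaaa,
        else => unreachable,
    };
}

test "placeholder scales with pointer width" {
    try std.testing.expectEqual(@as(u64, 0xaaaa), nonNullPlaceholder(16));
}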

View file

@@ -556,7 +556,7 @@ pub const DeclGen = struct {
     // TODO: Double check pointer sizes here.
     // shared pointers might be u32...
     const target = self.dg.getTarget();
-    const width = @divExact(target.cpu.arch.ptrBitWidth(), 8);
+    const width = @divExact(target.ptrBitWidth(), 8);
     if (self.size % width != 0) {
         return self.dg.todo("misaligned pointer constants", .{});
     }
@@ -1160,7 +1160,7 @@ pub const DeclGen = struct {
     /// Create an integer type that represents 'usize'.
     fn sizeType(self: *DeclGen) !SpvType.Ref {
-        return try self.intType(.unsigned, self.getTarget().cpu.arch.ptrBitWidth());
+        return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
     }
     /// Generate a union type, optionally with a known field. If the tag alignment is greater

View file

@@ -378,7 +378,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
     const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
     const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
     const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64;
-    const is_64 = arch.ptrBitWidth() == 64;
+    const is_64 = comp.getTarget().ptrBitWidth() == 64;
     const s = path.sep_str;
@@ -435,7 +435,6 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
 fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
     const target = comp.getTarget();
-    const arch = target.cpu.arch;
     const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
     const s = path.sep_str;
@@ -444,11 +443,11 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include"));
     if (target.os.tag == .linux) {
-        try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
+        try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
     }
     if (opt_nptl) |nptl| {
-        try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
+        try add_include_dirs_arch(arena, args, target, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
     }
     if (target.os.tag == .linux) {
@@ -474,12 +473,12 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv"));
-    try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
+    try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
-    try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
+    try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
@@ -489,7 +488,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
     try args.append("-I");
     try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{
-        comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi),
+        comp.zig_lib_directory.path.?, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi),
     }));
     try args.append("-I");
@@ -508,15 +507,16 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
 fn add_include_dirs_arch(
     arena: Allocator,
     args: *std.ArrayList([]const u8),
-    arch: std.Target.Cpu.Arch,
+    target: std.Target,
     opt_nptl: ?[]const u8,
     dir: []const u8,
 ) error{OutOfMemory}!void {
+    const arch = target.cpu.arch;
     const is_x86 = arch == .x86 or arch == .x86_64;
     const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
     const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
     const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64;
-    const is_64 = arch.ptrBitWidth() == 64;
+    const is_64 = target.ptrBitWidth() == 64;
     const s = path.sep_str;

View file

@@ -245,7 +245,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 }
 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
-    const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => return error.UnsupportedCOFFArchitecture,
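
The linker code paths here and below (COFF, ELF, DWARF, Plan 9) bucket the now ABI-aware width into two ranges rather than matching exact values, so 16-bit targets such as AVR and MSP430 land in the 32-bit bucket. A sketch of that bucketing (names are illustrative):

const std = @import("std");

const PtrWidth = enum { p32, p64 };

// Sketch: any width up to 32 bits is stored in 32-bit fields, anything
// up to 64 bits in 64-bit fields; wider is rejected.
fn ptrWidthSketch(bits: u16) !PtrWidth {
    return switch (bits) {
        0...32 => .p32,
        33...64 => .p64,
        else => error.UnsupportedArchitecture,
    };
}

test "16-bit targets bucket into p32" {
    try std.testing.expectEqual(PtrWidth.p32, try ptrWidthSketch(16));
    try std.testing.expectEqual(PtrWidth.p64, try ptrWidthSketch(64));
}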

View file

@@ -199,7 +199,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
     } else if (target.cpu.arch == .x86_64) {
         try argv.append("-MACHINE:X64");
     } else if (target.cpu.arch.isARM()) {
-        if (target.cpu.arch.ptrBitWidth() == 32) {
+        if (target.ptrBitWidth() == 32) {
             try argv.append("-MACHINE:ARM");
         } else {
             try argv.append("-MACHINE:ARM64");

View file

@@ -260,7 +260,7 @@ pub const DeclState = struct {
     .Pointer => {
         if (ty.isSlice()) {
             // Slices are structs: struct { .ptr = *, .len = N }
-            const ptr_bits = target.cpu.arch.ptrBitWidth();
+            const ptr_bits = target.ptrBitWidth();
             const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
             // DW.AT.structure_type
             try dbg_info_buffer.ensureUnusedCapacity(2);
@@ -751,7 +751,7 @@ pub const DeclState = struct {
     .memory,
     .linker_load,
     => {
-        const ptr_width = @intCast(u8, @divExact(target.cpu.arch.ptrBitWidth(), 8));
+        const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8));
         try dbg_info.ensureUnusedCapacity(2 + ptr_width);
         dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
             1 + ptr_width + @boolToInt(is_ptr),
@@ -928,7 +928,7 @@ const min_nop_size = 2;
 const ideal_factor = 3;
 pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
-    const ptr_width: PtrWidth = switch (target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => unreachable,

View file

@@ -273,7 +273,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 }
 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
-    const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => return error.UnsupportedELFArchitecture,
@@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     if (self.phdr_table_load_index == null) {
         self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len);
         // TODO Same as for GOT
-        const phdr_addr: u64 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
+        const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
         const p_align = self.page_size;
         try self.program_headers.append(gpa, .{
             .p_type = elf.PT_LOAD,
@@ -521,7 +521,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
     // we'll need to re-use that function anyway, in case the GOT grows and overlaps something
     // else in virtual memory.
-    const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
+    const got_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
     try self.program_headers.append(gpa, .{
         .p_type = elf.PT_LOAD,
         .p_offset = off,
@@ -544,7 +544,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     const off = self.findFreeSpace(file_size, p_align);
     log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
     // TODO Same as for GOT
-    const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
+    const rodata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
     try self.program_headers.append(gpa, .{
         .p_type = elf.PT_LOAD,
         .p_offset = off,
@@ -567,7 +567,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     const off = self.findFreeSpace(file_size, p_align);
     log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
     // TODO Same as for GOT
-    const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
+    const rwdata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
     try self.program_headers.append(gpa, .{
         .p_type = elf.PT_LOAD,
         .p_offset = off,
@@ -3180,7 +3180,7 @@ fn ptrWidthBytes(self: Elf) u8 {
 /// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes
 /// in a 32-bit ELF file.
 fn archPtrWidthBytes(self: Elf) u8 {
-    return @intCast(u8, self.base.options.target.cpu.arch.ptrBitWidth() / 8);
+    return @intCast(u8, self.base.options.target.ptrBitWidth() / 8);
 }
 fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
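
The `>= 32` comparisons above mean 16-bit targets get low load addresses that fit their address space, while everything else keeps the roomy defaults. A sketch of the GOT case (the helper name is illustrative; the constants are the ones from the diff):

const std = @import("std");

// Sketch: 32-bit-or-wider pointers get a spacious GOT base address;
// 16-bit targets fall back to a low address within their range.
fn gotBaseAddrSketch(ptr_bit_width: u16) u32 {
    return if (ptr_bit_width >= 32) 0x4000000 else 0x8000;
}

test "16-bit targets use a low GOT base" {
    try std.testing.expectEqual(@as(u32, 0x8000), gotBaseAddrSketch(16));
    try std.testing.expectEqual(@as(u32, 0x4000000), gotBaseAddrSketch(64));
}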

View file

@@ -59,7 +59,7 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
     const sym_index = self.getSymbolIndex().?;
     const got_entry_index = elf_file.got_table.lookup.get(sym_index).?;
     const target = elf_file.base.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
     return got.p_vaddr + got_entry_index * ptr_bytes;

View file

@@ -183,7 +183,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
     if (options.use_llvm)
         return error.LLVMBackendDoesNotSupportPlan9;
-    const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const sixtyfour_bit: bool = switch (options.target.ptrBitWidth()) {
         0...32 => false,
         33...64 => true,
         else => return error.UnsupportedP9Architecture,

View file

@@ -265,7 +265,7 @@ fn add_cc_args(
     });
     const target = comp.getTarget();
-    if (target.cpu.arch.isARM() and target.cpu.arch.ptrBitWidth() == 32) {
+    if (target.cpu.arch.isARM() and target.ptrBitWidth() == 32) {
         try args.append("-mfpu=vfp");
     }

View file

@@ -194,7 +194,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
     const arch_define = try std.fmt.allocPrint(arena, "-DARCH_{s}", .{
         @tagName(target.cpu.arch),
     });
-    const clang_argv: []const []const u8 = if (target.cpu.arch.ptrBitWidth() == 64)
+    const clang_argv: []const []const u8 = if (target.ptrBitWidth() == 64)
         &[_][]const u8{ "-DPTR64", arch_define }
     else
         &[_][]const u8{arch_define};

View file

@@ -2936,7 +2936,7 @@ pub const Type = extern union {
     .manyptr_const_u8_sentinel_0,
     .@"anyframe",
     .anyframe_T,
-    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
     .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) },
     .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
@@ -3007,7 +3007,7 @@ pub const Type = extern union {
     const child_type = ty.optionalChild(&buf);
     switch (child_type.zigTypeTag()) {
-        .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+        .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
         .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat),
         .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
         else => {},
@@ -3069,7 +3069,7 @@ pub const Type = extern union {
         // We'll guess "pointer-aligned", if the struct has an
         // underaligned pointer field then some allocations
        // might require explicit alignment.
-        return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
+        return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
     }
     _ = try sema.resolveTypeFields(ty);
 }
@@ -3195,7 +3195,7 @@ pub const Type = extern union {
         // We'll guess "pointer-aligned", if the union has an
         // underaligned pointer field then some allocations
        // might require explicit alignment.
-        return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
+        return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
     }
     _ = try sema.resolveTypeFields(ty);
 }
@@ -3419,17 +3419,17 @@ pub const Type = extern union {
     .manyptr_u8,
     .manyptr_const_u8,
     .manyptr_const_u8_sentinel_0,
-    => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+    => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
     .const_slice,
     .mut_slice,
     .const_slice_u8,
     .const_slice_u8_sentinel_0,
-    => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
+    => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
     .pointer => switch (ty.castTag(.pointer).?.data.size) {
-        .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
-        else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+        .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
+        else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
     },
     .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) },
@@ -3702,20 +3702,20 @@ pub const Type = extern union {
     .usize,
     .@"anyframe",
     .anyframe_T,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),
     .const_slice,
     .mut_slice,
-    => return target.cpu.arch.ptrBitWidth() * 2,
+    => return target.ptrBitWidth() * 2,
     .const_slice_u8,
     .const_slice_u8_sentinel_0,
-    => return target.cpu.arch.ptrBitWidth() * 2,
+    => return target.ptrBitWidth() * 2,
     .optional_single_const_pointer,
     .optional_single_mut_pointer,
     => {
-        return target.cpu.arch.ptrBitWidth();
+        return target.ptrBitWidth();
     },
     .single_const_pointer,
@@ -3725,18 +3725,18 @@ pub const Type = extern union {
     .c_const_pointer,
     .c_mut_pointer,
     => {
-        return target.cpu.arch.ptrBitWidth();
+        return target.ptrBitWidth();
     },
     .pointer => switch (ty.castTag(.pointer).?.data.size) {
-        .Slice => return target.cpu.arch.ptrBitWidth() * 2,
-        else => return target.cpu.arch.ptrBitWidth(),
+        .Slice => return target.ptrBitWidth() * 2,
+        else => return target.ptrBitWidth(),
     },
     .manyptr_u8,
     .manyptr_const_u8,
     .manyptr_const_u8_sentinel_0,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),
     .c_char => return target.c_type_bit_size(.char),
     .c_short => return target.c_type_bit_size(.short),
@@ -4624,8 +4624,8 @@ pub const Type = extern union {
     .i64 => return .{ .signedness = .signed, .bits = 64 },
     .u128 => return .{ .signedness = .unsigned, .bits = 128 },
     .i128 => return .{ .signedness = .signed, .bits = 128 },
-    .usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
-    .isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
+    .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() },
+    .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() },
     .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) },
     .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
     .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },

View file

@@ -1922,7 +1922,7 @@ pub const Value = extern union {
     .variable,
     .eu_payload_ptr,
     .opt_payload_ptr,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),
     else => {
         var buffer: BigIntSpace = undefined;