std.Target adjustments

* move `ptrBitWidth` from Arch to Target since it needs to know about the abi
* double isn't always 8 bytes
* AVR uses 1-byte alignment for everything in GCC
parent dbd44658ff
commit ca16f1e8a7
28 changed files with 180 additions and 168 deletions
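The heart of the change is an API move: pointer width is now queried on the full std.Target rather than on Cpu.Arch, because the ABI can override the architecture's natural width (x32 and the ILP32-style ABIs use 32-bit pointers on 64-bit CPUs). A minimal before/after sketch; the `builtin` fields are the same ones this diff itself migrates:

    const std = @import("std");
    const builtin = @import("builtin");

    // Before this commit: width was a property of the architecture alone.
    //     const ptr_bits = builtin.cpu.arch.ptrBitWidth();
    // After: the whole target is consulted, so e.g. x86_64-linux-gnux32
    // reports 32 rather than 64.
    const ptr_bits = builtin.target.ptrBitWidth();

    comptime {
        std.debug.assert(ptr_bits == @bitSizeOf(usize));
    }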
@@ -322,7 +322,7 @@ pub const RTLD = struct {
 pub const dl_phdr_info = extern struct {
     /// Module relocation base.
-    dlpi_addr: if (builtin.cpu.arch.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr,
+    dlpi_addr: if (builtin.target.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr,
     /// Module name.
     dlpi_name: ?[*:0]const u8,
     /// Pointer to module's phdr.
@@ -5847,8 +5847,18 @@ pub const AUDIT = struct {
     fn toAudit(arch: std.Target.Cpu.Arch) u32 {
         var res: u32 = @enumToInt(arch.toElfMachine());
         if (arch.endian() == .Little) res |= LE;
-        if (arch.ptrBitWidth() == 64) res |= @"64BIT";
+
+        switch (arch) {
+            .aarch64,
+            .mips64,
+            .mips64el,
+            .powerpc64,
+            .powerpc64le,
+            .riscv64,
+            .sparc64,
+            .x86_64,
+            => res |= @"64BIT",
+            else => {},
+        }
         return res;
     }
 };
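`toAudit` still receives only a `Cpu.Arch`, so it can no longer ask for a pointer width; the explicit prong list re-encodes the old `arch.ptrBitWidth() == 64` test. A standalone sketch of the same predicate (not part of the diff):

    const std = @import("std");

    // Mirrors the new switch in toAudit: these are the architectures that
    // previously satisfied `arch.ptrBitWidth() == 64`.
    fn is64BitAuditArch(arch: std.Target.Cpu.Arch) bool {
        return switch (arch) {
            .aarch64, .mips64, .mips64el, .powerpc64, .powerpc64le, .riscv64, .sparc64, .x86_64 => true,
            else => false,
        };
    }

    test "64-bit audit architectures" {
        try std.testing.expect(is64BitAuditArch(.x86_64));
        try std.testing.expect(!is64BitAuditArch(.x86));
    }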
@@ -1189,77 +1189,6 @@ pub const Target = struct {
         };
     }

-    pub fn ptrBitWidth(arch: Arch) u16 {
-        switch (arch) {
-            .avr,
-            .msp430,
-            .spu_2,
-            => return 16,
-
-            .arc,
-            .arm,
-            .armeb,
-            .csky,
-            .hexagon,
-            .m68k,
-            .le32,
-            .mips,
-            .mipsel,
-            .powerpc,
-            .powerpcle,
-            .r600,
-            .riscv32,
-            .sparc,
-            .sparcel,
-            .tce,
-            .tcele,
-            .thumb,
-            .thumbeb,
-            .x86,
-            .xcore,
-            .nvptx,
-            .amdil,
-            .hsail,
-            .spir,
-            .kalimba,
-            .shave,
-            .lanai,
-            .wasm32,
-            .renderscript32,
-            .aarch64_32,
-            .spirv32,
-            .loongarch32,
-            .dxil,
-            .xtensa,
-            => return 32,
-
-            .aarch64,
-            .aarch64_be,
-            .mips64,
-            .mips64el,
-            .powerpc64,
-            .powerpc64le,
-            .riscv64,
-            .x86_64,
-            .nvptx64,
-            .le64,
-            .amdil64,
-            .hsail64,
-            .spir64,
-            .wasm64,
-            .renderscript64,
-            .amdgcn,
-            .bpfel,
-            .bpfeb,
-            .sparc64,
-            .s390x,
-            .ve,
-            .spirv64,
-            .loongarch64,
-            => return 64,
-        }
-    }
-
     /// Returns a name that matches the lib/std/target/* source file name.
     pub fn genericName(arch: Arch) []const u8 {
         return switch (arch) {
@@ -1621,7 +1550,7 @@ pub const Target = struct {
             const copy = S.copy;

             if (self.abi == .android) {
-                const suffix = if (self.cpu.arch.ptrBitWidth() == 64) "64" else "";
+                const suffix = if (self.ptrBitWidth() == 64) "64" else "";
                 return print(&result, "/system/bin/linker{s}", .{suffix});
             }
@@ -1904,6 +1833,83 @@ pub const Target = struct {
         };
     }

+    pub fn ptrBitWidth(target: std.Target) u16 {
+        switch (target.abi) {
+            .gnux32, .muslx32, .gnuabin32, .gnuilp32 => return 32,
+            .gnuabi64 => return 64,
+            else => {},
+        }
+        switch (target.cpu.arch) {
+            .avr,
+            .msp430,
+            .spu_2,
+            => return 16,
+
+            .arc,
+            .arm,
+            .armeb,
+            .csky,
+            .hexagon,
+            .m68k,
+            .le32,
+            .mips,
+            .mipsel,
+            .powerpc,
+            .powerpcle,
+            .r600,
+            .riscv32,
+            .sparcel,
+            .tce,
+            .tcele,
+            .thumb,
+            .thumbeb,
+            .x86,
+            .xcore,
+            .nvptx,
+            .amdil,
+            .hsail,
+            .spir,
+            .kalimba,
+            .shave,
+            .lanai,
+            .wasm32,
+            .renderscript32,
+            .aarch64_32,
+            .spirv32,
+            .loongarch32,
+            .dxil,
+            .xtensa,
+            => return 32,
+
+            .aarch64,
+            .aarch64_be,
+            .mips64,
+            .mips64el,
+            .powerpc64,
+            .powerpc64le,
+            .riscv64,
+            .x86_64,
+            .nvptx64,
+            .le64,
+            .amdil64,
+            .hsail64,
+            .spir64,
+            .wasm64,
+            .renderscript64,
+            .amdgcn,
+            .bpfel,
+            .bpfeb,
+            .sparc64,
+            .s390x,
+            .ve,
+            .spirv64,
+            .loongarch64,
+            => return 64,
+
+            .sparc => return if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) 64 else 32,
+        }
+    }
+
     pub const CType = enum {
         char,
         short,
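The replacement lives on Target and checks the ABI first, then falls back to the architecture; SPARC additionally consults the CPU feature set (v9 implies 64-bit). A hedged usage sketch, assuming the std.zig.CrossTarget.toTarget() resolution API of the Zig version this commit targets:

    const std = @import("std");

    test "abi overrides the arch's natural pointer width" {
        // CrossTarget and toTarget() are assumptions for the std of this commit.
        const x32 = (std.zig.CrossTarget{ .cpu_arch = .x86_64, .abi = .gnux32 }).toTarget();
        const lp64 = (std.zig.CrossTarget{ .cpu_arch = .x86_64, .abi = .gnu }).toTarget();
        try std.testing.expectEqual(@as(u16, 32), x32.ptrBitWidth());
        try std.testing.expectEqual(@as(u16, 64), lp64.ptrBitWidth());
    }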
@@ -1930,11 +1936,10 @@ pub const Target = struct {
             .ulong,
             .longlong,
             .ulonglong,
+            .float,
+            .double,
             => @divExact(c_type_bit_size(t, c_type), 8),

-            .float => 4,
-            .double => 8,
-
             .longdouble => switch (c_type_bit_size(t, c_type)) {
                 16 => 2,
                 32 => 4,
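This is the "double isn't always 8 bytes" half of the commit: the byte sizes of float and double are now derived from c_type_bit_size instead of being hardcoded to 4 and 8, which matters on targets such as AVR where GCC's double is 32-bit. A small sketch of the invariant the patch establishes (target resolution via CrossTarget as in the previous example):

    const std = @import("std");

    test "c_type_byte_size follows c_type_bit_size" {
        const native = (std.zig.CrossTarget{}).toTarget();
        try std.testing.expectEqual(
            @divExact(native.c_type_bit_size(.double), 8),
            native.c_type_byte_size(.double),
        );
    }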
@@ -1990,7 +1995,7 @@ pub const Target = struct {
             .char => return 8,
             .short, .ushort => return 16,
             .int, .uint, .float => return 32,
-            .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+            .long, .ulong => return target.ptrBitWidth(),
             .longlong, .ulonglong, .double => return 64,
             .longdouble => switch (target.cpu.arch) {
                 .x86 => switch (target.abi) {
@@ -2084,7 +2089,7 @@ pub const Target = struct {
             .char => return 8,
             .short, .ushort => return 16,
             .int, .uint, .float => return 32,
-            .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+            .long, .ulong => return target.ptrBitWidth(),
             .longlong, .ulonglong, .double => return 64,
             .longdouble => switch (target.cpu.arch) {
                 .x86 => switch (target.abi) {
@@ -2256,10 +2261,7 @@ pub const Target = struct {
     pub fn c_type_alignment(target: Target, c_type: CType) u16 {
         // Overrides for unusual alignments
         switch (target.cpu.arch) {
-            .avr => switch (c_type) {
-                .short, .ushort => return 2,
-                else => return 1,
-            },
+            .avr => return 1,
             .x86 => switch (target.os.tag) {
                 .windows, .uefi => switch (c_type) {
                     .longlong, .ulonglong, .double => return 8,
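And the "AVR uses 1-byte alignment for everything" half: the old code special-cased short to 2-byte alignment, but avr-gcc aligns every scalar C type to 1 byte, so the override collapses to a plain `return 1`. Sketch under the same CrossTarget assumption:

    const std = @import("std");

    test "every C type is 1-byte aligned on AVR" {
        const avr = (std.zig.CrossTarget{ .cpu_arch = .avr, .os_tag = .freestanding }).toTarget();
        inline for (.{ .char, .short, .int, .long, .longlong, .double }) |c_type| {
            try std.testing.expectEqual(@as(u16, 1), avr.c_type_alignment(c_type));
        }
    }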
@@ -117,7 +117,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
     const triple = try native_target.linuxTriple(allocator);
     defer allocator.free(triple);

-    const qual = native_target.cpu.arch.ptrBitWidth();
+    const qual = native_target.ptrBitWidth();

     // TODO: $ ld --verbose | grep SEARCH_DIR
     // the output contains some paths that end with lib64, maybe include them too?
@@ -1095,7 +1095,7 @@ pub fn getExternalExecutor(
             if (candidate.target.cpu.arch != builtin.cpu.arch) {
                 return bad_result;
             }
-            switch (candidate.target.cpu.arch.ptrBitWidth()) {
+            switch (candidate.target.ptrBitWidth()) {
                 32 => return Executor{ .wine = "wine" },
                 64 => return Executor{ .wine = "wine64" },
                 else => return bad_result,
@@ -1105,7 +1105,7 @@ pub fn getExternalExecutor(
         },
         .wasi => {
             if (options.allow_wasmtime) {
-                switch (candidate.target.cpu.arch.ptrBitWidth()) {
+                switch (candidate.target.ptrBitWidth()) {
                     32 => return Executor{ .wasmtime = "wasmtime" },
                     else => return bad_result,
                 }
@@ -34002,7 +34002,7 @@ fn intFitsInType(
         => switch (ty.zigTypeTag()) {
             .Int => {
                 const info = ty.intInfo(target);
-                const ptr_bits = target.cpu.arch.ptrBitWidth();
+                const ptr_bits = target.ptrBitWidth();
                 return switch (info.signedness) {
                     .signed => info.bits > ptr_bits,
                     .unsigned => info.bits >= ptr_bits,
@@ -501,7 +501,7 @@ fn gen(self: *Self) !void {
     // (or w0 when pointer size is 32 bits). As this register
     // might get overwritten along the way, save the address
     // to the stack.
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
     const ret_ptr_reg = self.registerAlias(.x0, Type.usize);

@@ -1512,7 +1512,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len = try self.resolveInst(bin_op.rhs);
     const len_ty = self.air.typeOf(bin_op.rhs);

-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);

     const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
@@ -3362,7 +3362,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -3386,7 +3386,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -4321,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
         const decl_block_index = try p9.seeDecl(func.owner_decl);
         const decl_block = p9.getDeclBlock(decl_block_index);
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_addr = p9.bases.data;
         const got_index = decl_block.got_index.?;
@@ -5929,7 +5929,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const array_ty = ptr_ty.childType();
     const array_len = @intCast(u32, array_ty.arrayLen());

-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);

     const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
@@ -1035,7 +1035,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst

     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         if (abi_size <= ptr_bytes) {
             if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| {
@@ -826,7 +826,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {

     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         if (abi_size <= ptr_bytes) {
             if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
@@ -9,7 +9,7 @@ pub const Class = enum { memory, byval, integer, double_integer };
 pub fn classifyType(ty: Type, target: std.Target) Class {
     std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());

-    const max_byval_size = target.cpu.arch.ptrBitWidth() * 2;
+    const max_byval_size = target.ptrBitWidth() * 2;
     switch (ty.zigTypeTag()) {
         .Struct => {
             const bit_size = ty.bitSize(target);
@@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const array_ty = ptr_ty.childType();
     const array_len = @intCast(u32, array_ty.arrayLen());

-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);

     const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
@@ -2241,7 +2241,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -2427,7 +2427,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len = try self.resolveInst(bin_op.rhs);
     const len_ty = self.air.typeOf(bin_op.rhs);

-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+    const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);

     const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
@@ -2485,7 +2485,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -1684,7 +1684,7 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
 }

 fn ptrSize(func: *const CodeGen) u16 {
-    return @divExact(func.target.cpu.arch.ptrBitWidth(), 8);
+    return @divExact(func.target.ptrBitWidth(), 8);
 }

 fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
@@ -4000,7 +4000,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
         registerAlias(dst_reg, dst_abi_size),
         Memory.sib(.qword, .{
             .base = .{ .reg = src_reg },
-            .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
+            .disp = @divExact(self.target.ptrBitWidth(), 8),
         }),
     );

@@ -8131,7 +8131,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
         const decl_block_index = try p9.seeDecl(owner_decl);
         const decl_block = p9.getDeclBlock(decl_block_index);
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+        const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_addr = p9.bases.data;
         const got_index = decl_block.got_index.?;
@@ -314,7 +314,7 @@ pub fn generateSymbol(
     },
     .Pointer => switch (typed_value.val.tag()) {
         .null_value => {
-            switch (target.cpu.arch.ptrBitWidth()) {
+            switch (target.ptrBitWidth()) {
                 32 => {
                     mem.writeInt(u32, try code.addManyAsArray(4), 0, endian);
                     if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4);
@@ -328,7 +328,7 @@ pub fn generateSymbol(
             return Result.ok;
         },
         .zero, .one, .int_u64, .int_big_positive => {
-            switch (target.cpu.arch.ptrBitWidth()) {
+            switch (target.ptrBitWidth()) {
                 32 => {
                     const x = typed_value.val.toUnsignedInt(target);
                     mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
@@ -970,7 +970,7 @@ fn lowerDeclRef(
         return Result.ok;
     }

-    const ptr_width = target.cpu.arch.ptrBitWidth();
+    const ptr_width = target.ptrBitWidth();
     const decl = module.declPtr(decl_index);
     const is_fn_body = decl.ty.zigTypeTag() == .Fn;
     if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
@@ -1059,7 +1059,7 @@ fn genDeclRef(
     log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) });

     const target = bin_file.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);

     const decl = module.declPtr(decl_index);
@@ -1137,7 +1137,7 @@ fn genUnnamedConst(
     } else if (bin_file.cast(link.File.Coff)) |_| {
         return GenResult.mcv(.{ .load_direct = local_sym_index });
     } else if (bin_file.cast(link.File.Plan9)) |p9| {
-        const ptr_bits = target.cpu.arch.ptrBitWidth();
+        const ptr_bits = target.ptrBitWidth();
         const ptr_bytes: u64 = @divExact(ptr_bits, 8);
         const got_index = local_sym_index; // the plan9 backend returns the got_index
         const got_addr = p9.bases.data + got_index * ptr_bytes;
@@ -1168,7 +1168,7 @@ pub fn genTypedValue(
         return GenResult.mcv(.undef);

     const target = bin_file.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();

     if (!typed_value.ty.isSlice()) {
         if (typed_value.val.castTag(.variable)) |payload| {
@@ -879,7 +879,7 @@ pub const CType = extern union {
             .pointer_const,
             .pointer_volatile,
             .pointer_const_volatile,
-            => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+            => @divExact(target.ptrBitWidth(), 8),
             .uint16_t, .int16_t, .zig_f16 => 2,
             .uint32_t, .int32_t, .zig_f32 => 4,
             .uint64_t, .int64_t, .zig_f64 => 8,
@@ -591,7 +591,7 @@ pub const Object = struct {
         const target = mod.getTarget();

         const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space
-        const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+        const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
         const type_fields = [_]*llvm.Type{
             llvm_ptr_ty,
             llvm_usize_ty,
@@ -1114,7 +1114,7 @@ pub const Object = struct {
                 llvm_arg_i += 1;
                 const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
                 const store_inst = builder.buildStore(param, field_ptr);
-                store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+                store_inst.setAlignment(target.ptrBitWidth() / 8);
             }

             const is_by_ref = isByRef(param_ty);
@@ -1718,7 +1718,7 @@ pub const Object = struct {
         defer gpa.free(name);
         const ptr_di_ty = dib.createPointerType(
             elem_di_ty,
-            target.cpu.arch.ptrBitWidth(),
+            target.ptrBitWidth(),
             ty.ptrAlignment(target) * 8,
             name,
         );
@@ -4071,7 +4071,7 @@ pub const DeclGen = struct {
             .Struct => {
                 if (parent_ty.containerLayout() == .Packed) {
                     if (!byte_aligned) return parent_llvm_ptr;
-                    const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
+                    const llvm_usize = dg.context.intType(target.ptrBitWidth());
                     const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
                     // count bits of fields before this one
                     const prev_bits = b: {
@@ -4261,7 +4261,7 @@ pub const DeclGen = struct {
         // instruction is followed by a `wrap_optional`, it will return this value
         // verbatim, and the result should test as non-null.
         const target = dg.module.getTarget();
-        const int = switch (target.cpu.arch.ptrBitWidth()) {
+        const int = switch (target.ptrBitWidth()) {
             16 => llvm_usize.constInt(0xaaaa, .False),
             32 => llvm_usize.constInt(0xaaaaaaaa, .False),
             64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False),
@@ -4910,7 +4910,7 @@ pub const FuncGen = struct {
                 const i = @intCast(c_uint, i_usize);
                 const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
                 const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
-                load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+                load_inst.setAlignment(target.ptrBitWidth() / 8);
                 llvm_args.appendAssumeCapacity(load_inst);
             }
         },
@@ -5579,7 +5579,7 @@ pub const FuncGen = struct {
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
         const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
         const target = self.dg.module.getTarget();
-        const llvm_usize = self.context.intType(target.cpu.arch.ptrBitWidth());
+        const llvm_usize = self.context.intType(target.ptrBitWidth());
         const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
             self.builder.buildPtrToInt(cond, llvm_usize, "")
         else
@@ -5787,7 +5787,7 @@ pub const FuncGen = struct {

     fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
         const target = fg.dg.module.getTarget();
-        const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+        const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
         switch (ty.ptrSize()) {
             .Slice => {
                 const len = fg.builder.buildExtractValue(ptr, 1, "");
@@ -6085,7 +6085,7 @@ pub const FuncGen = struct {
         if (field_offset == 0) {
             return field_ptr;
         }
-        const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+        const llvm_usize_ty = self.context.intType(target.ptrBitWidth());

         const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, "");
         const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), "");
@@ -8534,7 +8534,7 @@ pub const FuncGen = struct {
         const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody");
         const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");

-        const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+        const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
         const len = switch (ptr_ty.ptrSize()) {
             .Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
             .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
@@ -10013,7 +10013,7 @@ pub const FuncGen = struct {
     fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void {
         const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
         const target = fg.dg.module.getTarget();
-        const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+        const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
         const zero = usize_llvm_ty.constInt(0, .False);
         const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False);
         const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, "");
@@ -10033,7 +10033,7 @@ pub const FuncGen = struct {
         const target = fg.dg.module.getTarget();
         if (!target_util.hasValgrindSupport(target)) return default_value;

-        const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+        const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
         const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));

         const array_llvm_ty = usize_llvm_ty.arrayType(6);
@@ -556,7 +556,7 @@ pub const DeclGen = struct {
             // TODO: Double check pointer sizes here.
             // shared pointers might be u32...
             const target = self.dg.getTarget();
-            const width = @divExact(target.cpu.arch.ptrBitWidth(), 8);
+            const width = @divExact(target.ptrBitWidth(), 8);
             if (self.size % width != 0) {
                 return self.dg.todo("misaligned pointer constants", .{});
             }
@@ -1160,7 +1160,7 @@ pub const DeclGen = struct {

     /// Create an integer type that represents 'usize'.
     fn sizeType(self: *DeclGen) !SpvType.Ref {
-        return try self.intType(.unsigned, self.getTarget().cpu.arch.ptrBitWidth());
+        return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
     }

     /// Generate a union type, optionally with a known field. If the tag alignment is greater
@@ -378,7 +378,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
     const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
     const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
     const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64;
-    const is_64 = arch.ptrBitWidth() == 64;
+    const is_64 = comp.getTarget().ptrBitWidth() == 64;

     const s = path.sep_str;

@@ -435,7 +435,6 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![

 fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
     const target = comp.getTarget();
-    const arch = target.cpu.arch;
     const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";

     const s = path.sep_str;

@@ -444,11 +443,11 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include"));

     if (target.os.tag == .linux) {
-        try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
+        try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
     }

     if (opt_nptl) |nptl| {
-        try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
+        try add_include_dirs_arch(arena, args, target, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
     }

     if (target.os.tag == .linux) {
@@ -474,12 +473,12 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv"));

-    try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
+    try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));

     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));

-    try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
+    try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));

     try args.append("-I");
     try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
@@ -489,7 +488,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([

     try args.append("-I");
     try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{
-        comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi),
+        comp.zig_lib_directory.path.?, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi),
     }));

     try args.append("-I");
@@ -508,15 +507,16 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
 fn add_include_dirs_arch(
     arena: Allocator,
     args: *std.ArrayList([]const u8),
-    arch: std.Target.Cpu.Arch,
+    target: std.Target,
     opt_nptl: ?[]const u8,
     dir: []const u8,
 ) error{OutOfMemory}!void {
+    const arch = target.cpu.arch;
     const is_x86 = arch == .x86 or arch == .x86_64;
     const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
     const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
     const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64;
-    const is_64 = arch.ptrBitWidth() == 64;
+    const is_64 = target.ptrBitWidth() == 64;

     const s = path.sep_str;
@@ -245,7 +245,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 }

 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
-    const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => return error.UnsupportedCOFFArchitecture,
@@ -199,7 +199,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
     } else if (target.cpu.arch == .x86_64) {
         try argv.append("-MACHINE:X64");
     } else if (target.cpu.arch.isARM()) {
-        if (target.cpu.arch.ptrBitWidth() == 32) {
+        if (target.ptrBitWidth() == 32) {
             try argv.append("-MACHINE:ARM");
         } else {
             try argv.append("-MACHINE:ARM64");
@@ -260,7 +260,7 @@ pub const DeclState = struct {
     .Pointer => {
         if (ty.isSlice()) {
             // Slices are structs: struct { .ptr = *, .len = N }
-            const ptr_bits = target.cpu.arch.ptrBitWidth();
+            const ptr_bits = target.ptrBitWidth();
             const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
             // DW.AT.structure_type
             try dbg_info_buffer.ensureUnusedCapacity(2);
@@ -751,7 +751,7 @@ pub const DeclState = struct {
     .memory,
     .linker_load,
     => {
-        const ptr_width = @intCast(u8, @divExact(target.cpu.arch.ptrBitWidth(), 8));
+        const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8));
         try dbg_info.ensureUnusedCapacity(2 + ptr_width);
         dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
             1 + ptr_width + @boolToInt(is_ptr),
@@ -928,7 +928,7 @@ const min_nop_size = 2;
 const ideal_factor = 3;

 pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
-    const ptr_width: PtrWidth = switch (target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => unreachable,
@@ -273,7 +273,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 }

 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
-    const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
         else => return error.UnsupportedELFArchitecture,
@@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     if (self.phdr_table_load_index == null) {
         self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len);
         // TODO Same as for GOT
-        const phdr_addr: u64 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
+        const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
         const p_align = self.page_size;
         try self.program_headers.append(gpa, .{
             .p_type = elf.PT_LOAD,
@@ -521,7 +521,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
         // we'll need to re-use that function anyway, in case the GOT grows and overlaps something
         // else in virtual memory.
-        const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
+        const got_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
         try self.program_headers.append(gpa, .{
             .p_type = elf.PT_LOAD,
             .p_offset = off,
@@ -544,7 +544,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         const off = self.findFreeSpace(file_size, p_align);
         log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
         // TODO Same as for GOT
-        const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
+        const rodata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
         try self.program_headers.append(gpa, .{
             .p_type = elf.PT_LOAD,
             .p_offset = off,
@@ -567,7 +567,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         const off = self.findFreeSpace(file_size, p_align);
         log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
         // TODO Same as for GOT
-        const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
+        const rwdata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
         try self.program_headers.append(gpa, .{
             .p_type = elf.PT_LOAD,
             .p_offset = off,
@@ -3180,7 +3180,7 @@ fn ptrWidthBytes(self: Elf) u8 {
 /// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes
 /// in a 32-bit ELF file.
 fn archPtrWidthBytes(self: Elf) u8 {
-    return @intCast(u8, self.base.options.target.cpu.arch.ptrBitWidth() / 8);
+    return @intCast(u8, self.base.options.target.ptrBitWidth() / 8);
 }

 fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
@@ -59,7 +59,7 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
     const sym_index = self.getSymbolIndex().?;
     const got_entry_index = elf_file.got_table.lookup.get(sym_index).?;
     const target = elf_file.base.options.target;
-    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bits = target.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
     return got.p_vaddr + got_entry_index * ptr_bytes;
@@ -183,7 +183,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
 pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
     if (options.use_llvm)
         return error.LLVMBackendDoesNotSupportPlan9;
-    const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) {
+    const sixtyfour_bit: bool = switch (options.target.ptrBitWidth()) {
         0...32 => false,
         33...64 => true,
         else => return error.UnsupportedP9Architecture,
@@ -265,7 +265,7 @@ fn add_cc_args(
     });

     const target = comp.getTarget();
-    if (target.cpu.arch.isARM() and target.cpu.arch.ptrBitWidth() == 32) {
+    if (target.cpu.arch.isARM() and target.ptrBitWidth() == 32) {
         try args.append("-mfpu=vfp");
     }
@@ -194,7 +194,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
     const arch_define = try std.fmt.allocPrint(arena, "-DARCH_{s}", .{
         @tagName(target.cpu.arch),
     });
-    const clang_argv: []const []const u8 = if (target.cpu.arch.ptrBitWidth() == 64)
+    const clang_argv: []const []const u8 = if (target.ptrBitWidth() == 64)
         &[_][]const u8{ "-DPTR64", arch_define }
     else
         &[_][]const u8{arch_define};

src/type.zig (36 changed lines)
@@ -2936,7 +2936,7 @@ pub const Type = extern union {
     .manyptr_const_u8_sentinel_0,
     .@"anyframe",
     .anyframe_T,
-    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },

     .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) },
     .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
@@ -3007,7 +3007,7 @@ pub const Type = extern union {
     const child_type = ty.optionalChild(&buf);

     switch (child_type.zigTypeTag()) {
-        .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+        .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
         .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat),
         .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
         else => {},
@@ -3069,7 +3069,7 @@ pub const Type = extern union {
         // We'll guess "pointer-aligned", if the struct has an
         // underaligned pointer field then some allocations
         // might require explicit alignment.
-        return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
+        return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
     }
     _ = try sema.resolveTypeFields(ty);
 }
@@ -3195,7 +3195,7 @@ pub const Type = extern union {
         // We'll guess "pointer-aligned", if the union has an
         // underaligned pointer field then some allocations
         // might require explicit alignment.
-        return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
+        return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
     }
     _ = try sema.resolveTypeFields(ty);
 }
@@ -3419,17 +3419,17 @@ pub const Type = extern union {
     .manyptr_u8,
     .manyptr_const_u8,
     .manyptr_const_u8_sentinel_0,
-    => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+    => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },

     .const_slice,
     .mut_slice,
     .const_slice_u8,
     .const_slice_u8_sentinel_0,
-    => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
+    => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },

     .pointer => switch (ty.castTag(.pointer).?.data.size) {
-        .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
-        else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
+        .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
+        else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
     },

     .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) },
@@ -3702,20 +3702,20 @@ pub const Type = extern union {
     .usize,
     .@"anyframe",
     .anyframe_T,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),

     .const_slice,
     .mut_slice,
-    => return target.cpu.arch.ptrBitWidth() * 2,
+    => return target.ptrBitWidth() * 2,

     .const_slice_u8,
     .const_slice_u8_sentinel_0,
-    => return target.cpu.arch.ptrBitWidth() * 2,
+    => return target.ptrBitWidth() * 2,

     .optional_single_const_pointer,
     .optional_single_mut_pointer,
     => {
-        return target.cpu.arch.ptrBitWidth();
+        return target.ptrBitWidth();
     },

     .single_const_pointer,
@@ -3725,18 +3725,18 @@ pub const Type = extern union {
     .c_const_pointer,
     .c_mut_pointer,
     => {
-        return target.cpu.arch.ptrBitWidth();
+        return target.ptrBitWidth();
     },

     .pointer => switch (ty.castTag(.pointer).?.data.size) {
-        .Slice => return target.cpu.arch.ptrBitWidth() * 2,
-        else => return target.cpu.arch.ptrBitWidth(),
+        .Slice => return target.ptrBitWidth() * 2,
+        else => return target.ptrBitWidth(),
     },

     .manyptr_u8,
     .manyptr_const_u8,
     .manyptr_const_u8_sentinel_0,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),

     .c_char => return target.c_type_bit_size(.char),
     .c_short => return target.c_type_bit_size(.short),
@@ -4624,8 +4624,8 @@ pub const Type = extern union {
     .i64 => return .{ .signedness = .signed, .bits = 64 },
     .u128 => return .{ .signedness = .unsigned, .bits = 128 },
     .i128 => return .{ .signedness = .signed, .bits = 128 },
-    .usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
-    .isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
+    .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() },
+    .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() },
     .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) },
     .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
     .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },
@@ -1922,7 +1922,7 @@ pub const Value = extern union {
     .variable,
     .eu_payload_ptr,
     .opt_payload_ptr,
-    => return target.cpu.arch.ptrBitWidth(),
+    => return target.ptrBitWidth(),

     else => {
         var buffer: BigIntSpace = undefined;