Elf2: create a new linker from scratch

This iteration already has significantly better incremental support.

Closes #24110
Jacob Young 2025-08-30 12:08:18 -04:00 committed by Andrew Kelley
parent 2a97e0af6d
commit f58200e3f2
45 changed files with 4140 additions and 535 deletions
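
For orientation, a minimal sketch of opting into the new linker from a build script, based on the `use_new_linker` option and the `-fnew-linker` flag wired up in this diff; the project layout here is hypothetical:

const std = @import("std");

// build.zig sketch; only `use_new_linker` is new in this branch, the rest is
// the usual std.Build boilerplate.
pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_module = b.createModule(.{
            .root_source_file = b.path("src/main.zig"),
            .target = b.standardTargetOptions(.{}),
            .optimize = b.standardOptimizeOption(.{}),
        }),
    });
    // Forwarded to the compiler as -fnew-linker / -fno-new-linker.
    exe.use_new_linker = true;
    b.installArtifact(exe);
}

Per `Config.resolve` further down, leaving the option unset falls back to the `incremental` setting, and combining it with LLD or an unsupported object format is a resolve error.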


@ -583,6 +583,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf/relocatable.zig
src/link/Elf/relocation.zig
src/link/Elf/synthetic_sections.zig
src/link/Elf2.zig
src/link/Goff.zig
src/link/LdScript.zig
src/link/Lld.zig
@ -612,6 +613,7 @@ set(ZIG_STAGE2_SOURCES
src/link/MachO/synthetic.zig
src/link/MachO/Thunk.zig
src/link/MachO/uuid.zig
src/link/MappedFile.zig
src/link/Queue.zig
src/link/StringTable.zig
src/link/Wasm.zig


@ -202,6 +202,7 @@ pub fn build(b: *std.Build) !void {
});
exe.pie = pie;
exe.entitlements = entitlements;
exe.use_new_linker = b.option(bool, "new-linker", "Use the new linker");
const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
exe.use_llvm = use_llvm;

ci/x86_64-linux-debug-llvm.sh Normal file → Executable file

@ -203,7 +203,7 @@ fn add_adjusted(a: f64, b: f64) f64 {
if (uhii & 1 == 0) {
// hibits += copysign(1.0, sum.hi, sum.lo)
const uloi: u64 = @bitCast(sum.lo);
uhii += 1 - ((uhii ^ uloi) >> 62);
uhii = uhii + 1 - ((uhii ^ uloi) >> 62);
sum.hi = @bitCast(uhii);
}
}
@ -217,7 +217,7 @@ fn add_and_denorm(a: f64, b: f64, scale: i32) f64 {
const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1;
if ((bits_lost != 1) == (uhii & 1 != 0)) {
const uloi: u64 = @bitCast(sum.lo);
uhii += 1 - (((uhii ^ uloi) >> 62) & 2);
uhii = uhii + 1 - (((uhii ^ uloi) >> 62) & 2);
sum.hi = @bitCast(uhii);
}
}
@ -259,7 +259,7 @@ fn add_adjusted128(a: f128, b: f128) f128 {
if (uhii & 1 == 0) {
// hibits += copysign(1.0, sum.hi, sum.lo)
const uloi: u128 = @bitCast(sum.lo);
uhii += 1 - ((uhii ^ uloi) >> 126);
uhii = uhii + 1 - ((uhii ^ uloi) >> 126);
sum.hi = @bitCast(uhii);
}
}
@ -284,7 +284,7 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 {
const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1;
if ((bits_lost != 1) == (uhii & 1 != 0)) {
const uloi: u128 = @bitCast(sum.lo);
uhii += 1 - (((uhii ^ uloi) >> 126) & 2);
uhii = uhii + 1 - (((uhii ^ uloi) >> 126) & 2);
sum.hi = @bitCast(uhii);
}
}


@ -192,6 +192,7 @@ want_lto: ?bool = null,
use_llvm: ?bool,
use_lld: ?bool,
use_new_linker: ?bool,
/// Corresponds to the `-fallow-so-scripts` / `-fno-allow-so-scripts` CLI
/// flags, overriding the global user setting provided to the `zig build`
@ -441,6 +442,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.use_new_linker = null,
.zig_process = null,
};
@ -1096,6 +1098,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
try addFlag(&zig_args, "llvm", compile.use_llvm);
try addFlag(&zig_args, "lld", compile.use_lld);
try addFlag(&zig_args, "new-linker", compile.use_new_linker);
if (compile.root_module.resolved_target.?.query.ofmt) |ofmt| {
try zig_args.append(try std.fmt.allocPrint(arena, "-ofmt={s}", .{@tagName(ofmt)}));


@ -323,6 +323,8 @@ pub const PT_LOPROC = 0x70000000;
/// End of processor-specific
pub const PT_HIPROC = 0x7fffffff;
pub const PN_XNUM = 0xffff;
/// Section header table entry unused
pub const SHT_NULL = 0;
/// Program data
@ -385,63 +387,149 @@ pub const SHT_HIUSER = 0xffffffff;
// Note type for .note.gnu.build_id
pub const NT_GNU_BUILD_ID = 3;
/// Local symbol
pub const STB_LOCAL = 0;
/// Global symbol
pub const STB_GLOBAL = 1;
/// Weak symbol
pub const STB_WEAK = 2;
/// Number of defined types
pub const STB_NUM = 3;
/// Start of OS-specific
pub const STB_LOOS = 10;
/// Unique symbol
pub const STB_GNU_UNIQUE = 10;
/// End of OS-specific
pub const STB_HIOS = 12;
/// Start of processor-specific
pub const STB_LOPROC = 13;
/// End of processor-specific
pub const STB_HIPROC = 15;
/// Deprecated, use `@intFromEnum(std.elf.STB.LOCAL)`
pub const STB_LOCAL = @intFromEnum(STB.LOCAL);
/// Deprecated, use `@intFromEnum(std.elf.STB.GLOBAL)`
pub const STB_GLOBAL = @intFromEnum(STB.GLOBAL);
/// Deprecated, use `@intFromEnum(std.elf.STB.WEAK)`
pub const STB_WEAK = @intFromEnum(STB.WEAK);
/// Deprecated, use `std.elf.STB.NUM`
pub const STB_NUM = STB.NUM;
/// Deprecated, use `@intFromEnum(std.elf.STB.LOOS)`
pub const STB_LOOS = @intFromEnum(STB.LOOS);
/// Deprecated, use `@intFromEnum(std.elf.STB.GNU_UNIQUE)`
pub const STB_GNU_UNIQUE = @intFromEnum(STB.GNU_UNIQUE);
/// Deprecated, use `@intFromEnum(std.elf.STB.HIOS)`
pub const STB_HIOS = @intFromEnum(STB.HIOS);
/// Deprecated, use `@intFromEnum(std.elf.STB.LOPROC)`
pub const STB_LOPROC = @intFromEnum(STB.LOPROC);
/// Deprecated, use `@intFromEnum(std.elf.STB.HIPROC)`
pub const STB_HIPROC = @intFromEnum(STB.HIPROC);
pub const STB_MIPS_SPLIT_COMMON = 13;
/// Deprecated, use `@intFromEnum(std.elf.STB.MIPS_SPLIT_COMMON)`
pub const STB_MIPS_SPLIT_COMMON = @intFromEnum(STB.MIPS_SPLIT_COMMON);
/// Symbol type is unspecified
pub const STT_NOTYPE = 0;
/// Symbol is a data object
pub const STT_OBJECT = 1;
/// Symbol is a code object
pub const STT_FUNC = 2;
/// Symbol associated with a section
pub const STT_SECTION = 3;
/// Symbol's name is file name
pub const STT_FILE = 4;
/// Symbol is a common data object
pub const STT_COMMON = 5;
/// Symbol is thread-local data object
pub const STT_TLS = 6;
/// Number of defined types
pub const STT_NUM = 7;
/// Start of OS-specific
pub const STT_LOOS = 10;
/// Symbol is indirect code object
pub const STT_GNU_IFUNC = 10;
/// End of OS-specific
pub const STT_HIOS = 12;
/// Start of processor-specific
pub const STT_LOPROC = 13;
/// End of processor-specific
pub const STT_HIPROC = 15;
/// Deprecated, use `@intFromEnum(std.elf.STT.NOTYPE)`
pub const STT_NOTYPE = @intFromEnum(STT.NOTYPE);
/// Deprecated, use `@intFromEnum(std.elf.STT.OBJECT)`
pub const STT_OBJECT = @intFromEnum(STT.OBJECT);
/// Deprecated, use `@intFromEnum(std.elf.STT.FUNC)`
pub const STT_FUNC = @intFromEnum(STT.FUNC);
/// Deprecated, use `@intFromEnum(std.elf.STT.SECTION)`
pub const STT_SECTION = @intFromEnum(STT.SECTION);
/// Deprecated, use `@intFromEnum(std.elf.STT.FILE)`
pub const STT_FILE = @intFromEnum(STT.FILE);
/// Deprecated, use `@intFromEnum(std.elf.STT.COMMON)`
pub const STT_COMMON = @intFromEnum(STT.COMMON);
/// Deprecated, use `@intFromEnum(std.elf.STT.TLS)`
pub const STT_TLS = @intFromEnum(STT.TLS);
/// Deprecated, use `std.elf.STT.NUM`
pub const STT_NUM = STT.NUM;
/// Deprecated, use `@intFromEnum(std.elf.STT.LOOS)`
pub const STT_LOOS = @intFromEnum(STT.LOOS);
/// Deprecated, use `@intFromEnum(std.elf.STT.GNU_IFUNC)`
pub const STT_GNU_IFUNC = @intFromEnum(STT.GNU_IFUNC);
/// Deprecated, use `@intFromEnum(std.elf.STT.HIOS)`
pub const STT_HIOS = @intFromEnum(STT.HIOS);
/// Deprecated, use `@intFromEnum(std.elf.STT.LOPROC)`
pub const STT_LOPROC = @intFromEnum(STT.LOPROC);
/// Deprecated, use `@intFromEnum(std.elf.STT.HIPROC)`
pub const STT_HIPROC = @intFromEnum(STT.HIPROC);
pub const STT_SPARC_REGISTER = 13;
/// Deprecated, use `@intFromEnum(std.elf.STT.SPARC_REGISTER)`
pub const STT_SPARC_REGISTER = @intFromEnum(STT.SPARC_REGISTER);
pub const STT_PARISC_MILLICODE = 13;
/// Deprecated, use `@intFromEnum(std.elf.STT.PARISC_MILLICODE)`
pub const STT_PARISC_MILLICODE = @intFromEnum(STT.PARISC_MILLICODE);
pub const STT_HP_OPAQUE = (STT_LOOS + 0x1);
pub const STT_HP_STUB = (STT_LOOS + 0x2);
/// Deprecated, use `@intFromEnum(std.elf.STT.HP_OPAQUE)`
pub const STT_HP_OPAQUE = @intFromEnum(STT.HP_OPAQUE);
/// Deprecated, use `@intFromEnum(std.elf.STT.HP_STUB)`
pub const STT_HP_STUB = @intFromEnum(STT.HP_STUB);
pub const STT_ARM_TFUNC = STT_LOPROC;
pub const STT_ARM_16BIT = STT_HIPROC;
/// Deprecated, use `@intFromEnum(std.elf.STT.ARM_TFUNC)`
pub const STT_ARM_TFUNC = @intFromEnum(STT.ARM_TFUNC);
/// Deprecated, use `@intFromEnum(std.elf.STT.ARM_16BIT)`
pub const STT_ARM_16BIT = @intFromEnum(STT.ARM_16BIT);
pub const STB = enum(u4) {
/// Local symbol
LOCAL = 0,
/// Global symbol
GLOBAL = 1,
/// Weak symbol
WEAK = 2,
_,
/// Number of defined types
pub const NUM = @typeInfo(STB).@"enum".fields.len;
/// Start of OS-specific
pub const LOOS: STB = @enumFromInt(10);
/// End of OS-specific
pub const HIOS: STB = @enumFromInt(12);
/// Unique symbol
pub const GNU_UNIQUE: STB = @enumFromInt(@intFromEnum(LOOS) + 0);
/// Start of processor-specific
pub const LOPROC: STB = @enumFromInt(13);
/// End of processor-specific
pub const HIPROC: STB = @enumFromInt(15);
pub const MIPS_SPLIT_COMMON: STB = @enumFromInt(@intFromEnum(LOPROC) + 0);
};
pub const STT = enum(u4) {
/// Symbol type is unspecified
NOTYPE = 0,
/// Symbol is a data object
OBJECT = 1,
/// Symbol is a code object
FUNC = 2,
/// Symbol associated with a section
SECTION = 3,
/// Symbol's name is file name
FILE = 4,
/// Symbol is a common data object
COMMON = 5,
/// Symbol is thread-local data object
TLS = 6,
_,
/// Number of defined types
pub const NUM = @typeInfo(STT).@"enum".fields.len;
/// Start of OS-specific
pub const LOOS: STT = @enumFromInt(10);
/// End of OS-specific
pub const HIOS: STT = @enumFromInt(12);
/// Symbol is indirect code object
pub const GNU_IFUNC: STT = @enumFromInt(@intFromEnum(LOOS) + 0);
pub const HP_OPAQUE: STT = @enumFromInt(@intFromEnum(LOOS) + 1);
pub const HP_STUB: STT = @enumFromInt(@intFromEnum(LOOS) + 2);
/// Start of processor-specific
pub const LOPROC: STT = @enumFromInt(13);
/// End of processor-specific
pub const HIPROC: STT = @enumFromInt(15);
pub const SPARC_REGISTER: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
pub const PARISC_MILLICODE: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
pub const ARM_TFUNC: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
pub const ARM_16BIT: STT = @enumFromInt(@intFromEnum(LOPROC) + 2);
};
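
A sketch of how these nibble-sized enums compose into the classic st_info byte through the new `Sym.Info` packed struct defined further down in this file: packed struct fields fill from the least significant bit, so `type` occupies the low nibble and `bind` the high nibble, matching ELF's `(bind << 4) | type` encoding.

test "Sym.Info matches the st_info byte layout" {
    const info: Elf64.Sym.Info = .{ .type = .FUNC, .bind = .GLOBAL };
    // ELF64_ST_INFO(STB_GLOBAL, STT_FUNC) == (1 << 4) | 2 == 0x12
    try std.testing.expectEqual(@as(u8, 0x12), @as(u8, @bitCast(info)));
}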
pub const STV = enum(u3) {
DEFAULT = 0,
INTERNAL = 1,
HIDDEN = 2,
PROTECTED = 3,
};
pub const MAGIC = "\x7fELF";
@ -534,15 +622,15 @@ pub const Header = struct {
const buf = try r.peek(@sizeOf(Elf64_Ehdr));
if (!mem.eql(u8, buf[0..4], MAGIC)) return error.InvalidElfMagic;
if (buf[EI_VERSION] != 1) return error.InvalidElfVersion;
if (buf[EI.VERSION] != 1) return error.InvalidElfVersion;
const endian: std.builtin.Endian = switch (buf[EI_DATA]) {
const endian: std.builtin.Endian = switch (buf[EI.DATA]) {
ELFDATA2LSB => .little,
ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
return switch (buf[EI_CLASS]) {
return switch (buf[EI.CLASS]) {
ELFCLASS32 => .init(try r.takeStruct(Elf32_Ehdr, endian), endian),
ELFCLASS64 => .init(try r.takeStruct(Elf64_Ehdr, endian), endian),
else => return error.InvalidElfClass,
@ -559,8 +647,8 @@ pub const Header = struct {
else => @compileError("bad type"),
},
.endian = endian,
.os_abi = @enumFromInt(hdr.e_ident[EI_OSABI]),
.abi_version = hdr.e_ident[EI_ABIVERSION],
.os_abi = @enumFromInt(hdr.e_ident[EI.OSABI]),
.abi_version = hdr.e_ident[EI.ABIVERSION],
.type = hdr.e_type,
.machine = hdr.e_machine,
.entry = hdr.e_entry,
@ -683,38 +771,200 @@ fn takeShdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Shdr {
};
}
pub const ELFCLASSNONE = 0;
pub const ELFCLASS32 = 1;
pub const ELFCLASS64 = 2;
pub const ELFCLASSNUM = 3;
pub const EI = struct {
pub const CLASS = 4;
pub const DATA = 5;
pub const VERSION = 6;
pub const OSABI = 7;
pub const ABIVERSION = 8;
pub const PAD = 9;
pub const NIDENT = 16;
};
pub const ELFDATANONE = 0;
pub const ELFDATA2LSB = 1;
pub const ELFDATA2MSB = 2;
pub const ELFDATANUM = 3;
pub const EI_CLASS = 4;
pub const EI_DATA = 5;
pub const EI_VERSION = 6;
pub const EI_OSABI = 7;
pub const EI_ABIVERSION = 8;
pub const EI_PAD = 9;
pub const EI_NIDENT = 16;
/// Deprecated, use `std.elf.EI.CLASS`
pub const EI_CLASS = EI.CLASS;
/// Deprecated, use `std.elf.EI.DATA`
pub const EI_DATA = EI.DATA;
/// Deprecated, use `std.elf.EI.VERSION`
pub const EI_VERSION = EI.VERSION;
/// Deprecated, use `std.elf.EI.OSABI`
pub const EI_OSABI = EI.OSABI;
/// Deprecated, use `std.elf.EI.ABIVERSION`
pub const EI_ABIVERSION = EI.ABIVERSION;
/// Deprecated, use `std.elf.EI.PAD`
pub const EI_PAD = EI.PAD;
/// Deprecated, use `std.elf.EI.NIDENT`
pub const EI_NIDENT = EI.NIDENT;
pub const Half = u16;
pub const Word = u32;
pub const Sword = i32;
pub const Elf32_Xword = u64;
pub const Elf32_Sxword = i64;
pub const Elf64_Xword = u64;
pub const Xword = u64;
pub const Sxword = i64;
pub const Section = u16;
pub const Elf32 = struct {
pub const Addr = u32;
pub const Off = u32;
pub const Ehdr = extern struct {
ident: [EI.NIDENT]u8,
type: ET,
machine: EM,
version: Word,
entry: Elf32.Addr,
phoff: Elf32.Off,
shoff: Elf32.Off,
flags: Word,
ehsize: Half,
phentsize: Half,
phnum: Half,
shentsize: Half,
shnum: Half,
shstrndx: Half,
};
pub const Phdr = extern struct {
type: Word,
offset: Elf32.Off,
vaddr: Elf32.Addr,
paddr: Elf32.Addr,
filesz: Word,
memsz: Word,
flags: PF,
@"align": Word,
};
pub const Shdr = extern struct {
name: Word,
type: Word,
flags: packed struct { shf: SHF },
addr: Elf32.Addr,
offset: Elf32.Off,
size: Word,
link: Word,
info: Word,
addralign: Word,
entsize: Word,
};
pub const Chdr = extern struct {
type: COMPRESS,
size: Word,
addralign: Word,
};
pub const Sym = extern struct {
name: Word,
value: Elf32.Addr,
size: Word,
info: Info,
other: Other,
shndx: Section,
pub const Info = packed struct(u8) {
type: STT,
bind: STB,
};
pub const Other = packed struct(u8) {
visibility: STV,
unused: u5 = 0,
};
};
comptime {
assert(@sizeOf(Elf32.Ehdr) == 52);
assert(@sizeOf(Elf32.Phdr) == 32);
assert(@sizeOf(Elf32.Shdr) == 40);
assert(@sizeOf(Elf32.Sym) == 16);
}
};
pub const Elf64 = struct {
pub const Addr = u64;
pub const Off = u64;
pub const Ehdr = extern struct {
ident: [EI.NIDENT]u8,
type: ET,
machine: EM,
version: Word,
entry: Elf64.Addr,
phoff: Elf64.Off,
shoff: Elf64.Off,
flags: Word,
ehsize: Half,
phentsize: Half,
phnum: Half,
shentsize: Half,
shnum: Half,
shstrndx: Half,
};
pub const Phdr = extern struct {
type: Word,
flags: PF,
offset: Elf64.Off,
vaddr: Elf64.Addr,
paddr: Elf64.Addr,
filesz: Xword,
memsz: Xword,
@"align": Xword,
};
pub const Shdr = extern struct {
name: Word,
type: Word,
flags: packed struct { shf: SHF, unused: Word = 0 },
addr: Elf64.Addr,
offset: Elf64.Off,
size: Xword,
link: Word,
info: Word,
addralign: Xword,
entsize: Xword,
};
pub const Chdr = extern struct {
type: COMPRESS,
reserved: Word = 0,
size: Xword,
addralign: Xword,
};
pub const Sym = extern struct {
name: Word,
info: Info,
other: Other,
shndx: Section,
value: Elf64.Addr,
size: Xword,
pub const Info = Elf32.Sym.Info;
pub const Other = Elf32.Sym.Other;
};
comptime {
assert(@sizeOf(Elf64.Ehdr) == 64);
assert(@sizeOf(Elf64.Phdr) == 56);
assert(@sizeOf(Elf64.Shdr) == 64);
assert(@sizeOf(Elf64.Sym) == 24);
}
};
pub const ElfN = switch (@sizeOf(usize)) {
4 => Elf32,
8 => Elf64,
else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.Xword`
pub const Elf32_Xword = Xword;
/// Deprecated, use `std.elf.Sxword`
pub const Elf32_Sxword = Sxword;
/// Deprecated, use `std.elf.Xword`
pub const Elf64_Xword = Xword;
/// Deprecated, use `std.elf.Sxword`
pub const Elf64_Sxword = Sxword;
/// Deprecated, use `std.elf.Elf32.Addr`
pub const Elf32_Addr = u32;
/// Deprecated, use `std.elf.Elf64.Addr`
pub const Elf64_Addr = u64;
/// Deprecated, use `std.elf.Elf32.Off`
pub const Elf32_Off = u32;
/// Deprecated, use `std.elf.Elf64.Off`
pub const Elf64_Off = u64;
/// Deprecated, use `std.elf.Section`
pub const Elf32_Section = u16;
/// Deprecated, use `std.elf.Section`
pub const Elf64_Section = u16;
/// Deprecated, use `std.elf.Elf32.Ehdr`
pub const Elf32_Ehdr = extern struct {
e_ident: [EI_NIDENT]u8,
e_type: ET,
@ -731,8 +981,9 @@ pub const Elf32_Ehdr = extern struct {
e_shnum: Half,
e_shstrndx: Half,
};
/// Deprecated, use `std.elf.Elf64.Ehdr`
pub const Elf64_Ehdr = extern struct {
e_ident: [EI_NIDENT]u8,
e_ident: [EI.NIDENT]u8,
e_type: ET,
e_machine: EM,
e_version: Word,
@ -747,6 +998,7 @@ pub const Elf64_Ehdr = extern struct {
e_shnum: Half,
e_shstrndx: Half,
};
/// Deprecated, use `std.elf.Elf32.Phdr`
pub const Elf32_Phdr = extern struct {
p_type: Word,
p_offset: Elf32_Off,
@ -757,6 +1009,7 @@ pub const Elf32_Phdr = extern struct {
p_flags: Word,
p_align: Word,
};
/// Deprecated, use `std.elf.Elf64.Phdr`
pub const Elf64_Phdr = extern struct {
p_type: Word,
p_flags: Word,
@ -767,6 +1020,7 @@ pub const Elf64_Phdr = extern struct {
p_memsz: Elf64_Xword,
p_align: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Shdr`
pub const Elf32_Shdr = extern struct {
sh_name: Word,
sh_type: Word,
@ -779,6 +1033,7 @@ pub const Elf32_Shdr = extern struct {
sh_addralign: Word,
sh_entsize: Word,
};
/// Deprecated, use `std.elf.Elf64.Shdr`
pub const Elf64_Shdr = extern struct {
sh_name: Word,
sh_type: Word,
@ -791,17 +1046,20 @@ pub const Elf64_Shdr = extern struct {
sh_addralign: Elf64_Xword,
sh_entsize: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Chdr`
pub const Elf32_Chdr = extern struct {
ch_type: COMPRESS,
ch_size: Word,
ch_addralign: Word,
};
/// Deprecated, use `std.elf.Elf64.Chdr`
pub const Elf64_Chdr = extern struct {
ch_type: COMPRESS,
ch_reserved: Word = 0,
ch_size: Elf64_Xword,
ch_addralign: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Sym`
pub const Elf32_Sym = extern struct {
st_name: Word,
st_value: Elf32_Addr,
@ -817,6 +1075,7 @@ pub const Elf32_Sym = extern struct {
return @truncate(self.st_info >> 4);
}
};
/// Deprecated, use `std.elf.Elf64.Sym`
pub const Elf64_Sym = extern struct {
st_name: Word,
st_info: u8,
@ -1020,27 +1279,18 @@ pub const Elf_MIPS_ABIFlags_v0 = extern struct {
flags2: Word,
};
comptime {
assert(@sizeOf(Elf32_Ehdr) == 52);
assert(@sizeOf(Elf64_Ehdr) == 64);
assert(@sizeOf(Elf32_Phdr) == 32);
assert(@sizeOf(Elf64_Phdr) == 56);
assert(@sizeOf(Elf32_Shdr) == 40);
assert(@sizeOf(Elf64_Shdr) == 64);
}
pub const Auxv = switch (@sizeOf(usize)) {
4 => Elf32_auxv_t,
8 => Elf64_auxv_t,
else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Ehdr`
pub const Ehdr = switch (@sizeOf(usize)) {
4 => Elf32_Ehdr,
8 => Elf64_Ehdr,
else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Phdr`
pub const Phdr = switch (@sizeOf(usize)) {
4 => Elf32_Phdr,
8 => Elf64_Phdr,
@ -1071,20 +1321,53 @@ pub const Shdr = switch (@sizeOf(usize)) {
8 => Elf64_Shdr,
else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Chdr`
pub const Chdr = switch (@sizeOf(usize)) {
4 => Elf32_Chdr,
8 => Elf64_Chdr,
else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Sym`
pub const Sym = switch (@sizeOf(usize)) {
4 => Elf32_Sym,
8 => Elf64_Sym,
else => @compileError("expected pointer size of 32 or 64"),
};
pub const Addr = switch (@sizeOf(usize)) {
4 => Elf32_Addr,
8 => Elf64_Addr,
else => @compileError("expected pointer size of 32 or 64"),
/// Deprecated, use `std.elf.ElfN.Addr`
pub const Addr = ElfN.Addr;
/// Deprecated, use `@intFromEnum(std.elf.CLASS.NONE)`
pub const ELFCLASSNONE = @intFromEnum(CLASS.NONE);
/// Deprecated, use `@intFromEnum(std.elf.CLASS.@"32")`
pub const ELFCLASS32 = @intFromEnum(CLASS.@"32");
/// Deprecated, use `@intFromEnum(std.elf.CLASS.@"64")`
pub const ELFCLASS64 = @intFromEnum(CLASS.@"64");
/// Deprecated, use `std.elf.CLASS.NUM`
pub const ELFCLASSNUM = CLASS.NUM;
pub const CLASS = enum(u8) {
NONE = 0,
@"32" = 1,
@"64" = 2,
_,
pub const NUM = @typeInfo(CLASS).@"enum".fields.len;
};
/// Deprecated, use `@intFromEnum(std.elf.DATA.NONE)`
pub const ELFDATANONE = @intFromEnum(DATA.NONE);
/// Deprecated, use `@intFromEnum(std.elf.DATA.@"2LSB")`
pub const ELFDATA2LSB = @intFromEnum(DATA.@"2LSB");
/// Deprecated, use `@intFromEnum(std.elf.DATA.@"2MSB")`
pub const ELFDATA2MSB = @intFromEnum(DATA.@"2MSB");
/// Deprecated, use `std.elf.DATA.NUM`
pub const ELFDATANUM = DATA.NUM;
pub const DATA = enum(u8) {
NONE = 0,
@"2LSB" = 1,
@"2MSB" = 2,
_,
pub const NUM = @typeInfo(DATA).@"enum".fields.len;
};
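
A minimal sketch combining the new `EI` indices with these enums; because both enums are non-exhaustive (they declare `_`), `@enumFromInt` never traps here:

fn identEndian(ident: *const [EI.NIDENT]u8) ?std.builtin.Endian {
    return switch (@as(DATA, @enumFromInt(ident[EI.DATA]))) {
        .@"2LSB" => .little,
        .@"2MSB" => .big,
        else => null, // NONE or an unknown encoding
    };
}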
pub const OSABI = enum(u8) {
@ -1718,6 +2001,108 @@ pub const SHF_MIPS_STRING = 0x80000000;
/// Make code section unreadable when in execute-only mode
pub const SHF_ARM_PURECODE = 0x2000000;
pub const SHF = packed struct(Word) {
/// Section data should be writable during execution.
WRITE: bool = false,
/// Section occupies memory during program execution.
ALLOC: bool = false,
/// Section contains executable machine instructions.
EXECINSTR: bool = false,
unused3: u1 = 0,
/// The data in this section may be merged.
MERGE: bool = false,
/// The data in this section is null-terminated strings.
STRINGS: bool = false,
/// A field in this section holds a section header table index.
INFO_LINK: bool = false,
/// Adds special ordering requirements for link editors.
LINK_ORDER: bool = false,
/// This section requires special OS-specific processing to avoid incorrect behavior.
OS_NONCONFORMING: bool = false,
/// This section is a member of a section group.
GROUP: bool = false,
/// This section holds Thread-Local Storage.
TLS: bool = false,
/// Identifies a section containing compressed data.
COMPRESSED: bool = false,
unused12: u8 = 0,
OS: packed union {
MASK: u8,
GNU: packed struct(u8) {
unused0: u1 = 0,
/// Not to be GCed by the linker
RETAIN: bool = false,
unused2: u6 = 0,
},
MIPS: packed struct(u8) {
unused0: u4 = 0,
/// Section contains text/data which may be replicated in other sections.
/// Linker must retain only one copy.
NODUPES: bool = false,
/// Linker must generate implicit hidden weak names.
NAMES: bool = false,
/// Section data local to process.
LOCAL: bool = false,
/// Do not strip this section.
NOSTRIP: bool = false,
},
ARM: packed struct(u8) {
unused0: u5 = 0,
/// Make code section unreadable when in execute-only mode
PURECODE: bool = false,
unused6: u2 = 0,
},
} = .{ .MASK = 0 },
PROC: packed union {
MASK: u4,
XCORE: packed struct(u4) {
/// All sections with the "d" flag are grouped together by the linker to form
/// the data section and the dp register is set to the start of the section by
/// the boot code.
DP_SECTION: bool = false,
/// All sections with the "c" flag are grouped together by the linker to form
/// the constant pool and the cp register is set to the start of the constant
/// pool by the boot code.
CP_SECTION: bool = false,
unused2: u1 = 0,
/// This section is excluded from the final executable or shared library.
EXCLUDE: bool = false,
},
X86_64: packed struct(u4) {
/// If an object file section does not have this flag set, then it may not hold
/// more than 2GB and can be freely referred to in objects using smaller code
/// models. Otherwise, only objects using larger code models can refer to them.
/// For example, a medium code model object can refer to data in a section that
/// sets this flag besides being able to refer to data in a section that does
/// not set it; likewise, a small code model object can refer only to code in a
/// section that does not set this flag.
LARGE: bool = false,
unused1: u2 = 0,
/// This section is excluded from the final executable or shared library.
EXCLUDE: bool = false,
},
HEX: packed struct(u4) {
/// All sections with the GPREL flag are grouped into a global data area
/// for faster accesses
GPREL: bool = false,
unused1: u2 = 0,
/// This section is excluded from the final executable or shared library.
EXCLUDE: bool = false,
},
MIPS: packed struct(u4) {
/// All sections with the GPREL flag are grouped into a global data area
/// for faster accesses
GPREL: bool = false,
/// This section should be merged.
MERGE: bool = false,
/// Address size to be inferred from section entry size.
ADDR: bool = false,
/// Section data is string data by default.
STRING: bool = false,
},
} = .{ .MASK = 0 },
};
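
A sketch of the typed flags at a call site; `@bitCast` recovers the raw sh_flags word, so the packed struct stays interchangeable with the legacy integer masks:

const text_flags: SHF = .{ .ALLOC = true, .EXECINSTR = true };
comptime {
    // Same value as the legacy SHF_ALLOC | SHF_EXECINSTR == 0x2 | 0x4.
    assert(@as(Word, @bitCast(text_flags)) == 0x6);
}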
/// Execute
pub const PF_X = 1;
@ -1733,6 +2118,19 @@ pub const PF_MASKOS = 0x0ff00000;
/// Bits for processor-specific semantics.
pub const PF_MASKPROC = 0xf0000000;
pub const PF = packed struct(Word) {
X: bool = false,
W: bool = false,
R: bool = false,
unused3: u17 = 0,
OS: packed union {
MASK: u8,
} = .{ .MASK = 0 },
PROC: packed union {
MASK: u4,
} = .{ .MASK = 0 },
};
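
The same round-trip holds for the new segment-flag struct:

const rx: PF = .{ .X = true, .R = true };
comptime {
    assert(@as(Word, @bitCast(rx)) == PF_R | PF_X); // == 0b101
}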
/// Undefined section
pub const SHN_UNDEF = 0;
/// Start of reserved indices
@ -2303,13 +2701,6 @@ pub const R_PPC64 = enum(u32) {
_,
};
pub const STV = enum(u3) {
DEFAULT = 0,
INTERNAL = 1,
HIDDEN = 2,
PROTECTED = 3,
};
pub const ar_hdr = extern struct {
/// Member file name, sometimes / terminated.
ar_name: [16]u8,


@ -516,15 +516,15 @@ pub fn abiAndDynamicLinkerFromFile(
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
elf.ELFDATA2LSB => .little,
elf.ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
const need_bswap = elf_endian != native_endian;
if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) {
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
elf.ELFCLASS32 => false,
elf.ELFCLASS64 => true,
else => return error.InvalidElfClass,
@ -920,15 +920,15 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
elf.ELFDATA2LSB => .little,
elf.ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
const need_bswap = elf_endian != native_endian;
if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) {
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
elf.ELFCLASS32 => false,
elf.ELFCLASS64 => true,
else => return error.InvalidElfClass,


@ -177,7 +177,6 @@ debug_compiler_runtime_libs: bool,
debug_compile_errors: bool,
/// Do not check this field directly. Instead, use the `debugIncremental` wrapper function.
debug_incremental: bool,
incremental: bool,
alloc_failure_occurred: bool = false,
last_update_was_cache_hit: bool = false,
@ -256,7 +255,9 @@ mutex: if (builtin.single_threaded) struct {
test_filters: []const []const u8,
link_task_wait_group: WaitGroup = .{},
link_prog_node: std.Progress.Node = std.Progress.Node.none,
link_prog_node: std.Progress.Node = .none,
link_uav_prog_node: std.Progress.Node = .none,
link_lazy_prog_node: std.Progress.Node = .none,
llvm_opt_bisect_limit: c_int,
@ -1746,7 +1747,6 @@ pub const CreateOptions = struct {
debug_compiler_runtime_libs: bool = false,
debug_compile_errors: bool = false,
debug_incremental: bool = false,
incremental: bool = false,
/// Normally when you create a `Compilation`, Zig will automatically build
/// and link in required dependencies, such as compiler-rt and libc. When
/// building such dependencies themselves, this flag must be set to avoid
@ -1982,6 +1982,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
};
if (have_zcu and (!need_llvm or use_llvm)) {
if (output_mode == .Obj) break :s .zcu;
if (options.config.use_new_linker) break :s .zcu;
switch (target_util.zigBackend(target, use_llvm)) {
else => {},
.stage2_aarch64, .stage2_x86_64 => if (target.ofmt == .coff) {
@ -2188,8 +2189,8 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
.inherited = .{},
.global = options.config,
.parent = options.root_mod,
}) catch |err| return switch (err) {
error.OutOfMemory => |e| return e,
}) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
// None of these are possible because the configuration matches the root module
// which already passed these checks.
error.ValgrindUnsupportedOnTarget => unreachable,
@ -2266,7 +2267,6 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
.debug_compiler_runtime_libs = options.debug_compiler_runtime_libs,
.debug_compile_errors = options.debug_compile_errors,
.debug_incremental = options.debug_incremental,
.incremental = options.incremental,
.root_name = root_name,
.sysroot = sysroot,
.windows_libs = .empty,
@ -2409,6 +2409,8 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
// Synchronize with other matching comments: ZigOnlyHashStuff
hash.add(use_llvm);
hash.add(options.config.use_lib_llvm);
hash.add(options.config.use_lld);
hash.add(options.config.use_new_linker);
hash.add(options.config.dll_export_fns);
hash.add(options.config.is_test);
hash.addListOfBytes(options.test_filters);
@ -3075,14 +3077,29 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// The linker progress node is set up here instead of in `performAllTheWork`, because
// we also want it around during `flush`.
const have_link_node = comp.bin_file != null;
if (have_link_node) {
if (comp.bin_file) |lf| {
comp.link_prog_node = main_progress_node.start("Linking", 0);
if (lf.cast(.elf2)) |elf| {
comp.link_prog_node.increaseEstimatedTotalItems(3);
comp.link_uav_prog_node = comp.link_prog_node.start("Constants", 0);
comp.link_lazy_prog_node = comp.link_prog_node.start("Synthetics", 0);
elf.mf.update_prog_node = comp.link_prog_node.start("Relocations", elf.mf.updates.items.len);
}
}
defer if (have_link_node) {
defer {
comp.link_prog_node.end();
comp.link_prog_node = .none;
};
comp.link_uav_prog_node.end();
comp.link_uav_prog_node = .none;
comp.link_lazy_prog_node.end();
comp.link_lazy_prog_node = .none;
if (comp.bin_file) |lf| {
if (lf.cast(.elf2)) |elf| {
elf.mf.update_prog_node.end();
elf.mf.update_prog_node = .none;
}
}
}
try comp.performAllTheWork(main_progress_node);
@ -3100,6 +3117,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
try pt.populateTestFunctions();
}
link.updateErrorData(pt);
try pt.processExports();
}
@ -3474,6 +3493,8 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.add(comp.config.use_llvm);
man.hash.add(comp.config.use_lib_llvm);
man.hash.add(comp.config.use_lld);
man.hash.add(comp.config.use_new_linker);
man.hash.add(comp.config.is_test);
man.hash.add(comp.config.import_memory);
man.hash.add(comp.config.export_memory);
@ -4073,7 +4094,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
defer sorted_failed_analysis.deinit(gpa);
var added_any_analysis_error = false;
for (sorted_failed_analysis.items(.key), sorted_failed_analysis.items(.value)) |anal_unit, error_msg| {
if (comp.incremental) {
if (comp.config.incremental) {
const refs = try zcu.resolveReferences();
if (!refs.contains(anal_unit)) continue;
}
@ -4240,7 +4261,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
// TODO: eventually, this should be behind `std.debug.runtime_safety`. But right now, this is a
// very common way for incremental compilation bugs to manifest, so let's always check it.
if (comp.zcu) |zcu| if (comp.incremental and bundle.root_list.items.len == 0) {
if (comp.zcu) |zcu| if (comp.config.incremental and bundle.root_list.items.len == 0) {
for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
const refs = try zcu.resolveReferences();
var ref = refs.get(failed_unit) orelse continue;
@ -4949,7 +4970,7 @@ fn performAllTheWork(
tr.stats.n_reachable_files = @intCast(zcu.alive_files.count());
}
if (comp.incremental) {
if (comp.config.incremental) {
const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
defer update_zir_refs_node.end();
try pt.updateZirRefs();


@ -49,6 +49,8 @@ use_lib_llvm: bool,
use_lld: bool,
c_frontend: CFrontend,
lto: std.zig.LtoMode,
use_new_linker: bool,
incremental: bool,
/// WASI-only. Type of WASI execution model ("command" or "reactor").
/// Always set to `command` for non-WASI targets.
wasi_exec_model: std.builtin.WasiExecModel,
@ -104,6 +106,8 @@ pub const Options = struct {
use_lld: ?bool = null,
use_clang: ?bool = null,
lto: ?std.zig.LtoMode = null,
use_new_linker: ?bool = null,
incremental: bool = false,
/// WASI-only. Type of WASI execution model ("command" or "reactor").
wasi_exec_model: ?std.builtin.WasiExecModel = null,
import_memory: ?bool = null,
@ -147,6 +151,8 @@ pub const ResolveError = error{
LldUnavailable,
ClangUnavailable,
DllExportFnsRequiresWindows,
NewLinkerIncompatibleWithLld,
NewLinkerIncompatibleObjectFormat,
};
pub fn resolve(options: Options) ResolveError!Config {
@ -458,6 +464,22 @@ pub fn resolve(options: Options) ResolveError!Config {
break :b .none;
};
const use_new_linker = b: {
if (use_lld) {
if (options.use_new_linker == true) return error.NewLinkerIncompatibleWithLld;
break :b false;
}
if (!target_util.hasNewLinkerSupport(target.ofmt)) {
if (options.use_new_linker == true) return error.NewLinkerIncompatibleObjectFormat;
break :b false;
}
if (options.use_new_linker) |x| break :b x;
break :b options.incremental;
};
const root_strip = b: {
if (options.root_strip) |x| break :b x;
if (root_optimize_mode == .ReleaseSmall) break :b true;
@ -531,6 +553,8 @@ pub fn resolve(options: Options) ResolveError!Config {
.root_error_tracing = root_error_tracing,
.pie = pie,
.lto = lto,
.use_new_linker = use_new_linker,
.incremental = options.incremental,
.import_memory = import_memory,
.export_memory = export_memory,
.shared_memory = shared_memory,


@ -6424,14 +6424,25 @@ pub const Alignment = enum(u6) {
return n + 1;
}
const LlvmBuilderAlignment = std.zig.llvm.Builder.Alignment;
pub fn toLlvm(this: @This()) LlvmBuilderAlignment {
return @enumFromInt(@intFromEnum(this));
pub fn toStdMem(a: Alignment) std.mem.Alignment {
assert(a != .none);
return @enumFromInt(@intFromEnum(a));
}
pub fn fromLlvm(other: LlvmBuilderAlignment) @This() {
return @enumFromInt(@intFromEnum(other));
pub fn fromStdMem(a: std.mem.Alignment) Alignment {
const r: Alignment = @enumFromInt(@intFromEnum(a));
assert(r != .none);
return r;
}
const LlvmBuilderAlignment = std.zig.llvm.Builder.Alignment;
pub fn toLlvm(a: Alignment) LlvmBuilderAlignment {
return @enumFromInt(@intFromEnum(a));
}
pub fn fromLlvm(a: LlvmBuilderAlignment) Alignment {
return @enumFromInt(@intFromEnum(a));
}
};
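
Both `Alignment` and `std.mem.Alignment` store log2 of the byte count, so the new helpers above are checked integer casts; a round-trip sketch, assuming this `Alignment`:

test "Alignment round-trips through std.mem.Alignment" {
    const a: Alignment = .fromStdMem(.@"16"); // both encode log2 == 4
    try std.testing.expectEqual(@as(usize, 16), a.toStdMem().toByteUnits());
}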


@ -3032,7 +3032,7 @@ fn zirStructDecl(
});
errdefer pt.destroyNamespace(new_namespace_index);
if (pt.zcu.comp.incremental) {
if (pt.zcu.comp.config.incremental) {
try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
}
@ -3430,7 +3430,7 @@ fn zirUnionDecl(
});
errdefer pt.destroyNamespace(new_namespace_index);
if (pt.zcu.comp.incremental) {
if (pt.zcu.comp.config.incremental) {
try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
}
@ -6217,7 +6217,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
if (ptr_info.byte_offset != 0) {
return sema.fail(block, ptr_src, "TODO: export pointer in middle of value", .{});
}
if (options.linkage == .internal) return;
if (zcu.llvm_object != null and options.linkage == .internal) return;
const export_ty = Value.fromInterned(uav.val).typeOf(zcu);
if (!try sema.validateExternType(export_ty, .other)) {
return sema.failWithOwnedErrorMsg(block, msg: {
@ -6256,7 +6256,7 @@ pub fn analyzeExport(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
if (options.linkage == .internal)
if (zcu.llvm_object != null and options.linkage == .internal)
return;
try sema.ensureNavResolved(block, src, orig_nav_index, .fully);
@ -7709,7 +7709,7 @@ fn analyzeCall(
// TODO: comptime call memoization is currently not supported under incremental compilation
// since dependencies are not marked on callers. If we want to keep this around (we should
// check that it's worthwhile first!), each memoized call needs an `AnalUnit`.
if (zcu.comp.incremental) break :m false;
if (zcu.comp.config.incremental) break :m false;
if (!block.isComptime()) break :m false;
for (args) |a| {
const val = (try sema.resolveValue(a)).?;
@ -31208,7 +31208,7 @@ fn addReferenceEntry(
.func => |f| assert(ip.unwrapCoercedFunc(f) == f), // for `.{ .func = f }`, `f` must be uncoerced
else => {},
}
if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
if (!zcu.comp.config.incremental and zcu.comp.reference_trace == 0) return;
const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
if (gop.found_existing) return;
try zcu.addUnitReference(sema.owner, referenced_unit, src, inline_frame: {
@ -31225,7 +31225,7 @@ pub fn addTypeReferenceEntry(
referenced_type: InternPool.Index,
) !void {
const zcu = sema.pt.zcu;
if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
if (!zcu.comp.config.incremental and zcu.comp.reference_trace == 0) return;
const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type);
if (gop.found_existing) return;
try zcu.addTypeReference(sema.owner, referenced_type, src);
@ -36875,7 +36875,7 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
const pt = sema.pt;
if (!pt.zcu.comp.incremental) return;
if (!pt.zcu.comp.config.incremental) return;
const gop = try sema.dependencies.getOrPut(sema.gpa, dependee);
if (gop.found_existing) return;


@ -23,7 +23,7 @@ pub fn format(val: Value, writer: *std.Io.Writer) !void {
/// This is a debug function. In order to print values in a meaningful way
/// we also need access to the type.
pub fn dump(start_val: Value, w: std.Io.Writer) std.Io.Writer.Error!void {
pub fn dump(start_val: Value, w: *std.Io.Writer) std.Io.Writer.Error!void {
try w.print("(interned: {})", .{start_val.toIntern()});
}


@ -3166,7 +3166,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
}
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
if (!zcu.comp.incremental) return null;
if (!zcu.comp.config.incremental) return null;
if (zcu.outdated.count() == 0) {
// Any units in `potentially_outdated` must just be stuck in loops with one another: none of those


@ -1815,7 +1815,7 @@ fn createFileRootStruct(
wip_ty.setName(ip, try file.internFullyQualifiedName(pt), .none);
ip.namespacePtr(namespace_index).owner_type = wip_ty.index;
if (zcu.comp.incremental) {
if (zcu.comp.config.incremental) {
try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
}


@ -858,9 +858,11 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
) (CodeGenError || std.Io.Writer.Error)!void {
_ = atom_index;
const comp = bin_file.comp;
const gpa = comp.gpa;
const mod = comp.root_mod;
@ -914,7 +916,7 @@ pub fn generateLazy(
},
.bin_file = bin_file,
.debug_output = debug_output,
.code = code,
.w = w,
.prev_di_pc = undefined, // no debug info yet
.prev_di_line = undefined, // no debug info yet
.prev_di_column = undefined, // no debug info yet


@ -3,7 +3,7 @@
bin_file: *link.File,
lower: Lower,
debug_output: link.File.DebugInfoOutput,
code: *std.ArrayListUnmanaged(u8),
w: *std.Io.Writer,
prev_di_line: u32,
prev_di_column: u32,
@ -13,7 +13,7 @@ prev_di_pc: usize,
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
relocs: std.ArrayListUnmanaged(Reloc) = .empty,
pub const Error = Lower.Error || error{
pub const Error = Lower.Error || std.Io.Writer.Error || error{
EmitFail,
};
@ -25,13 +25,13 @@ pub fn emitMir(emit: *Emit) Error!void {
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
@intCast(emit.code.items.len),
@intCast(emit.w.end),
);
const lowered = try emit.lower.lowerMir(mir_index, .{ .allow_frame_locs = true });
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), lowered_inst.toU32(), .little);
const start_offset: u32 = @intCast(emit.w.end);
try emit.w.writeInt(u32, lowered_inst.toU32(), .little);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@ -175,7 +175,7 @@ fn fixupRelocs(emit: *Emit) Error!void {
return emit.fail("relocation target not found!", .{});
const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
const code = emit.w.buffered()[reloc.source + reloc.offset ..][0..4];
switch (reloc.fmt) {
.J => riscv_util.writeInstJ(code, @bitCast(disp)),
@ -187,7 +187,7 @@ fn fixupRelocs(emit: *Emit) Error!void {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
const delta_pc: usize = emit.w.end - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
switch (emit.debug_output) {
.dwarf => |dw| {
@ -196,7 +196,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
try dw.advancePCAndLine(delta_line, delta_pc);
emit.prev_di_line = line;
emit.prev_di_column = column;
emit.prev_di_pc = emit.code.items.len;
emit.prev_di_pc = emit.w.end;
},
.none => {},
}
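
Context for the ArrayList-to-Writer migration across these emit backends: the `*std.Io.Writer` handed in buffers the generated code, and its `end` field takes over the `code.items.len` bookkeeping. A standalone sketch of the pattern (not this backend's actual setup):

const std = @import("std");

test "fixed writer tracks bytes written in end" {
    var buf: [16]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeInt(u32, 0x00000013, .little); // RISC-V nop (addi x0, x0, 0)
    try std.testing.expectEqual(@as(usize, 4), w.end); // was: code.items.len
}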


@ -109,9 +109,11 @@ pub fn emit(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
) (codegen.CodeGenError || std.Io.Writer.Error)!void {
_ = atom_index;
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = comp.gpa;
@ -132,7 +134,7 @@ pub fn emit(
},
.bin_file = lf,
.debug_output = debug_output,
.code = code,
.w = w,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,


@ -21,7 +21,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayListUnmanaged(u8),
w: *std.Io.Writer,
prev_di_line: u32,
prev_di_column: u32,
@ -40,7 +40,7 @@ branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUn
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
const InnerError = error{
const InnerError = std.Io.Writer.Error || error{
OutOfMemory,
EmitFail,
};
@ -292,7 +292,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
.bpcc => switch (tag) {
.bpcc => {
const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int;
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len));
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.w.end));
log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
try emit.writeInstruction(
@ -310,7 +310,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
.bpr => switch (tag) {
.bpr => {
const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg;
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len));
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.w.end));
log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
try emit.writeInstruction(
@ -494,13 +494,13 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
const delta_pc: usize = emit.w.end - emit.prev_di_pc;
switch (emit.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.advancePCAndLine(delta_line, delta_pc);
emit.prev_di_line = line;
emit.prev_di_column = column;
emit.prev_di_pc = emit.code.items.len;
emit.prev_di_pc = emit.w.end;
},
else => {},
}
@ -675,13 +675,8 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
// SPARCv9 instructions are always arranged in BE regardless of the
// endianness mode the CPU is running in (Section 3.1 of the ISA specification).
// This is to ease porting in case someone wants to do a LE SPARCv9 backend.
const endian: Endian = .big;
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
try emit.w.writeInt(u32, instruction.toU32(), .big);
}


@ -380,9 +380,11 @@ pub fn emit(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
) (codegen.CodeGenError || std.Io.Writer.Error)!void {
_ = atom_index;
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const nav = func.owner_nav;
@ -393,7 +395,7 @@ pub fn emit(
.debug_output = debug_output,
.target = &mod.resolved_target.result,
.src_loc = src_loc,
.code = code,
.w = w,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,


@ -550,9 +550,9 @@ pub const MCValue = union(enum) {
@tagName(pl.reg),
}),
.indirect => |pl| try w.print("[{s} + 0x{x}]", .{ @tagName(pl.reg), pl.off }),
.indirect_load_frame => |pl| try w.print("[[{} + 0x{x}]]", .{ pl.index, pl.off }),
.load_frame => |pl| try w.print("[{} + 0x{x}]", .{ pl.index, pl.off }),
.lea_frame => |pl| try w.print("{} + 0x{x}", .{ pl.index, pl.off }),
.indirect_load_frame => |pl| try w.print("[[{f} + 0x{x}]]", .{ pl.index, pl.off }),
.load_frame => |pl| try w.print("[{f} + 0x{x}]", .{ pl.index, pl.off }),
.lea_frame => |pl| try w.print("{f} + 0x{x}", .{ pl.index, pl.off }),
.load_nav => |pl| try w.print("[nav:{d}]", .{@intFromEnum(pl)}),
.lea_nav => |pl| try w.print("nav:{d}", .{@intFromEnum(pl)}),
.load_uav => |pl| try w.print("[uav:{d}]", .{@intFromEnum(pl.val)}),
@ -561,10 +561,10 @@ pub const MCValue = union(enum) {
.lea_lazy_sym => |pl| try w.print("lazy:{s}:{d}", .{ @tagName(pl.kind), @intFromEnum(pl.ty) }),
.load_extern_func => |pl| try w.print("[extern:{d}]", .{@intFromEnum(pl)}),
.lea_extern_func => |pl| try w.print("extern:{d}", .{@intFromEnum(pl)}),
.elementwise_args => |pl| try w.print("elementwise:{d}:[{} + 0x{x}]", .{
.elementwise_args => |pl| try w.print("elementwise:{d}:[{f} + 0x{x}]", .{
pl.regs, pl.frame_index, pl.frame_off,
}),
.reserved_frame => |pl| try w.print("(dead:{})", .{pl}),
.reserved_frame => |pl| try w.print("(dead:{f})", .{pl}),
.air_ref => |pl| try w.print("(air:0x{x})", .{@intFromEnum(pl)}),
}
}
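
The `{}` to `{f}` switches in this hunk follow the new formatter rules: a type whose `format` method takes a `*std.Io.Writer` must be printed with `{f}`. A hypothetical stand-in (not the backend's real FrameIndex) showing the interface these operands implement:

const FrameIndex = enum(u32) {
    _,
    pub fn format(fi: FrameIndex, w: *std.Io.Writer) std.Io.Writer.Error!void {
        try w.print("frame:{d}", .{@intFromEnum(fi)});
    }
};
// w.print("[{f} + 0x{x}]", .{ fi, off }) then renders e.g. "[frame:3 + 0x10]"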
@ -1038,7 +1038,8 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
const gpa = pt.zcu.gpa;
@ -1081,7 +1082,7 @@ pub fn generateLazy(
else => |e| return e,
};
try function.getTmpMir().emitLazy(bin_file, pt, src_loc, lazy_sym, code, debug_output);
try function.getTmpMir().emitLazy(bin_file, pt, src_loc, lazy_sym, atom_index, w, debug_output);
}
const FormatNavData = struct {
@ -2022,7 +2023,7 @@ fn gen(
.{},
);
self.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } };
tracking_log.debug("spill {f} to {}", .{ self.ret_mcv.long, frame_index });
tracking_log.debug("spill {f} to {f}", .{ self.ret_mcv.long, frame_index });
},
else => unreachable,
}


@ -6,7 +6,7 @@ pt: Zcu.PerThread,
pic: bool,
atom_index: u32,
debug_output: link.File.DebugInfoOutput,
code: *std.ArrayListUnmanaged(u8),
w: *std.Io.Writer,
prev_di_loc: Loc,
/// Relative to the beginning of `code`.
@ -18,7 +18,8 @@ table_relocs: std.ArrayListUnmanaged(TableReloc),
pub const Error = Lower.Error || error{
EmitFail,
} || link.File.UpdateDebugInfoError;
NotFile,
} || std.posix.MMapError || std.posix.MRemapError || link.File.UpdateDebugInfoError;
pub fn emitMir(emit: *Emit) Error!void {
const comp = emit.bin_file.comp;
@ -29,12 +30,12 @@ pub fn emitMir(emit: *Emit) Error!void {
var local_index: usize = 0;
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
emit.code_offset_mapping.items[mir_index] = @intCast(emit.code.items.len);
emit.code_offset_mapping.items[mir_index] = @intCast(emit.w.end);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
lowered_inst: for (lowered.insts, 0..) |lowered_inst, lowered_index| {
if (lowered_inst.prefix == .directive) {
const start_offset: u32 = @intCast(emit.code.items.len);
const start_offset: u32 = @intCast(emit.w.end);
switch (emit.debug_output) {
.dwarf => |dwarf| switch (lowered_inst.encoding.mnemonic) {
.@".cfi_def_cfa" => try dwarf.genDebugFrame(start_offset, .{ .def_cfa = .{
@ -164,6 +165,8 @@ pub fn emitMir(emit: *Emit) Error!void {
.index = if (emit.bin_file.cast(.elf)) |elf_file|
elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
else if (emit.bin_file.cast(.elf2)) |elf|
@intFromEnum(try elf.lazySymbol(lazy_sym))
else if (emit.bin_file.cast(.macho)) |macho_file|
macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
@ -180,12 +183,15 @@ pub fn emitMir(emit: *Emit) Error!void {
.extern_func => |extern_func| .{
.index = if (emit.bin_file.cast(.elf)) |elf_file|
try elf_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
else if (emit.bin_file.cast(.macho)) |macho_file|
else if (emit.bin_file.cast(.elf2)) |elf| @intFromEnum(try elf.globalSymbol(.{
.name = extern_func.toSlice(&emit.lower.mir).?,
.type = .FUNC,
})) else if (emit.bin_file.cast(.macho)) |macho_file|
try macho_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
else if (emit.bin_file.cast(.coff)) |coff_file|
try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, "compiler_rt")
else
return emit.fail("external symbols unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
return emit.fail("external symbol unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
.is_extern = true,
.type = .symbol,
},
@ -205,7 +211,7 @@ pub fn emitMir(emit: *Emit) Error!void {
},
else => {},
}
if (emit.bin_file.cast(.elf)) |_| {
if (emit.bin_file.cast(.elf) != null or emit.bin_file.cast(.elf2) != null) {
if (!emit.pic) switch (lowered_inst.encoding.mnemonic) {
.lea => try emit.encodeInst(try .new(.none, .mov, &.{
lowered_inst.ops[0],
@ -315,7 +321,7 @@ pub fn emitMir(emit: *Emit) Error!void {
},
.branch, .tls => unreachable,
.tlv => {
if (emit.bin_file.cast(.elf)) |elf_file| {
if (emit.bin_file.cast(.elf) != null or emit.bin_file.cast(.elf2) != null) {
// TODO handle extern TLS vars, i.e., emit GD model
if (emit.pic) switch (lowered_inst.encoding.mnemonic) {
.lea, .mov => {
@ -337,7 +343,12 @@ pub fn emitMir(emit: *Emit) Error!void {
}, emit.lower.target), &.{.{
.op_index = 0,
.target = .{
.index = try elf_file.getGlobalSymbol("__tls_get_addr", null),
.index = if (emit.bin_file.cast(.elf)) |elf_file|
try elf_file.getGlobalSymbol("__tls_get_addr", null)
else if (emit.bin_file.cast(.elf2)) |elf| @intFromEnum(try elf.globalSymbol(.{
.name = "__tls_get_addr",
.type = .FUNC,
})) else unreachable,
.is_extern = true,
.type = .branch,
},
@ -441,7 +452,7 @@ pub fn emitMir(emit: *Emit) Error!void {
log.debug("mirDbgEnterBlock (line={d}, col={d})", .{
emit.prev_di_loc.line, emit.prev_di_loc.column,
});
try dwarf.enterBlock(emit.code.items.len);
try dwarf.enterBlock(emit.w.end);
},
.none => {},
},
@ -450,7 +461,7 @@ pub fn emitMir(emit: *Emit) Error!void {
log.debug("mirDbgLeaveBlock (line={d}, col={d})", .{
emit.prev_di_loc.line, emit.prev_di_loc.column,
});
try dwarf.leaveBlock(emit.code.items.len);
try dwarf.leaveBlock(emit.w.end);
},
.none => {},
},
@ -459,7 +470,7 @@ pub fn emitMir(emit: *Emit) Error!void {
log.debug("mirDbgEnterInline (line={d}, col={d})", .{
emit.prev_di_loc.line, emit.prev_di_loc.column,
});
try dwarf.enterInlineFunc(mir_inst.data.ip_index, emit.code.items.len, emit.prev_di_loc.line, emit.prev_di_loc.column);
try dwarf.enterInlineFunc(mir_inst.data.ip_index, emit.w.end, emit.prev_di_loc.line, emit.prev_di_loc.column);
},
.none => {},
},
@ -468,7 +479,7 @@ pub fn emitMir(emit: *Emit) Error!void {
log.debug("mirDbgLeaveInline (line={d}, col={d})", .{
emit.prev_di_loc.line, emit.prev_di_loc.column,
});
try dwarf.leaveInlineFunc(mir_inst.data.ip_index, emit.code.items.len);
try dwarf.leaveInlineFunc(mir_inst.data.ip_index, emit.w.end);
},
.none => {},
},
@ -634,7 +645,7 @@ pub fn emitMir(emit: *Emit) Error!void {
for (emit.relocs.items) |reloc| {
const target = emit.code_offset_mapping.items[reloc.target];
const disp = @as(i64, @intCast(target)) - @as(i64, @intCast(reloc.inst_offset + reloc.inst_length)) + reloc.target_offset;
const inst_bytes = emit.code.items[reloc.inst_offset..][0..reloc.inst_length];
const inst_bytes = emit.w.buffered()[reloc.inst_offset..][0..reloc.inst_length];
switch (reloc.source_length) {
else => unreachable,
inline 1, 4 => |source_length| std.mem.writeInt(
@ -646,12 +657,12 @@ pub fn emitMir(emit: *Emit) Error!void {
}
}
if (emit.lower.mir.table.len > 0) {
const ptr_size = @divExact(emit.lower.target.ptrBitWidth(), 8);
var table_offset = std.mem.alignForward(u32, @intCast(emit.w.end), ptr_size);
if (emit.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
const ptr_size = @divExact(emit.lower.target.ptrBitWidth(), 8);
var table_offset = std.mem.alignForward(u32, @intCast(emit.code.items.len), ptr_size);
for (emit.table_relocs.items) |table_reloc| try atom.addReloc(gpa, .{
.r_offset = table_reloc.source_offset,
.r_info = @as(u64, emit.atom_index) << 32 | @intFromEnum(std.elf.R_X86_64.@"32"),
@ -665,7 +676,26 @@ pub fn emitMir(emit: *Emit) Error!void {
}, zo);
table_offset += ptr_size;
}
try emit.code.appendNTimes(gpa, 0, table_offset - emit.code.items.len);
try emit.w.splatByteAll(0, table_offset - emit.w.end);
} else if (emit.bin_file.cast(.elf2)) |elf| {
for (emit.table_relocs.items) |table_reloc| try elf.addReloc(
@enumFromInt(emit.atom_index),
table_reloc.source_offset,
@enumFromInt(emit.atom_index),
@as(i64, table_offset) + table_reloc.target_offset,
.{ .x86_64 = .@"32" },
);
for (emit.lower.mir.table) |entry| {
try elf.addReloc(
@enumFromInt(emit.atom_index),
table_offset,
@enumFromInt(emit.atom_index),
emit.code_offset_mapping.items[entry],
.{ .x86_64 = .@"64" },
);
table_offset += ptr_size;
}
try emit.w.splatByteAll(0, table_offset - emit.w.end);
} else unreachable;
}
}
@ -696,16 +726,12 @@ const RelocInfo = struct {
fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocInfo) Error!void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const start_offset: u32 = @intCast(emit.code.items.len);
{
var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, emit.code);
defer emit.code.* = aw.toArrayList();
lowered_inst.encode(&aw.writer, .{}) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
}
const end_offset: u32 = @intCast(emit.code.items.len);
const start_offset: u32 = @intCast(emit.w.end);
lowered_inst.encode(emit.w, .{}) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const end_offset: u32 = @intCast(emit.w.end);
for (reloc_info) |reloc| switch (reloc.target.type) {
.inst => {
const inst_length: u4 = @intCast(end_offset - start_offset);
@ -769,7 +795,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
.symbolnum = @intCast(reloc.target.index),
},
});
} else if (emit.bin_file.cast(.coff)) |coff_file| {
} else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
@enumFromInt(emit.atom_index),
end_offset - 4,
@enumFromInt(reloc.target.index),
reloc.off,
.{ .x86_64 = .@"32" },
) else if (emit.bin_file.cast(.coff)) |coff_file| {
const atom_index = coff_file.getAtomIndexForSymbol(
.{ .sym_index = emit.atom_index, .file = null },
).?;
@ -794,7 +826,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
.r_info = @as(u64, reloc.target.index) << 32 | @intFromEnum(r_type),
.r_addend = reloc.off - 4,
}, zo);
} else if (emit.bin_file.cast(.macho)) |macho_file| {
} else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
@enumFromInt(emit.atom_index),
end_offset - 4,
@enumFromInt(reloc.target.index),
reloc.off - 4,
.{ .x86_64 = .PC32 },
) else if (emit.bin_file.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
try atom.addReloc(macho_file, .{
@ -849,7 +887,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
.r_info = @as(u64, reloc.target.index) << 32 | @intFromEnum(r_type),
.r_addend = reloc.off,
}, zo);
} else if (emit.bin_file.cast(.macho)) |macho_file| {
} else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
@enumFromInt(emit.atom_index),
end_offset - 4,
@enumFromInt(reloc.target.index),
reloc.off,
.{ .x86_64 = .TPOFF32 },
) else if (emit.bin_file.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
try atom.addReloc(macho_file, .{
@ -908,7 +952,7 @@ const Loc = struct {
fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
const delta_line = @as(i33, loc.line) - @as(i33, emit.prev_di_loc.line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
const delta_pc: usize = emit.w.end - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
switch (emit.debug_output) {
.dwarf => |dwarf| {
@ -916,7 +960,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
if (loc.column != emit.prev_di_loc.column) try dwarf.setColumn(loc.column);
try dwarf.advancePCAndLine(delta_line, delta_pc);
emit.prev_di_loc = loc;
emit.prev_di_pc = emit.code.items.len;
emit.prev_di_pc = emit.w.end;
},
.none => {},
}

View file

@ -1976,7 +1976,8 @@ pub fn emit(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
const zcu = pt.zcu;
@ -1997,17 +1998,9 @@ pub fn emit(
.bin_file = lf,
.pt = pt,
.pic = mod.pic,
.atom_index = sym: {
if (lf.cast(.elf)) |ef| break :sym try ef.zigObjectPtr().?.getOrCreateMetadataForNav(zcu, nav);
if (lf.cast(.macho)) |mf| break :sym try mf.getZigObject().?.getOrCreateMetadataForNav(mf, nav);
if (lf.cast(.coff)) |cf| {
const atom = try cf.getOrCreateAtomForNav(nav);
break :sym cf.getAtom(atom).getSymbolIndex().?;
}
unreachable;
},
.atom_index = atom_index,
.debug_output = debug_output,
.code = code,
.w = w,
.prev_di_loc = .{
.line = func.lbrace_line,
@ -2037,7 +2030,8 @@ pub fn emitLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
const zcu = pt.zcu;
@ -2055,20 +2049,9 @@ pub fn emitLazy(
.bin_file = lf,
.pt = pt,
.pic = mod.pic,
.atom_index = sym: {
if (lf.cast(.elf)) |ef| break :sym ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_sym) catch |err|
return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
if (lf.cast(.macho)) |mf| break :sym mf.getZigObject().?.getOrCreateMetadataForLazySymbol(mf, pt, lazy_sym) catch |err|
return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
if (lf.cast(.coff)) |cf| {
const atom = cf.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
break :sym cf.getAtom(atom).getSymbolIndex().?;
}
unreachable;
},
.atom_index = atom_index,
.debug_output = debug_output,
.code = code,
.w = w,
.prev_di_loc = undefined,
.prev_di_pc = undefined,

View file

@ -727,6 +727,14 @@ pub const FrameIndex = enum(u32) {
pub fn isNamed(fi: FrameIndex) bool {
return @intFromEnum(fi) < named_count;
}
pub fn format(fi: FrameIndex, writer: *std.Io.Writer) std.Io.Writer.Error!void {
if (fi.isNamed()) {
try writer.print("FrameIndex.{t}", .{fi});
} else {
try writer.print("FrameIndex({d})", .{@intFromEnum(fi)});
}
}
};
pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };

View file

@ -259,7 +259,7 @@ pub const Instruction = struct {
switch (sib.base) {
.none => any = false,
.reg => |reg| try w.print("{s}", .{@tagName(reg)}),
.frame => |frame_index| try w.print("{}", .{frame_index}),
.frame => |frame_index| try w.print("{f}", .{frame_index}),
.table => try w.print("Table", .{}),
.rip_inst => |inst_index| try w.print("RipInst({d})", .{inst_index}),
.nav => |nav| try w.print("Nav({d})", .{@intFromEnum(nav)}),

View file

@ -6,7 +6,6 @@ const link = @import("link.zig");
const log = std.log.scoped(.codegen);
const mem = std.mem;
const math = std.math;
const ArrayList = std.ArrayList;
const target_util = @import("target.zig");
const trace = @import("tracy.zig").trace;
@ -179,10 +178,11 @@ pub fn emitFunction(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
atom_index: u32,
any_mir: *const AnyMir,
code: *ArrayList(u8),
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
) (CodeGenError || std.Io.Writer.Error)!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
@ -195,7 +195,7 @@ pub fn emitFunction(
=> |backend| {
dev.check(devFeatureForBackend(backend));
const mir = &@field(any_mir, AnyMir.tag(backend));
return mir.emit(lf, pt, src_loc, func_index, code, debug_output);
return mir.emit(lf, pt, src_loc, func_index, atom_index, w, debug_output);
},
}
}
@ -205,9 +205,10 @@ pub fn generateLazyFunction(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *ArrayList(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
) (CodeGenError || std.Io.Writer.Error)!void {
const zcu = pt.zcu;
const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
&zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
@ -217,19 +218,11 @@ pub fn generateLazyFunction(
else => unreachable,
inline .stage2_riscv64, .stage2_x86_64 => |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, atom_index, w, debug_output);
},
}
}
fn writeFloat(comptime F: type, f: F, target: *const std.Target, endian: std.builtin.Endian, code: []u8) void {
_ = target;
const bits = @typeInfo(F).float.bits;
const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
const int: Int = @bitCast(f);
mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
}
pub fn generateLazySymbol(
bin_file: *link.File,
pt: Zcu.PerThread,
@ -237,17 +230,14 @@ pub fn generateLazySymbol(
lazy_sym: link.File.LazySymbol,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
code: *ArrayList(u8),
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
reloc_parent: link.File.RelocInfo.Parent,
) CodeGenError!void {
_ = reloc_parent;
) (CodeGenError || std.Io.Writer.Error)!void {
const tracy = trace(@src());
defer tracy.end();
const comp = bin_file.comp;
const gpa = comp.gpa;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = &comp.root_mod.resolved_target.result;
@ -260,37 +250,36 @@ pub fn generateLazySymbol(
if (lazy_sym.kind == .code) {
alignment.* = target_util.defaultFunctionAlignment(target);
return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output);
return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, reloc_parent.atom_index, w, debug_output);
}
if (lazy_sym.ty == .anyerror_type) {
alignment.* = .@"4";
const err_names = ip.global_error_set.getNamesFromMainThread();
var offset_index: u32 = @intCast(code.items.len);
var string_index: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
try code.resize(gpa, offset_index + string_index);
mem.writeInt(u32, code.items[offset_index..][0..4], @intCast(err_names.len), endian);
const strings_start: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
var string_index = strings_start;
try w.rebase(w.end, string_index);
w.writeInt(u32, @intCast(err_names.len), endian) catch unreachable;
if (err_names.len == 0) return;
offset_index += 4;
for (err_names) |err_name_nts| {
const err_name = err_name_nts.toSlice(ip);
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
offset_index += 4;
try code.ensureUnusedCapacity(gpa, err_name.len + 1);
code.appendSliceAssumeCapacity(err_name);
code.appendAssumeCapacity(0);
string_index += @intCast(err_name.len + 1);
w.writeInt(u32, string_index, endian) catch unreachable;
string_index += @intCast(err_name_nts.toSlice(ip).len + 1);
}
w.writeInt(u32, string_index, endian) catch unreachable;
try w.rebase(w.end, string_index - strings_start);
for (err_names) |err_name_nts| {
w.writeAll(err_name_nts.toSlice(ip)) catch unreachable;
w.writeByte(0) catch unreachable;
}
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
} else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu) == .@"enum") {
alignment.* = .@"1";
const enum_ty = Type.fromInterned(lazy_sym.ty);
const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
try code.ensureUnusedCapacity(gpa, tag_name.len + 1);
code.appendSliceAssumeCapacity(tag_name);
code.appendAssumeCapacity(0);
try w.rebase(w.end, tag_name.len + 1);
w.writeAll(tag_name) catch unreachable;
w.writeByte(0) catch unreachable;
}
} else {
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
@ -312,14 +301,13 @@ pub fn generateSymbol(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
code: *ArrayList(u8),
w: *std.Io.Writer,
reloc_parent: link.File.RelocInfo.Parent,
) GenerateSymbolError!void {
) (GenerateSymbolError || std.Io.Writer.Error)!void {
const tracy = trace(@src());
defer tracy.end();
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
@ -330,7 +318,7 @@ pub fn generateSymbol(
if (val.isUndef(zcu)) {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0xaa, abi_size);
try w.splatByteAll(0xaa, abi_size);
return;
}
@ -360,7 +348,7 @@ pub fn generateSymbol(
.null => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.empty_tuple => return,
.false, .true => try code.append(gpa, switch (simple_value) {
.false, .true => try w.writeByte(switch (simple_value) {
.false => 0,
.true => 1,
else => unreachable,
@ -376,11 +364,11 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
const int_val = val.toBigInt(&space, zcu);
int_val.writeTwosComplement(try code.addManyAsSlice(gpa, abi_size), endian);
int_val.writeTwosComplement(try w.writableSlice(abi_size), endian);
},
.err => |err| {
const int = try pt.getErrorValue(err.name);
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), @intCast(int), endian);
try w.writeInt(u16, @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(zcu);
@ -390,7 +378,7 @@ pub fn generateSymbol(
};
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
try w.writeInt(u16, err_val, endian);
return;
}
@ -400,63 +388,63 @@ pub fn generateSymbol(
// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
try w.writeInt(u16, err_val, endian);
}
// emit payload part of the error union
{
const begin = code.items.len;
const begin = w.end;
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
.err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, reloc_parent);
const unpadded_end = code.items.len - begin;
}), w, reloc_parent);
const unpadded_end = w.end - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
try w.splatByteAll(0, padding);
}
}
// Payload size is larger than error set, so emit our error set last
if (error_align.compare(.lte, payload_align)) {
const begin = code.items.len;
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
const unpadded_end = code.items.len - begin;
const begin = w.end;
try w.writeInt(u16, err_val, endian);
const unpadded_end = w.end - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
try w.splatByteAll(0, padding);
}
}
},
.enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(zcu);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), w, reloc_parent);
},
.float => |float| storage: switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
.f16 => |f16_val| try w.writeInt(u16, @bitCast(f16_val), endian),
.f32 => |f32_val| try w.writeInt(u32, @bitCast(f32_val), endian),
.f64 => |f64_val| try w.writeInt(u64, @bitCast(f64_val), endian),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(gpa, 10));
try w.writeInt(u80, @bitCast(f80_val), endian);
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0, abi_size - 10);
try w.splatByteAll(0, abi_size - 10);
},
.f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
else => unreachable,
16 => continue :storage .{ .f16 = @floatCast(f128_val) },
32 => continue :storage .{ .f32 = @floatCast(f128_val) },
64 => continue :storage .{ .f64 = @floatCast(f128_val) },
128 => writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
128 => try w.writeInt(u128, @bitCast(f128_val), endian),
},
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), w, reloc_parent, 0),
.slice => |slice| {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), w, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), w, reloc_parent);
},
.opt => {
const payload_type = ty.optionalChild(zcu);
@ -465,9 +453,9 @@ pub fn generateSymbol(
if (ty.optionalReprIsPayload(zcu)) {
if (payload_val) |value| {
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
} else {
try code.appendNTimes(gpa, 0, abi_size);
try w.splatByteAll(0, abi_size);
}
} else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
@ -475,15 +463,15 @@ pub fn generateSymbol(
const value = payload_val orelse Value.fromInterned(try pt.intern(.{
.undef = payload_type.toIntern(),
}));
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
}
try code.append(gpa, @intFromBool(payload_val != null));
try code.appendNTimes(gpa, 0, padding);
try w.writeByte(@intFromBool(payload_val != null));
try w.splatByteAll(0, padding);
}
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
.array_type => |array_type| switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
.bytes => |bytes| try w.writeAll(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
@ -494,14 +482,14 @@ pub fn generateSymbol(
elem
else
array_type.sentinel,
}), code, reloc_parent);
}), w, reloc_parent);
}
},
},
.vector_type => |vector_type| {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(gpa, abi_size);
const bytes = try w.writableSlice(abi_size);
@memset(bytes, 0xaa);
var index: usize = 0;
const len = math.cast(usize, vector_type.len) orelse return error.Overflow;
@ -540,7 +528,7 @@ pub fn generateSymbol(
}
} else {
switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(vector_type.len, ip)),
.bytes => |bytes| try w.writeAll(bytes.toSlice(vector_type.len, ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
@ -550,7 +538,7 @@ pub fn generateSymbol(
math.cast(usize, index) orelse return error.Overflow
],
.repeated_elem => |elem| elem,
}), code, reloc_parent);
}), w, reloc_parent);
}
},
}
@ -558,11 +546,11 @@ pub fn generateSymbol(
const padding = abi_size -
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
return error.Overflow);
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
if (padding > 0) try w.splatByteAll(0, padding);
}
},
.tuple_type => |tuple| {
const struct_begin = code.items.len;
const struct_begin = w.end;
for (
tuple.types.get(ip),
tuple.values.get(ip),
@ -580,8 +568,8 @@ pub fn generateSymbol(
.repeated_elem => |elem| elem,
};
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
const unpadded_field_end = code.items.len - struct_begin;
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
const unpadded_field_end = w.end - struct_begin;
// Pad struct members if required
const padded_field_end = ty.structFieldOffset(index + 1, zcu);
@ -589,7 +577,7 @@ pub fn generateSymbol(
return error.Overflow;
if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
try w.splatByteAll(0, padding);
}
}
},
@ -598,8 +586,9 @@ pub fn generateSymbol(
switch (struct_type.layout) {
.@"packed" => {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
const current_pos = code.items.len;
try code.appendNTimes(gpa, 0, abi_size);
const start = w.end;
const buffer = try w.writableSlice(abi_size);
@memset(buffer, 0);
var bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
@ -619,22 +608,20 @@ pub fn generateSymbol(
error.DivisionByZero => unreachable,
error.UnexpectedRemainder => return error.RelocationNotByteAligned,
};
code.items.len = current_pos + field_offset;
// TODO: code.lockPointers();
w.end = start + field_offset;
defer {
assert(code.items.len == current_pos + field_offset + @divExact(target.ptrBitWidth(), 8));
// TODO: code.unlockPointers();
code.items.len = current_pos + abi_size;
assert(w.end == start + field_offset + @divExact(target.ptrBitWidth(), 8));
w.end = start + abi_size;
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
Value.fromInterned(field_val).writeToPackedMemory(.fromInterned(field_ty), pt, buffer, bits) catch unreachable;
}
bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
}
},
.auto, .@"extern" => {
const struct_begin = code.items.len;
const struct_begin = w.end;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
@ -654,11 +641,11 @@ pub fn generateSymbol(
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
offsets[field_index] - (w.end - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
if (padding > 0) try w.splatByteAll(0, padding);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
}
const size = struct_type.sizeUnordered(ip);
@ -666,10 +653,9 @@ pub fn generateSymbol(
const padding = math.cast(
usize,
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
std.mem.alignForward(u64, size, @max(alignment, 1)) - (w.end - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
if (padding > 0) try w.splatByteAll(0, padding);
},
}
},
@ -679,12 +665,12 @@ pub fn generateSymbol(
const layout = ty.unionGetLayout(zcu);
if (layout.payload_size == 0) {
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
}
// Check if we should store the tag first.
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
}
const union_obj = zcu.typeToUnion(ty).?;
@ -692,24 +678,24 @@ pub fn generateSymbol(
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(gpa, 0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
try w.splatByteAll(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
try w.splatByteAll(0, padding);
}
}
} else {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);
}
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
if (layout.padding > 0) {
try code.appendNTimes(gpa, 0, layout.padding);
try w.splatByteAll(0, layout.padding);
}
}
},
@ -722,30 +708,30 @@ fn lowerPtr(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
ptr_val: InternPool.Index,
code: *ArrayList(u8),
w: *std.Io.Writer,
reloc_parent: link.File.RelocInfo.Parent,
prev_offset: u64,
) GenerateSymbolError!void {
) (GenerateSymbolError || std.Io.Writer.Error)!void {
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset),
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset),
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent),
.nav => |nav| try lowerNavRef(bin_file, pt, nav, w, reloc_parent, offset),
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, w, reloc_parent, offset),
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), w, reloc_parent),
.eu_payload => |eu_ptr| try lowerPtr(
bin_file,
pt,
src_loc,
eu_ptr,
code,
w,
reloc_parent,
offset + errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
zcu,
),
),
.opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, code, reloc_parent, offset),
.opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, w, reloc_parent, offset),
.field => |field| {
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
@ -764,7 +750,7 @@ fn lowerPtr(
},
else => unreachable,
};
return lowerPtr(bin_file, pt, src_loc, field.base, code, reloc_parent, offset + field_off);
return lowerPtr(bin_file, pt, src_loc, field.base, w, reloc_parent, offset + field_off);
},
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
@ -775,12 +761,11 @@ fn lowerUavRef(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
code: *ArrayList(u8),
w: *std.Io.Writer,
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) GenerateSymbolError!void {
) (GenerateSymbolError || std.Io.Writer.Error)!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const comp = lf.comp;
const target = &comp.root_mod.resolved_target.result;
@ -790,10 +775,9 @@ fn lowerUavRef(
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
log.debug("lowerUavRef: ty = {f}", .{uav_ty.fmt(pt)});
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
try w.splatByteAll(0xaa, ptr_width_bytes);
return;
}
@ -804,29 +788,32 @@ fn lowerUavRef(
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
try wasm.addUavReloc(code.items.len, uav.val, uav.orig_ty, @intCast(offset));
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
try wasm.addUavReloc(w.end, uav.val, uav.orig_ty, @intCast(offset));
try w.splatByteAll(0, ptr_width_bytes);
return;
},
else => {},
}
const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
const uav_align = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
.sym_index => {},
.fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
}
const vaddr = try lf.getUavVAddr(uav_val, .{
const vaddr = lf.getUavVAddr(uav_val, .{
.parent = reloc_parent,
.offset = code.items.len,
.offset = w.end,
.addend = @intCast(offset),
});
}) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| std.debug.panic("TODO rework lowerUav. internal error: {t}", .{e}),
};
const endian = target.cpu.arch.endian();
switch (ptr_width_bytes) {
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
2 => try w.writeInt(u16, @intCast(vaddr), endian),
4 => try w.writeInt(u32, @intCast(vaddr), endian),
8 => try w.writeInt(u64, vaddr, endian),
else => unreachable,
}
}
@ -835,10 +822,10 @@ fn lowerNavRef(
lf: *link.File,
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
code: *ArrayList(u8),
w: *std.Io.Writer,
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) GenerateSymbolError!void {
) (GenerateSymbolError || std.Io.Writer.Error)!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -848,10 +835,8 @@ fn lowerNavRef(
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
try w.splatByteAll(0xaa, ptr_width_bytes);
return;
}
@ -870,13 +855,13 @@ fn lowerNavRef(
} else {
try wasm.func_table_fixups.append(gpa, .{
.table_index = @enumFromInt(gop.index),
.offset = @intCast(code.items.len),
.offset = @intCast(w.end),
});
}
} else {
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(w.end),
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) },
.tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
.addend = @intCast(offset),
@ -885,12 +870,12 @@ fn lowerNavRef(
try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1);
wasm.nav_fixups.appendAssumeCapacity(.{
.navs_exe_index = try wasm.refNavExe(nav_index),
.offset = @intCast(code.items.len),
.offset = @intCast(w.end),
.addend = @intCast(offset),
});
}
}
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
try w.splatByteAll(0, ptr_width_bytes);
return;
},
else => {},
@ -898,14 +883,14 @@ fn lowerNavRef(
const vaddr = lf.getNavVAddr(pt, nav_index, .{
.parent = reloc_parent,
.offset = code.items.len,
.offset = w.end,
.addend = @intCast(offset),
}) catch @panic("TODO rework getNavVAddr");
const endian = target.cpu.arch.endian();
switch (ptr_width_bytes) {
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
2 => try w.writeInt(u16, @intCast(vaddr), endian),
4 => try w.writeInt(u32, @intCast(vaddr), endian),
8 => try w.writeInt(u64, vaddr, endian),
else => unreachable,
}
}
@ -962,6 +947,16 @@ pub fn genNavRef(
},
.link_once => unreachable,
}
} else if (lf.cast(.elf2)) |elf| {
return .{ .sym_index = @intFromEnum(elf.navSymbol(zcu, nav_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return .{ .fail = try ErrorMsg.create(
zcu.gpa,
src_loc,
"linker failed to create a nav: {t}",
.{e},
) },
}) };
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
switch (linkage) {

View file

@ -56,13 +56,13 @@ pub fn emit(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
atom_index: u32,
w: *std.Io.Writer,
debug_output: link.File.DebugInfoOutput,
) !void {
_ = debug_output;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const nav = ip.getNav(func.owner_nav);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
@ -81,20 +81,19 @@ pub fn emit(
@as(u5, @intCast(func_align.minStrict(.@"16").toByteUnits().?)),
Instruction.size,
) - 1);
try code.ensureUnusedCapacity(gpa, Instruction.size *
(code_len + literals_align_gap + mir.literals.len));
emitInstructionsForward(code, mir.prologue);
emitInstructionsBackward(code, mir.body);
const body_end: u32 = @intCast(code.items.len);
emitInstructionsBackward(code, mir.epilogue);
code.appendNTimesAssumeCapacity(0, Instruction.size * literals_align_gap);
code.appendSliceAssumeCapacity(@ptrCast(mir.literals));
try w.rebase(w.end, Instruction.size * (code_len + literals_align_gap + mir.literals.len));
emitInstructionsForward(w, mir.prologue) catch unreachable;
emitInstructionsBackward(w, mir.body) catch unreachable;
const body_end: u32 = @intCast(w.end);
emitInstructionsBackward(w, mir.epilogue) catch unreachable;
w.splatByteAll(0, Instruction.size * literals_align_gap) catch unreachable;
w.writeAll(@ptrCast(mir.literals)) catch unreachable;
mir_log.debug("", .{});
for (mir.nav_relocs) |nav_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
atom_index,
switch (try @import("../../codegen.zig").genNavRef(
lf,
pt,
@ -112,7 +111,7 @@ pub fn emit(
for (mir.uav_relocs) |uav_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
atom_index,
switch (try lf.lowerUav(
pt,
uav_reloc.uav.val,
@ -129,7 +128,7 @@ pub fn emit(
for (mir.lazy_relocs) |lazy_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
atom_index,
if (lf.cast(.elf)) |ef|
ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_reloc.symbol) catch |err|
return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
@ -150,7 +149,7 @@ pub fn emit(
for (mir.global_relocs) |global_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
atom_index,
if (lf.cast(.elf)) |ef|
try ef.getGlobalSymbol(std.mem.span(global_reloc.name), null)
else if (lf.cast(.macho)) |mf|
@ -168,30 +167,30 @@ pub fn emit(
var instruction = mir.body[literal_reloc.label];
instruction.load_store.register_literal.group.imm19 += literal_reloc_offset;
instruction.write(
code.items[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
w.buffered()[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
);
}
}
fn emitInstructionsForward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
for (instructions) |instruction| emitInstruction(code, instruction);
fn emitInstructionsForward(w: *std.Io.Writer, instructions: []const Instruction) !void {
for (instructions) |instruction| try emitInstruction(w, instruction);
}
fn emitInstructionsBackward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
fn emitInstructionsBackward(w: *std.Io.Writer, instructions: []const Instruction) !void {
var instruction_index = instructions.len;
while (instruction_index > 0) {
instruction_index -= 1;
emitInstruction(code, instructions[instruction_index]);
try emitInstruction(w, instructions[instruction_index]);
}
}
fn emitInstruction(code: *std.ArrayListUnmanaged(u8), instruction: Instruction) void {
fn emitInstruction(w: *std.Io.Writer, instruction: Instruction) !void {
mir_log.debug(" {f}", .{instruction});
instruction.write(code.addManyAsArrayAssumeCapacity(Instruction.size));
instruction.write(try w.writableArray(Instruction.size));
}
fn emitReloc(
lf: *link.File,
zcu: *Zcu,
owner_nav: InternPool.Nav.Index,
atom_index: u32,
sym_index: u32,
instruction: Instruction,
offset: u32,
@ -202,7 +201,7 @@ fn emitReloc(
else => unreachable,
.data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
const atom = zo.symbol(atom_index).atom(ef).?;
const r_type: std.elf.R_AARCH64 = switch (decoded.decode()) {
else => unreachable,
.pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
@ -221,7 +220,7 @@ fn emitReloc(
}, zo);
} else if (lf.cast(.macho)) |mf| {
const zo = mf.getZigObject().?;
const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
const atom = zo.symbols.items[atom_index].getAtom(mf).?;
switch (decoded.decode()) {
else => unreachable,
.pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
@ -260,7 +259,7 @@ fn emitReloc(
},
.branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
const atom = zo.symbol(atom_index).atom(ef).?;
const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
.b => .JUMP26,
.bl => .CALL26,
@ -272,7 +271,7 @@ fn emitReloc(
}, zo);
} else if (lf.cast(.macho)) |mf| {
const zo = mf.getZigObject().?;
const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
const atom = zo.symbols.items[atom_index].getAtom(mf).?;
try atom.addReloc(mf, .{
.tag = .@"extern",
.offset = offset,
@ -289,7 +288,7 @@ fn emitReloc(
},
.load_store => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
const atom = zo.symbol(atom_index).atom(ef).?;
const r_type: std.elf.R_AARCH64 = switch (decoded.decode().register_unsigned_immediate.decode()) {
.integer => |integer| switch (integer.decode()) {
.unallocated, .prfm => unreachable,
@ -316,7 +315,7 @@ fn emitReloc(
}, zo);
} else if (lf.cast(.macho)) |mf| {
const zo = mf.getZigObject().?;
const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
const atom = zo.symbols.items[atom_index].getAtom(mf).?;
try atom.addReloc(mf, .{
.tag = .@"extern",
.offset = offset,

View file

@ -97,6 +97,7 @@ pub const Env = enum {
.lld_linker,
.coff_linker,
.elf_linker,
.elf2_linker,
.macho_linker,
.c_linker,
.wasm_linker,
@ -163,6 +164,7 @@ pub const Env = enum {
.incremental,
.aarch64_backend,
.elf_linker,
.elf2_linker,
=> true,
else => Env.sema.supports(feature),
},
@ -210,6 +212,7 @@ pub const Env = enum {
.legalize,
.x86_64_backend,
.elf_linker,
.elf2_linker,
=> true,
else => Env.sema.supports(feature),
},
@ -282,6 +285,7 @@ pub const Feature = enum {
lld_linker,
coff_linker,
elf_linker,
elf2_linker,
macho_linker,
c_linker,
wasm_linker,

View file

@ -219,6 +219,7 @@ pub const Diags = struct {
}
pub fn addError(diags: *Diags, comptime format: []const u8, args: anytype) void {
@branchHint(.cold);
return addErrorSourceLocation(diags, .none, format, args);
}
@ -529,7 +530,7 @@ pub const File = struct {
const lld: *Lld = try .createEmpty(arena, comp, emit, options);
return &lld.base;
}
switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt, comp.config.use_new_linker)) {
.plan9 => return error.UnsupportedObjectFormat,
inline else => |tag| {
dev.check(tag.devFeature());
@ -552,7 +553,7 @@ pub const File = struct {
const lld: *Lld = try .createEmpty(arena, comp, emit, options);
return &lld.base;
}
switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt, comp.config.use_new_linker)) {
.plan9 => return error.UnsupportedObjectFormat,
inline else => |tag| {
dev.check(tag.devFeature());
@ -579,7 +580,8 @@ pub const File = struct {
const emit = base.emit;
if (base.child_pid) |pid| {
if (builtin.os.tag == .windows) {
base.cast(.coff).?.ptraceAttach(pid) catch |err| {
const coff_file = base.cast(.coff).?;
coff_file.ptraceAttach(pid) catch |err| {
log.warn("attaching failed with error: {s}", .{@errorName(err)});
};
} else {
@ -597,8 +599,11 @@ pub const File = struct {
.linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
log.warn("ptrace failure: {s}", .{@errorName(err)});
},
.macos => base.cast(.macho).?.ptraceAttach(pid) catch |err| {
log.warn("attaching failed with error: {s}", .{@errorName(err)});
.macos => {
const macho_file = base.cast(.macho).?;
macho_file.ptraceAttach(pid) catch |err| {
log.warn("attaching failed with error: {s}", .{@errorName(err)});
};
},
.windows => unreachable,
else => return error.HotSwapUnavailableOnHostOperatingSystem,
@ -613,6 +618,20 @@ pub const File = struct {
.mode = determineMode(output_mode, link_mode),
});
},
.elf2 => {
const elf = base.cast(.elf2).?;
if (base.file == null) {
elf.mf.file = try base.emit.root_dir.handle.createFile(base.emit.sub_path, .{
.truncate = false,
.read = true,
.mode = determineMode(comp.config.output_mode, comp.config.link_mode),
});
base.file = elf.mf.file;
try elf.mf.ensureTotalCapacity(
@intCast(elf.mf.nodes.items[0].location().resolve(&elf.mf)[1]),
);
}
},
.c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
.plan9 => unreachable,
}
@ -669,14 +688,30 @@ pub const File = struct {
if (base.child_pid) |pid| {
switch (builtin.os.tag) {
.macos => base.cast(.macho).?.ptraceDetach(pid) catch |err| {
log.warn("detaching failed with error: {s}", .{@errorName(err)});
.macos => {
const macho_file = base.cast(.macho).?;
macho_file.ptraceDetach(pid) catch |err| {
log.warn("detaching failed with error: {s}", .{@errorName(err)});
};
},
.windows => {
const coff_file = base.cast(.coff).?;
coff_file.ptraceDetach(pid);
},
.windows => base.cast(.coff).?.ptraceDetach(pid),
else => return error.HotSwapUnavailableOnHostOperatingSystem,
}
}
},
.elf2 => {
const elf = base.cast(.elf2).?;
if (base.file) |f| {
elf.mf.unmap();
assert(elf.mf.file.handle == f.handle);
elf.mf.file = undefined;
f.close();
base.file = null;
}
},
.c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
.plan9 => unreachable,
}
@ -793,6 +828,7 @@ pub const File = struct {
.spirv => {},
.goff, .xcoff => {},
.plan9 => unreachable,
.elf2 => {},
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateLineNumber(pt, ti_id);
@ -825,6 +861,26 @@ pub const File = struct {
}
}
pub fn idle(base: *File, tid: Zcu.PerThread.Id) !bool {
switch (base.tag) {
else => return false,
inline .elf2 => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).idle(tid);
},
}
}
pub fn updateErrorData(base: *File, pt: Zcu.PerThread) !void {
switch (base.tag) {
else => {},
inline .elf2 => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateErrorData(pt);
},
}
}
pub const FlushError = error{
/// Indicates an error will be present in `Compilation.link_diags`.
LinkFailure,
@ -1099,7 +1155,7 @@ pub const File = struct {
if (base.zcu_object_basename != null) return;
switch (base.tag) {
inline .wasm => |tag| {
inline .elf2, .wasm => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(base.comp.link_prog_node);
},
@ -1110,6 +1166,7 @@ pub const File = struct {
pub const Tag = enum {
coff,
elf,
elf2,
macho,
c,
wasm,
@ -1123,6 +1180,7 @@ pub const File = struct {
return switch (tag) {
.coff => Coff,
.elf => Elf,
.elf2 => Elf2,
.macho => MachO,
.c => C,
.wasm => Wasm,
@ -1134,10 +1192,10 @@ pub const File = struct {
};
}
fn fromObjectFormat(ofmt: std.Target.ObjectFormat) Tag {
fn fromObjectFormat(ofmt: std.Target.ObjectFormat, use_new_linker: bool) Tag {
return switch (ofmt) {
.coff => .coff,
.elf => .elf,
.elf => if (use_new_linker) .elf2 else .elf,
.macho => .macho,
.wasm => .wasm,
.plan9 => .plan9,
@ -1223,6 +1281,7 @@ pub const File = struct {
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const Elf2 = @import("link/Elf2.zig");
pub const MachO = @import("link/MachO.zig");
pub const SpirV = @import("link/SpirV.zig");
pub const Wasm = @import("link/Wasm.zig");
@ -1548,6 +1607,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
}
}
}
pub fn doIdleTask(comp: *Compilation, tid: usize) error{ OutOfMemory, LinkFailure }!bool {
return if (comp.bin_file) |lf| lf.idle(@enumFromInt(tid)) else false;
}
/// After the main pipeline is done, but before flush, the compilation may need to link one final
/// `Nav` into the binary: the `builtin.test_functions` value. Since the link thread isn't running
/// by then, we expose this function which can be called directly.
@ -1573,6 +1635,13 @@ pub fn linkTestFunctionsNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
};
}
}
pub fn updateErrorData(pt: Zcu.PerThread) void {
const comp = pt.zcu.comp;
if (comp.bin_file) |lf| lf.updateErrorData(pt) catch |err| switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
error.LinkFailure => {},
};
}
/// Provided by the CLI, processed into `LinkInput` instances at the start of
/// the compilation pipeline.

View file

@ -953,7 +953,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
}
fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
if (!coff.base.comp.incremental) return;
if (!coff.base.comp.config.incremental) return;
// TODO: reverse-lookup might come in handy here
for (coff.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
@ -964,7 +964,7 @@ fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
}
fn markRelocsDirtyByAddress(coff: *Coff, addr: u32) void {
if (!coff.base.comp.incremental) return;
if (!coff.base.comp.config.incremental) return;
const got_moved = blk: {
const sect_id = coff.got_section_index orelse break :blk false;
break :blk coff.sections.items(.header)[sect_id].virtual_address >= addr;
@ -1111,20 +1111,24 @@ pub fn updateFunc(
coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
try codegen.emitFunction(
codegen.emitFunction(
&coff.base,
pt,
zcu.navSrcLoc(nav_index),
func_index,
coff.getAtom(atom_index).getSymbolIndex().?,
mir,
&code_buffer,
&aw.writer,
.none,
);
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
try coff.updateNavCode(pt, nav_index, code_buffer.items, .FUNCTION);
try coff.updateNavCode(pt, nav_index, aw.written(), .FUNCTION);
// Exports will be updated by `Zcu.processExports` after the update.
}
@ -1145,18 +1149,18 @@ fn lowerConst(
) !LowerConstResult {
const gpa = coff.base.comp.gpa;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const atom_index = try coff.createAtom();
const sym = coff.getAtom(atom_index).getSymbolPtr(coff);
try coff.setSymbolName(sym, name);
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_id + 1));
try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{
try codegen.generateSymbol(&coff.base, pt, src_loc, val, &aw.writer, .{
.atom_index = coff.getAtom(atom_index).getSymbolIndex().?,
});
const code = code_buffer.items;
const code = aw.written();
const atom = coff.getAtomPtr(atom_index);
atom.size = @intCast(code.len);
@ -1170,7 +1174,7 @@ fn lowerConst(
log.debug("allocated atom for {s} at 0x{x}", .{ name, atom.getSymbol(coff).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);
return .{ .ok = atom_index };
}
@ -1214,19 +1218,22 @@ pub fn updateNav(
coff.navs.getPtr(nav_index).?.section = coff.getNavOutputSection(nav_index);
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
try codegen.generateSymbol(
codegen.generateSymbol(
&coff.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
&aw.writer,
.{ .atom_index = atom.getSymbolIndex().? },
);
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
try coff.updateNavCode(pt, nav_index, code_buffer.items, .NULL);
try coff.updateNavCode(pt, nav_index, aw.written(), .NULL);
}
// Exports will be updated by `Zcu.processExports` after the update.
@ -1244,8 +1251,8 @@ fn updateLazySymbolAtom(
const gpa = comp.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const name = try allocPrint(gpa, "__lazy_{s}_{f}", .{
@tagName(sym.kind),
@ -1262,11 +1269,11 @@ fn updateLazySymbolAtom(
src,
sym,
&required_alignment,
&code_buffer,
&aw.writer,
.none,
.{ .atom_index = local_sym_index },
);
const code = code_buffer.items;
const code = aw.written();
const atom = coff.getAtomPtr(atom_index);
const symbol = atom.getSymbolPtr(coff);
@ -1285,7 +1292,7 @@ fn updateLazySymbolAtom(
symbol.value = vaddr;
try coff.addGotEntry(.{ .sym_index = local_sym_index });
try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);
}
pub fn getOrCreateAtomForLazySymbol(
@ -1437,7 +1444,7 @@ fn updateNavCode(
};
}
coff.writeAtom(atom_index, code, coff.base.comp.incremental) catch |err| switch (err) {
coff.writeAtom(atom_index, code, coff.base.comp.config.incremental) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
};
@ -1539,14 +1546,12 @@ pub fn updateExports(
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(metadata.section + 1));
sym.type = atom.getSymbol(coff).type;
switch (exp.opts.linkage) {
.strong => {
sym.storage_class = .EXTERNAL;
},
.internal => @panic("TODO Internal"),
sym.storage_class = switch (exp.opts.linkage) {
.internal => .EXTERNAL,
.strong => .EXTERNAL,
.weak => @panic("TODO WeakExternal"),
else => unreachable,
}
};
try coff.resolveGlobalSymbol(sym_loc);
}

View file

@ -2126,19 +2126,22 @@ pub const WipNav = struct {
const size = if (ty.hasRuntimeBits(wip_nav.pt.zcu)) ty.abiSize(wip_nav.pt.zcu) else 0;
try diw.writeUleb128(size);
if (size == 0) return;
var bytes = wip_nav.debug_info.toArrayList();
defer wip_nav.debug_info = .fromArrayList(wip_nav.dwarf.gpa, &bytes);
const old_len = bytes.items.len;
const old_end = wip_nav.debug_info.writer.end;
try codegen.generateSymbol(
wip_nav.dwarf.bin_file,
wip_nav.pt,
src_loc,
val,
&bytes,
&wip_nav.debug_info.writer,
.{ .debug_output = .{ .dwarf = wip_nav } },
);
if (old_len + size != bytes.items.len) {
std.debug.print("{f} [{}]: {} != {}\n", .{ ty.fmt(wip_nav.pt), ty.toIntern(), size, bytes.items.len - old_len });
if (old_end + size != wip_nav.debug_info.writer.end) {
std.debug.print("{f} [{}]: {} != {}\n", .{
ty.fmt(wip_nav.pt),
ty.toIntern(),
size,
wip_nav.debug_info.writer.end - old_end,
});
unreachable;
}
}
@ -6429,7 +6432,7 @@ fn sleb128Bytes(value: anytype) u32 {
/// overrides `-fno-incremental` for testing incremental debug info until `-fincremental` is functional
const force_incremental = false;
inline fn incremental(dwarf: Dwarf) bool {
return force_incremental or dwarf.bin_file.comp.incremental;
return force_incremental or dwarf.bin_file.comp.config.incremental;
}
const Allocator = std.mem.Allocator;

View file

@ -47,7 +47,7 @@ fn newSymbolAssumeCapacity(self: *LinkerDefined, name_off: u32, elf_file: *Elf)
const esym = self.symtab.addOneAssumeCapacity();
esym.* = .{
.st_name = name_off,
.st_info = elf.STB_WEAK << 4,
.st_info = @as(u8, elf.STB_WEAK) << 4,
.st_other = @intFromEnum(elf.STV.HIDDEN),
.st_shndx = elf.SHN_ABS,
.st_value = 0,

View file

@ -105,7 +105,7 @@ pub fn parseHeader(
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic;
if (ehdr.e_ident[elf.EI_VERSION] != 1) return error.BadElfVersion;
if (ehdr.e_ident[elf.EI.VERSION] != 1) return error.BadElfVersion;
if (ehdr.e_type != elf.ET.DYN) return error.NotSharedObject;
if (target.toElfMachine() != ehdr.e_machine)

View file

@ -277,8 +277,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.LinkFailure,
) catch |err| switch (err) {
error.CodegenFail => return error.LinkFailure,
else => |e| return e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbol(
@ -286,8 +286,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.LinkFailure,
) catch |err| switch (err) {
error.CodegenFail => return error.LinkFailure,
else => |e| return e,
};
}
@ -1533,22 +1533,26 @@ pub fn updateFunc(
const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.emitFunction(
codegen.emitFunction(
&elf_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
func_index,
sym_index,
mir,
&code_buffer,
&aw.writer,
if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
log.debug("setting shdr({x},{s}) for {f}", .{
@ -1663,21 +1667,24 @@ pub fn updateNav(
const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(zcu.gpa);
var aw: std.Io.Writer.Allocating = .init(zcu.gpa);
defer aw.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.generateSymbol(
codegen.generateSymbol(
&elf_file.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(nav_init),
&code_buffer,
&aw.writer,
.{ .atom_index = sym_index },
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {f}", .{
@ -1722,8 +1729,8 @@ fn updateLazySymbol(
const gpa = zcu.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{
@ -1734,18 +1741,20 @@ fn updateLazySymbol(
break :blk try self.strtab.insert(gpa, name);
};
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
try codegen.generateLazySymbol(
codegen.generateLazySymbol(
&elf_file.base,
pt,
src,
Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse .unneeded,
sym,
&required_alignment,
&code_buffer,
&aw.writer,
.none,
.{ .atom_index = symbol_index },
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const output_section_index = switch (sym.kind) {
.code => if (self.text_index) |sym_index|
@ -1807,21 +1816,24 @@ fn lowerConst(
) !codegen.SymbolResult {
const gpa = pt.zcu.gpa;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const name_off = try self.addString(gpa, name);
const sym_index = try self.newSymbolWithAtom(gpa, name_off);
try codegen.generateSymbol(
codegen.generateSymbol(
&elf_file.base,
pt,
src_loc,
val,
&code_buffer,
&aw.writer,
.{ .atom_index = sym_index },
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const local_sym = self.symbol(sym_index);
const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index];

2036
src/link/Elf2.zig Normal file

File diff suppressed because it is too large Load diff

View file

@ -784,22 +784,26 @@ pub fn updateFunc(
const sym_index = try self.getOrCreateMetadataForNav(macho_file, func.owner_nav);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.emitFunction(
codegen.emitFunction(
&macho_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
func_index,
sym_index,
mir,
&code_buffer,
&aw.writer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code);
const old_rva, const old_alignment = blk: {
@@ -895,21 +899,24 @@ pub fn updateNav(
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(zcu.gpa);
var aw: std.Io.Writer.Allocating = .init(zcu.gpa);
defer aw.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.generateSymbol(
codegen.generateSymbol(
&macho_file.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(nav_init),
&code_buffer,
&aw.writer,
.{ .atom_index = sym_index },
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
@@ -1198,21 +1205,24 @@ fn lowerConst(
) !codegen.SymbolResult {
const gpa = macho_file.base.comp.gpa;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const name_str = try self.addString(gpa, name);
const sym_index = try self.newSymbolWithAtom(gpa, name_str, macho_file);
try codegen.generateSymbol(
codegen.generateSymbol(
&macho_file.base,
pt,
src_loc,
val,
&code_buffer,
&aw.writer,
.{ .atom_index = sym_index },
);
const code = code_buffer.items;
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
const code = aw.written();
const sym = &self.symbols.items[sym_index];
sym.out_n_sect = output_section_index;
@@ -1349,8 +1359,8 @@ fn updateLazySymbol(
const gpa = zcu.gpa;
var required_alignment: Atom.Alignment = .none;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const name_str = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{
@@ -1368,11 +1378,11 @@ fn updateLazySymbol(
src,
lazy_sym,
&required_alignment,
&code_buffer,
&aw.writer,
.none,
.{ .atom_index = symbol_index },
);
const code = code_buffer.items;
const code = aw.written();
const output_section_index = switch (lazy_sym.kind) {
.code => macho_file.zig_text_sect_index.?,

929
src/link/MappedFile.zig Normal file
View file

@@ -0,0 +1,929 @@
file: std.fs.File,
flags: packed struct {
block_size: std.mem.Alignment,
copy_file_range_unsupported: bool,
fallocate_punch_hole_unsupported: bool,
fallocate_insert_range_unsupported: bool,
},
section: if (is_windows) windows.HANDLE else void,
contents: []align(std.heap.page_size_min) u8,
nodes: std.ArrayList(Node),
free_ni: Node.Index,
large: std.ArrayList(u64),
updates: std.ArrayList(Node.Index),
update_prog_node: std.Progress.Node,
writers: std.SinglyLinkedList,
pub const Error = std.posix.MMapError ||
std.posix.MRemapError ||
std.fs.File.SetEndPosError ||
std.fs.File.CopyRangeError ||
error{NotFile};
pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
var mf: MappedFile = .{
.file = file,
.flags = undefined,
.section = if (is_windows) windows.INVALID_HANDLE_VALUE else {},
.contents = &.{},
.nodes = .empty,
.free_ni = .none,
.large = .empty,
.updates = .empty,
.update_prog_node = .none,
.writers = .{},
};
errdefer mf.deinit(gpa);
const size: u64, const blksize = if (is_windows)
.{ try windows.GetFileSizeEx(file.handle), 1 }
else stat: {
const stat = try std.posix.fstat(mf.file.handle);
if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
break :stat .{ @bitCast(stat.size), stat.blksize };
};
mf.flags = .{
.block_size = .fromByteUnits(
std.math.ceilPowerOfTwoAssert(usize, @max(std.heap.pageSize(), blksize)),
),
.copy_file_range_unsupported = false,
.fallocate_insert_range_unsupported = false,
.fallocate_punch_hole_unsupported = false,
};
try mf.nodes.ensureUnusedCapacity(gpa, 1);
assert(try mf.addNode(gpa, .{
.add_node = .{
.size = size,
.fixed = true,
},
}) == Node.Index.root);
try mf.ensureTotalCapacity(@intCast(size));
return mf;
}
pub fn deinit(mf: *MappedFile, gpa: std.mem.Allocator) void {
mf.unmap();
mf.nodes.deinit(gpa);
mf.large.deinit(gpa);
mf.updates.deinit(gpa);
mf.update_prog_node.end();
assert(mf.writers.first == null);
mf.* = undefined;
}
pub const Node = extern struct {
parent: Node.Index,
prev: Node.Index,
next: Node.Index,
first: Node.Index,
last: Node.Index,
flags: Flags,
location_payload: Location.Payload,
pub const Flags = packed struct(u32) {
location_tag: Location.Tag,
alignment: std.mem.Alignment,
/// Whether this node is fixed at its offset and cannot be moved.
fixed: bool,
/// Whether this node has been moved.
moved: bool,
/// Whether this node has been resized.
resized: bool,
/// Whether this node might contain non-zero bytes.
has_content: bool,
unused: @Type(.{ .int = .{
.signedness = .unsigned,
.bits = 32 - @bitSizeOf(std.mem.Alignment) - 5,
} }) = 0,
};
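/// Where a node lives within its parent. Offsets and sizes that both fit
/// in u32 are stored inline as `small`; otherwise `large` holds an index
/// to an (offset, size) pair of u64s in `mf.large`.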
pub const Location = union(enum(u1)) {
small: extern struct {
/// Relative to `parent`.
offset: u32,
size: u32,
},
large: extern struct {
index: usize,
unused: @Type(.{ .int = .{
.signedness = .unsigned,
.bits = 64 - @bitSizeOf(usize),
} }) = 0,
},
pub const Tag = @typeInfo(Location).@"union".tag_type.?;
pub const Payload = @Type(.{ .@"union" = .{
.layout = .@"extern",
.tag_type = null,
.fields = @typeInfo(Location).@"union".fields,
.decls = &.{},
} });
pub fn resolve(loc: Location, mf: *const MappedFile) [2]u64 {
return switch (loc) {
.small => |small| .{ small.offset, small.size },
.large => |large| mf.large.items[large.index..][0..2].*,
};
}
};
pub const Index = enum(u32) {
none,
_,
pub const root: Node.Index = .none;
fn get(ni: Node.Index, mf: *const MappedFile) *Node {
return &mf.nodes.items[@intFromEnum(ni)];
}
pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
var child_ni = ni.get(mf).last;
while (child_ni != .none) {
try child_ni.moved(gpa, mf);
child_ni = child_ni.get(mf).prev;
}
}
pub fn hasMoved(ni: Node.Index, mf: *const MappedFile) bool {
var parent_ni = ni;
while (parent_ni != .none) {
const parent = parent_ni.get(mf);
if (parent.flags.moved) return true;
parent_ni = parent.parent;
}
return false;
}
pub fn moved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
try mf.updates.ensureUnusedCapacity(gpa, 1);
ni.movedAssumeCapacity(mf);
}
pub fn cleanMoved(ni: Node.Index, mf: *const MappedFile) bool {
const node_moved = &ni.get(mf).flags.moved;
defer node_moved.* = false;
return node_moved.*;
}
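/// Marks `ni` moved and queues an update, unless `ni` or an ancestor is
/// already marked moved (the pending update covers it) or `ni` is already
/// queued via `resized`.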
fn movedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
var parent_ni = ni;
while (parent_ni != .none) {
const parent_node = parent_ni.get(mf);
if (parent_node.flags.moved) return;
parent_ni = parent_node.parent;
}
const node = ni.get(mf);
node.flags.moved = true;
if (node.flags.resized) return;
mf.updates.appendAssumeCapacity(ni);
mf.update_prog_node.increaseEstimatedTotalItems(1);
}
pub fn hasResized(ni: Node.Index, mf: *const MappedFile) bool {
return ni.get(mf).flags.resized;
}
pub fn resized(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
try mf.updates.ensureUnusedCapacity(gpa, 1);
ni.resizedAssumeCapacity(mf);
}
pub fn cleanResized(ni: Node.Index, mf: *const MappedFile) bool {
const node_resized = &ni.get(mf).flags.resized;
defer node_resized.* = false;
return node_resized.*;
}
fn resizedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
const node = ni.get(mf);
if (node.flags.resized) return;
node.flags.resized = true;
if (node.flags.moved) return;
mf.updates.appendAssumeCapacity(ni);
mf.update_prog_node.increaseEstimatedTotalItems(1);
}
pub fn alignment(ni: Node.Index, mf: *const MappedFile) std.mem.Alignment {
return ni.get(mf).flags.alignment;
}
fn setLocationAssumeCapacity(ni: Node.Index, mf: *MappedFile, offset: u64, size: u64) void {
const node = ni.get(mf);
if (size == 0) node.flags.has_content = false;
switch (node.location()) {
.small => |small| {
if (small.offset != offset) ni.movedAssumeCapacity(mf);
if (small.size != size) ni.resizedAssumeCapacity(mf);
if (std.math.cast(u32, offset)) |small_offset| {
if (std.math.cast(u32, size)) |small_size| {
node.location_payload.small = .{
.offset = small_offset,
.size = small_size,
};
return;
}
}
defer mf.large.appendSliceAssumeCapacity(&.{ offset, size });
node.flags.location_tag = .large;
node.location_payload = .{ .large = .{ .index = mf.large.items.len } };
},
.large => |large| {
const large_items = mf.large.items[large.index..][0..2];
if (large_items[0] != offset) ni.movedAssumeCapacity(mf);
if (large_items[1] != size) ni.resizedAssumeCapacity(mf);
large_items.* = .{ offset, size };
},
}
}
pub fn location(ni: Node.Index, mf: *const MappedFile) Location {
return ni.get(mf).location();
}
pub fn fileLocation(
ni: Node.Index,
mf: *const MappedFile,
set_has_content: bool,
) struct { offset: u64, size: u64 } {
var offset, const size = ni.location(mf).resolve(mf);
var parent_ni = ni;
while (true) {
const parent = parent_ni.get(mf);
if (set_has_content) parent.flags.has_content = true;
if (parent_ni == .none) break;
parent_ni = parent.parent;
offset += parent_ni.location(mf).resolve(mf)[0];
}
return .{ .offset = offset, .size = size };
}
pub fn slice(ni: Node.Index, mf: *const MappedFile) []u8 {
const file_loc = ni.fileLocation(mf, true);
return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
}
pub fn sliceConst(ni: Node.Index, mf: *const MappedFile) []const u8 {
const file_loc = ni.fileLocation(mf, false);
return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
}
pub fn resize(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, size: u64) !void {
try mf.resizeNode(gpa, ni, size);
var writers_it = mf.writers.first;
while (writers_it) |writer_node| : (writers_it = writer_node.next) {
const w: *Node.Writer = @fieldParentPtr("writer_node", writer_node);
w.interface.buffer = w.ni.slice(mf);
}
}
pub fn writer(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, w: *Writer) void {
w.* = .{
.gpa = gpa,
.mf = mf,
.writer_node = .{},
.ni = ni,
.interface = .{
.buffer = ni.slice(mf),
.vtable = &Writer.vtable,
},
.err = null,
};
mf.writers.prepend(&w.writer_node);
}
};
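/// Reassembles the tagged `Location` from the tag stored in `flags` and
/// the untagged payload stored alongside it.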
pub fn location(node: *const Node) Location {
return switch (node.flags.location_tag) {
inline else => |tag| @unionInit(
Location,
@tagName(tag),
@field(node.location_payload, @tagName(tag)),
),
};
}
pub const Writer = struct {
gpa: std.mem.Allocator,
mf: *MappedFile,
writer_node: std.SinglyLinkedList.Node,
ni: Node.Index,
interface: std.Io.Writer,
err: ?Error,
pub fn deinit(w: *Writer) void {
assert(w.mf.writers.popFirst() == &w.writer_node);
w.* = undefined;
}
const vtable: std.Io.Writer.VTable = .{
.drain = drain,
.sendFile = sendFile,
.flush = std.Io.Writer.noopFlush,
.rebase = growingRebase,
};
fn drain(
interface: *std.Io.Writer,
data: []const []const u8,
splat: usize,
) std.Io.Writer.Error!usize {
const pattern = data[data.len - 1];
const splat_len = pattern.len * splat;
const start_len = interface.end;
assert(data.len != 0);
for (data) |bytes| {
try growingRebase(interface, interface.end, bytes.len + splat_len + 1);
@memcpy(interface.buffer[interface.end..][0..bytes.len], bytes);
interface.end += bytes.len;
}
if (splat == 0) {
interface.end -= pattern.len;
} else switch (pattern.len) {
0 => {},
1 => {
@memset(interface.buffer[interface.end..][0 .. splat - 1], pattern[0]);
interface.end += splat - 1;
},
else => for (0..splat - 1) |_| {
@memcpy(interface.buffer[interface.end..][0..pattern.len], pattern);
interface.end += pattern.len;
},
}
return interface.end - start_len;
}
fn sendFile(
interface: *std.Io.Writer,
file_reader: *std.fs.File.Reader,
limit: std.Io.Limit,
) std.Io.Writer.FileError!usize {
if (limit == .nothing) return 0;
const pos = file_reader.logicalPos();
const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
if (additional == 0) return error.EndOfStream;
try growingRebase(interface, interface.end, limit.minInt64(additional));
switch (file_reader.mode) {
.positional => {
const fr_buf = file_reader.interface.buffered();
const buf_copy_size = interface.write(fr_buf) catch unreachable;
file_reader.interface.toss(buf_copy_size);
if (buf_copy_size < fr_buf.len) return buf_copy_size;
assert(file_reader.logicalPos() == file_reader.pos);
const w: *Writer = @fieldParentPtr("interface", interface);
const copy_size: usize = @intCast(w.mf.copyFileRange(
file_reader.file,
file_reader.pos,
w.ni.fileLocation(w.mf, true).offset + interface.end,
limit.minInt(interface.unusedCapacityLen()),
) catch |err| {
w.err = err;
return error.WriteFailed;
});
interface.end += copy_size;
return copy_size;
},
.streaming,
.streaming_reading,
.positional_reading,
.failure,
=> {
const dest = limit.slice(interface.unusedCapacitySlice());
const n = try file_reader.read(dest);
interface.end += n;
return n;
},
}
}
fn growingRebase(
interface: *std.Io.Writer,
preserve: usize,
unused_capacity: usize,
) std.Io.Writer.Error!void {
_ = preserve;
const total_capacity = interface.end + unused_capacity;
if (interface.buffer.len >= total_capacity) return;
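// Grow the backing node by ~1.5x so repeated appends amortize the cost
// of resizing (and potentially relocating) it.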
const w: *Writer = @fieldParentPtr("interface", interface);
w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / 2) catch |err| {
w.err = err;
return error.WriteFailed;
};
}
};
comptime {
if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 32);
}
};
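// Recycled node slots are kept on a free list headed by `free_ni` and
// threaded through `next`; they are reused before `nodes` grows.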
fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
parent: Node.Index = .none,
prev: Node.Index = .none,
next: Node.Index = .none,
offset: u64 = 0,
add_node: AddNodeOptions,
}) !Node.Index {
if (opts.add_node.moved or opts.add_node.resized) try mf.updates.ensureUnusedCapacity(gpa, 1);
const offset = opts.add_node.alignment.forward(@intCast(opts.offset));
const location_tag: Node.Location.Tag, const location_payload: Node.Location.Payload = location: {
if (std.math.cast(u32, offset)) |small_offset| break :location .{ .small, .{
.small = .{ .offset = small_offset, .size = 0 },
} };
try mf.large.ensureUnusedCapacity(gpa, 2);
defer mf.large.appendSliceAssumeCapacity(&.{ offset, 0 });
break :location .{ .large, .{ .large = .{ .index = mf.large.items.len } } };
};
const free_ni: Node.Index, const free_node = free: switch (mf.free_ni) {
.none => .{ @enumFromInt(mf.nodes.items.len), mf.nodes.addOneAssumeCapacity() },
else => |free_ni| {
const free_node = free_ni.get(mf);
mf.free_ni = free_node.next;
break :free .{ free_ni, free_node };
},
};
free_node.* = .{
.parent = opts.parent,
.prev = opts.prev,
.next = opts.next,
.first = .none,
.last = .none,
.flags = .{
.location_tag = location_tag,
.alignment = opts.add_node.alignment,
.fixed = opts.add_node.fixed,
.moved = true,
.resized = true,
.has_content = false,
},
.location_payload = location_payload,
};
{
defer {
free_node.flags.moved = false;
free_node.flags.resized = false;
}
if (offset > opts.parent.location(mf).resolve(mf)[1]) try opts.parent.resize(mf, gpa, offset);
try free_ni.resize(mf, gpa, opts.add_node.size);
}
if (opts.add_node.moved) free_ni.movedAssumeCapacity(mf);
if (opts.add_node.resized) free_ni.resizedAssumeCapacity(mf);
return free_ni;
}
pub const AddNodeOptions = struct {
size: u64 = 0,
alignment: std.mem.Alignment = .@"1",
fixed: bool = false,
moved: bool = false,
resized: bool = false,
};
pub fn addOnlyChildNode(
mf: *MappedFile,
gpa: std.mem.Allocator,
parent_ni: Node.Index,
opts: AddNodeOptions,
) !Node.Index {
try mf.nodes.ensureUnusedCapacity(gpa, 1);
const parent = parent_ni.get(mf);
assert(parent.first == .none and parent.last == .none);
const ni = try mf.addNode(gpa, .{
.parent = parent_ni,
.add_node = opts,
});
parent.first = ni;
parent.last = ni;
return ni;
}
pub fn addLastChildNode(
mf: *MappedFile,
gpa: std.mem.Allocator,
parent_ni: Node.Index,
opts: AddNodeOptions,
) !Node.Index {
try mf.nodes.ensureUnusedCapacity(gpa, 1);
const parent = parent_ni.get(mf);
const ni = try mf.addNode(gpa, .{
.parent = parent_ni,
.prev = parent.last,
.offset = offset: switch (parent.last) {
.none => 0,
else => |last_ni| {
const last_offset, const last_size = last_ni.location(mf).resolve(mf);
break :offset last_offset + last_size;
},
},
.add_node = opts,
});
switch (parent.last) {
.none => parent.first = ni,
else => |last_ni| last_ni.get(mf).next = ni,
}
parent.last = ni;
return ni;
}
pub fn addNodeAfter(
mf: *MappedFile,
gpa: std.mem.Allocator,
prev_ni: Node.Index,
opts: AddNodeOptions,
) !Node.Index {
assert(prev_ni != .none);
try mf.nodes.ensureUnusedCapacity(gpa, 1);
const prev = prev_ni.get(mf);
const prev_offset, const prev_size = prev.location().resolve(mf);
const ni = try mf.addNode(gpa, .{
.parent = prev.parent,
.prev = prev_ni,
.next = prev.next,
.offset = prev_offset + prev_size,
.add_node = opts,
});
switch (prev.next) {
.none => prev.parent.get(mf).last = ni,
else => |next_ni| next_ni.get(mf).prev = ni,
}
prev.next = ni;
return ni;
}
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
const node = ni.get(mf);
var old_offset, const old_size = node.location().resolve(mf);
const new_size = node.flags.alignment.forward(@intCast(requested_size));
// Resize the entire file
if (ni == Node.Index.root) {
try mf.file.setEndPos(new_size);
try mf.ensureTotalCapacity(@intCast(new_size));
try mf.ensureCapacityForSetLocation(gpa);
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
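// Growth strategy, attempted in order: (1) expand in place into trailing
// free space; (2) on Linux, splice in an extent with
// fallocate(FL_INSERT_RANGE) without copying data; (3) if this is the
// last child, grow the parent and retry; (4) otherwise relocate this
// floating node (or, if it is fixed, its floating successor) to the end
// of the parent and retry.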
while (true) {
const parent = node.parent.get(mf);
_, const old_parent_size = parent.location().resolve(mf);
const trailing_end = switch (node.next) {
.none => parent.location().resolve(mf)[1],
else => |next_ni| next_ni.location(mf).resolve(mf)[0],
};
assert(old_offset + old_size <= trailing_end);
// Expand the node into available trailing free space
if (old_offset + new_size <= trailing_end) {
try mf.ensureCapacityForSetLocation(gpa);
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
// Ask the filesystem driver to insert an extent into the file without copying any data
if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
node.flags.alignment.order(mf.flags.block_size).compare(.gte))
insert_range: {
const last_offset, const last_size = parent.last.location(mf).resolve(mf);
const last_end = last_offset + last_size;
assert(last_end <= old_parent_size);
const range_size =
node.flags.alignment.forward(@intCast(requested_size +| requested_size / 2)) - old_size;
const new_parent_size = last_end + range_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
continue;
}
const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
retry: while (true) {
switch (linux.E.init(linux.fallocate(
mf.file.handle,
linux.FALLOC.FL_INSERT_RANGE,
@intCast(range_file_offset),
@intCast(range_size),
))) {
.SUCCESS => {
var enclosing_ni = ni;
while (enclosing_ni != .none) {
try mf.ensureCapacityForSetLocation(gpa);
const enclosing = enclosing_ni.get(mf);
const enclosing_offset, const enclosing_size =
enclosing.location().resolve(mf);
enclosing_ni.setLocationAssumeCapacity(
mf,
enclosing_offset,
enclosing_size + range_size,
);
var after_ni = enclosing.next;
while (after_ni != .none) {
try mf.ensureCapacityForSetLocation(gpa);
const after = after_ni.get(mf);
const after_offset, const after_size = after.location().resolve(mf);
after_ni.setLocationAssumeCapacity(
mf,
range_size + after_offset,
after_size,
);
after_ni = after.next;
}
enclosing_ni = enclosing.parent;
}
return;
},
.INTR => continue :retry,
.BADF, .FBIG, .INVAL => unreachable,
.IO => return error.InputOutput,
.NODEV => return error.NotFile,
.NOSPC => return error.NoSpaceLeft,
.NOSYS, .OPNOTSUPP => {
mf.flags.fallocate_insert_range_unsupported = true;
break :insert_range;
},
.PERM => return error.PermissionDenied,
.SPIPE => return error.Unseekable,
.TXTBSY => return error.FileBusy,
else => |e| return std.posix.unexpectedErrno(e),
}
}
}
switch (node.next) {
.none => {
// As this is the last node, we simply need more space in the parent
const new_parent_size = old_offset + new_size;
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
},
else => |*next_ni_ptr| switch (node.flags.fixed) {
false => {
// Make space at the end of the parent for this floating node
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
const new_parent_size = new_offset + new_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / 2,
);
continue;
}
const next_ni = next_ni_ptr.*;
next_ni.get(mf).prev = node.prev;
switch (node.prev) {
.none => parent.first = next_ni,
else => |prev_ni| prev_ni.get(mf).next = next_ni,
}
last.next = ni;
node.prev = parent.last;
next_ni_ptr.* = .none;
parent.last = ni;
if (node.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + old_offset,
parent_file_offset + new_offset,
old_size,
);
}
old_offset = new_offset;
},
true => {
// Move the next floating node to make space for this fixed node
const next_ni = next_ni_ptr.*;
const next = next_ni.get(mf);
assert(!next.flags.fixed);
const next_offset, const next_size = next.location().resolve(mf);
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_offset = next.flags.alignment.forward(@intCast(
@max(old_offset + new_size, last_offset + last_size),
));
const new_parent_size = new_offset + next_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / 2,
);
continue;
}
try mf.ensureCapacityForSetLocation(gpa);
next.prev = parent.last;
parent.last = next_ni;
last.next = next_ni;
next_ni_ptr.* = next.next;
switch (next.next) {
.none => {},
else => |next_next_ni| next_next_ni.get(mf).prev = ni,
}
next.next = .none;
if (node.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + next_offset,
parent_file_offset + new_offset,
next_size,
);
}
next_ni.setLocationAssumeCapacity(mf, new_offset, next_size);
},
},
}
}
}
fn moveRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
// make a copy of this node at the new location
try mf.copyRange(old_file_offset, new_file_offset, size);
// delete the copy of this node at the old location
if (is_linux and !mf.flags.fallocate_punch_hole_unsupported and
size >= mf.flags.block_size.toByteUnits() * 2 - 1) while (true)
{
switch (linux.E.init(linux.fallocate(
mf.file.handle,
linux.FALLOC.FL_PUNCH_HOLE | linux.FALLOC.FL_KEEP_SIZE,
@intCast(old_file_offset),
@intCast(size),
))) {
.SUCCESS => return,
.INTR => continue,
.BADF, .FBIG, .INVAL => unreachable,
.IO => return error.InputOutput,
.NODEV => return error.NotFile,
.NOSPC => return error.NoSpaceLeft,
.NOSYS, .OPNOTSUPP => {
mf.flags.fallocate_punch_hole_unsupported = true;
break;
},
.PERM => return error.PermissionDenied,
.SPIPE => return error.Unseekable,
.TXTBSY => return error.FileBusy,
else => |e| return std.posix.unexpectedErrno(e),
}
};
@memset(mf.contents[@intCast(old_file_offset)..][0..@intCast(size)], 0);
}
fn copyRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
const copy_size = try mf.copyFileRange(mf.file, old_file_offset, new_file_offset, size);
if (copy_size < size) @memcpy(
mf.contents[@intCast(new_file_offset + copy_size)..][0..@intCast(size - copy_size)],
mf.contents[@intCast(old_file_offset + copy_size)..][0..@intCast(size - copy_size)],
);
}
fn copyFileRange(
mf: *MappedFile,
old_file: std.fs.File,
old_file_offset: u64,
new_file_offset: u64,
size: u64,
) !u64 {
var remaining_size = size;
if (is_linux and !mf.flags.copy_file_range_unsupported) {
var old_file_offset_mut: i64 = @intCast(old_file_offset);
var new_file_offset_mut: i64 = @intCast(new_file_offset);
while (remaining_size >= mf.flags.block_size.toByteUnits() * 2 - 1) {
const copy_len = linux.copy_file_range(
old_file.handle,
&old_file_offset_mut,
mf.file.handle,
&new_file_offset_mut,
@intCast(remaining_size),
0,
);
switch (linux.E.init(copy_len)) {
.SUCCESS => {
if (copy_len == 0) break;
remaining_size -= copy_len;
if (remaining_size == 0) break;
},
.INTR => continue,
.BADF, .FBIG, .INVAL, .OVERFLOW => unreachable,
.IO => return error.InputOutput,
.ISDIR => return error.IsDir,
.NOMEM => return error.SystemResources,
.NOSPC => return error.NoSpaceLeft,
.NOSYS, .OPNOTSUPP, .XDEV => {
mf.flags.copy_file_range_unsupported = true;
break;
},
.PERM => return error.PermissionDenied,
.TXTBSY => return error.FileBusy,
else => |e| return std.posix.unexpectedErrno(e),
}
}
}
return size - remaining_size;
}
fn ensureCapacityForSetLocation(mf: *MappedFile, gpa: std.mem.Allocator) !void {
try mf.large.ensureUnusedCapacity(gpa, 2);
try mf.updates.ensureUnusedCapacity(gpa, 1);
}
pub fn ensureTotalCapacity(mf: *MappedFile, new_capacity: usize) !void {
if (mf.contents.len >= new_capacity) return;
try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / 2);
}
pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {
if (mf.contents.len >= new_capacity) return;
const aligned_capacity = mf.flags.block_size.forward(new_capacity);
if (!is_linux) mf.unmap() else if (mf.contents.len > 0) {
mf.contents = try std.posix.mremap(
mf.contents.ptr,
mf.contents.len,
aligned_capacity,
.{ .MAYMOVE = true },
null,
);
return;
}
if (is_windows) {
if (mf.section == windows.INVALID_HANDLE_VALUE) switch (windows.ntdll.NtCreateSection(
&mf.section,
windows.STANDARD_RIGHTS_REQUIRED | windows.SECTION_QUERY |
windows.SECTION_MAP_WRITE | windows.SECTION_MAP_READ | windows.SECTION_EXTEND_SIZE,
null,
@constCast(&@as(i64, @intCast(aligned_capacity))),
windows.PAGE_READWRITE,
windows.SEC_COMMIT,
mf.file.handle,
)) {
.SUCCESS => {},
else => return error.MemoryMappingNotSupported,
};
var contents_ptr: ?[*]align(std.heap.page_size_min) u8 = null;
var contents_len = aligned_capacity;
switch (windows.ntdll.NtMapViewOfSection(
mf.section,
windows.GetCurrentProcess(),
@ptrCast(&contents_ptr),
null,
0,
null,
&contents_len,
.ViewUnmap,
0,
windows.PAGE_READWRITE,
)) {
.SUCCESS => mf.contents = contents_ptr.?[0..contents_len],
else => return error.MemoryMappingNotSupported,
}
} else mf.contents = try std.posix.mmap(
null,
aligned_capacity,
std.posix.PROT.READ | std.posix.PROT.WRITE,
.{ .TYPE = if (is_linux) .SHARED_VALIDATE else .SHARED },
mf.file.handle,
0,
);
}
pub fn unmap(mf: *MappedFile) void {
if (mf.contents.len == 0) return;
if (is_windows)
_ = windows.ntdll.NtUnmapViewOfSection(windows.GetCurrentProcess(), mf.contents.ptr)
else
std.posix.munmap(mf.contents);
mf.contents = &.{};
if (is_windows and mf.section != windows.INVALID_HANDLE_VALUE) {
windows.CloseHandle(mf.section);
mf.section = windows.INVALID_HANDLE_VALUE;
}
}
fn verify(mf: *MappedFile) void {
const root = Node.Index.root.get(mf);
assert(root.parent == .none);
assert(root.prev == .none);
assert(root.next == .none);
mf.verifyNode(Node.Index.root);
}
fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void {
const parent = parent_ni.get(mf);
const parent_offset, const parent_size = parent.location().resolve(mf);
var prev_ni: Node.Index = .none;
var prev_end: u64 = 0;
var ni = parent.first;
while (true) {
if (ni == .none) {
assert(parent.last == prev_ni);
return;
}
const node = ni.get(mf);
assert(node.parent == parent_ni);
const offset, const size = node.location().resolve(mf);
assert(node.flags.alignment.check(@intCast(offset)));
assert(node.flags.alignment.check(@intCast(size)));
const end = offset + size;
assert(end <= parent_offset + parent_size);
assert(offset >= prev_end);
assert(node.prev == prev_ni);
mf.verifyNode(ni);
prev_ni = ni;
prev_end = end;
ni = node.next;
}
}
const assert = std.debug.assert;
const builtin = @import("builtin");
const is_linux = builtin.os.tag == .linux;
const is_windows = builtin.os.tag == .windows;
const linux = std.os.linux;
const MappedFile = @This();
const std = @import("std");
const windows = std.os.windows;
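A minimal sketch of driving this API from a hypothetical client, assuming a writable regular file; the demo names below are illustrative, not from the commit:

    const std = @import("std");
    const MappedFile = @import("MappedFile.zig");

    fn demo(gpa: std.mem.Allocator) !void {
        const file = try std.fs.cwd().createFile("demo.bin", .{ .read = true, .truncate = true });
        defer file.close();

        var mf = try MappedFile.init(file, gpa);
        defer mf.deinit(gpa);

        // Carve a 16-byte-aligned child out of the root node.
        const ni = try mf.addLastChildNode(gpa, MappedFile.Node.Index.root, .{
            .size = 64,
            .alignment = .@"16",
        });

        // Stream bytes into the mapping; the writer grows the node on demand.
        var w: MappedFile.Node.Writer = undefined;
        ni.writer(&mf, gpa, &w);
        defer w.deinit();
        try w.interface.writeAll("hello, mapped file");
    }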

View file

@@ -22,17 +22,17 @@ prelink_wait_count: u32,
/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
/// Allocated into `gpa`, guarded by `mutex`.
queued_prelink: std.ArrayListUnmanaged(PrelinkTask),
queued_prelink: std.ArrayList(PrelinkTask),
/// The worker thread moves items from `queued_prelink` into this array in order to process them.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_prelink: std.ArrayListUnmanaged(PrelinkTask),
wip_prelink: std.ArrayList(PrelinkTask),
/// Like `queued_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, guarded by `mutex`.
queued_zcu: std.ArrayListUnmanaged(ZcuTask),
queued_zcu: std.ArrayList(ZcuTask),
/// Like `wip_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_zcu: std.ArrayListUnmanaged(ZcuTask),
wip_zcu: std.ArrayList(ZcuTask),
/// When processing ZCU link tasks, we might have to block due to unpopulated MIR. When this
/// happens, some tasks in `wip_zcu` have been run, and some are still pending. This is the
@@ -213,32 +213,41 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
q.flush_safety.lock(); // every `return` site should unlock this before unlocking `q.mutex`
if (std.debug.runtime_safety) {
q.mutex.lock();
defer q.mutex.unlock();
assert(q.state == .running);
}
var have_idle_tasks = true;
prelink: while (true) {
assert(q.wip_prelink.items.len == 0);
{
q.mutex.lock();
defer q.mutex.unlock();
std.mem.swap(std.ArrayListUnmanaged(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
if (q.wip_prelink.items.len == 0) {
if (q.prelink_wait_count == 0) {
break :prelink; // prelink is done
} else {
swap_queues: while (true) {
{
q.mutex.lock();
defer q.mutex.unlock();
std.mem.swap(std.ArrayList(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
if (q.wip_prelink.items.len > 0) break :swap_queues;
if (q.prelink_wait_count == 0) break :prelink; // prelink is done
if (!have_idle_tasks) {
// We're expecting more prelink tasks so can't move on to ZCU tasks.
q.state = .finished;
q.flush_safety.unlock();
return;
}
}
have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
error.OutOfMemory => have_idle_tasks: {
comp.link_diags.setAllocFailure();
break :have_idle_tasks false;
},
error.LinkFailure => false,
};
}
for (q.wip_prelink.items) |task| {
link.doPrelinkTask(comp, task);
}
have_idle_tasks = true;
q.wip_prelink.clearRetainingCapacity();
}
@@ -256,17 +265,29 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
// Now we can run ZCU tasks.
while (true) {
if (q.wip_zcu.items.len == q.wip_zcu_idx) {
if (q.wip_zcu.items.len == q.wip_zcu_idx) swap_queues: {
q.wip_zcu.clearRetainingCapacity();
q.wip_zcu_idx = 0;
q.mutex.lock();
defer q.mutex.unlock();
std.mem.swap(std.ArrayListUnmanaged(ZcuTask), &q.queued_zcu, &q.wip_zcu);
if (q.wip_zcu.items.len == 0) {
// We've exhausted all available tasks.
q.state = .finished;
q.flush_safety.unlock();
return;
while (true) {
{
q.mutex.lock();
defer q.mutex.unlock();
std.mem.swap(std.ArrayList(ZcuTask), &q.queued_zcu, &q.wip_zcu);
if (q.wip_zcu.items.len > 0) break :swap_queues;
if (!have_idle_tasks) {
// We've exhausted all available tasks.
q.state = .finished;
q.flush_safety.unlock();
return;
}
}
have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
error.OutOfMemory => have_idle_tasks: {
comp.link_diags.setAllocFailure();
break :have_idle_tasks false;
},
error.LinkFailure => false,
};
}
}
const task = q.wip_zcu.items[q.wip_zcu_idx];
@@ -274,8 +295,18 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
pending: {
if (task != .link_func) break :pending;
const status_ptr = &task.link_func.mir.status;
// First check without the mutex to optimize for the common case where MIR is ready.
if (status_ptr.load(.acquire) != .pending) break :pending;
while (true) {
// First check without the mutex to optimize for the common case where MIR is ready.
if (status_ptr.load(.acquire) != .pending) break :pending;
if (have_idle_tasks) have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
error.OutOfMemory => have_idle_tasks: {
comp.link_diags.setAllocFailure();
break :have_idle_tasks false;
},
error.LinkFailure => false,
};
if (!have_idle_tasks) break;
}
q.mutex.lock();
defer q.mutex.unlock();
if (status_ptr.load(.acquire) != .pending) break :pending;
@@ -298,6 +329,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
}
}
q.wip_zcu_idx += 1;
have_idle_tasks = true;
}
}
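The `doIdleTask` error mapping now appears at three call sites in this function; a hypothetical helper (not part of this commit) captures the shared shape:

    fn tryIdleTask(comp: *Compilation, tid: usize) bool {
        // Returns whether more idle work may remain; failures are
        // recorded in the diagnostics and treated as "no more work".
        return link.doIdleTask(comp, tid) catch |err| switch (err) {
            error.OutOfMemory => blk: {
                comp.link_diags.setAllocFailure();
                break :blk false;
            },
            error.LinkFailure => false,
        };
    }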

View file

@@ -4257,7 +4257,14 @@ fn lowerZcuData(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !Zcu
const func_table_fixups_start: u32 = @intCast(wasm.func_table_fixups.items.len);
wasm.string_bytes_lock.lock();
try codegen.generateSymbol(&wasm.base, pt, .unneeded, .fromInterned(ip_index), &wasm.string_bytes, .none);
{
var aw: std.Io.Writer.Allocating = .fromArrayList(wasm.base.comp.gpa, &wasm.string_bytes);
defer wasm.string_bytes = aw.toArrayList();
codegen.generateSymbol(&wasm.base, pt, .unneeded, .fromInterned(ip_index), &aw.writer, .none) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
}
const code_len: u32 = @intCast(wasm.string_bytes.items.len - code_start);
const relocs_len: u32 = @intCast(wasm.out_relocs.len - relocs_start);
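Where the caller still owns an `ArrayList(u8)` (here `wasm.string_bytes`), the adapter round-trips it: `fromArrayList` adopts the list's buffer and `toArrayList` hands it back, so `string_bytes.items.len` is meaningful again right after the block. Minimal shape (illustrative):

    var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &list);
    defer list = aw.toArrayList();
    try aw.writer.writeAll(bytes);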

View file

@@ -904,7 +904,6 @@ fn buildOutputType(
var mingw_unicode_entry_point: bool = false;
var enable_link_snapshots: bool = false;
var debug_compiler_runtime_libs = false;
var opt_incremental: ?bool = null;
var install_name: ?[]const u8 = null;
var hash_style: link.File.Lld.Elf.HashStyle = .both;
var entitlements: ?[]const u8 = null;
@@ -1374,9 +1373,9 @@
}
} else if (mem.eql(u8, arg, "-fincremental")) {
dev.check(.incremental);
opt_incremental = true;
create_module.opts.incremental = true;
} else if (mem.eql(u8, arg, "-fno-incremental")) {
opt_incremental = false;
create_module.opts.incremental = false;
} else if (mem.eql(u8, arg, "--entitlements")) {
entitlements = args_iter.nextOrFatal();
} else if (mem.eql(u8, arg, "-fcompiler-rt")) {
@@ -1479,6 +1478,10 @@
create_module.opts.use_lld = true;
} else if (mem.eql(u8, arg, "-fno-lld")) {
create_module.opts.use_lld = false;
} else if (mem.eql(u8, arg, "-fnew-linker")) {
create_module.opts.use_new_linker = true;
} else if (mem.eql(u8, arg, "-fno-new-linker")) {
create_module.opts.use_new_linker = false;
} else if (mem.eql(u8, arg, "-fclang")) {
create_module.opts.use_clang = true;
} else if (mem.eql(u8, arg, "-fno-clang")) {
@@ -3371,7 +3374,7 @@
else => false,
};
const incremental = opt_incremental orelse false;
const incremental = create_module.resolved_options.incremental;
if (debug_incremental and !incremental) {
fatal("--debug-incremental requires -fincremental", .{});
}
@@ -3502,7 +3505,6 @@
.subsystem = subsystem,
.debug_compile_errors = debug_compile_errors,
.debug_incremental = debug_incremental,
.incremental = incremental,
.enable_link_snapshots = enable_link_snapshots,
.install_name = install_name,
.entitlements = entitlements,
@@ -4016,6 +4018,8 @@
error.LldUnavailable => fatal("zig was compiled without LLD libraries", .{}),
error.ClangUnavailable => fatal("zig was compiled without Clang libraries", .{}),
error.DllExportFnsRequiresWindows => fatal("only Windows OS targets support DLLs", .{}),
error.NewLinkerIncompatibleObjectFormat => fatal("using the new linker to link {s} files is unsupported", .{@tagName(target.ofmt)}),
error.NewLinkerIncompatibleWithLld => fatal("using the new linker is incompatible with using lld", .{}),
};
}

View file

@@ -231,6 +231,13 @@ pub fn hasLldSupport(ofmt: std.Target.ObjectFormat) bool {
};
}
pub fn hasNewLinkerSupport(ofmt: std.Target.ObjectFormat) bool {
return switch (ofmt) {
.elf => true,
else => false,
};
}
/// The set of targets that our own self-hosted backends have robust support for.
/// Used to select between LLVM backend and self-hosted backend when compiling in
/// debug mode. A given target should only return true here if it is passing greater
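Option resolution can then gate the new flag on the object format, matching the `createModule` errors handled above; roughly (an illustrative sketch, not the commit's exact code):

    if (use_new_linker and !hasNewLinkerSupport(target.ofmt))
        return error.NewLinkerIncompatibleObjectFormat;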

View file

@@ -1,4 +1,4 @@
//#target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe

View file

@@ -1,3 +1,4 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version

View file

@@ -1,3 +1,4 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version

View file

@@ -1,4 +1,4 @@
//#target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted

View file

@@ -1,3 +1,4 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted