Merge pull request #14086 from ziglang/issue-14082

macho: ensure LINKEDIT layout follows Apple strict validation rules
Jakub Konka 2022-12-27 21:39:05 +01:00 committed by GitHub
commit 601ab9a251
5 changed files with 197 additions and 93 deletions

View file

@@ -126,7 +126,7 @@ const Action = struct {
     /// its reduced, computed value compares using `op` with the expected value, either
     /// a literal or another extracted variable.
     fn computeCmp(act: Action, gpa: Allocator, global_vars: anytype) !bool {
-        var op_stack = std.ArrayList(enum { add, sub, mod }).init(gpa);
+        var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
         var values = std.ArrayList(u64).init(gpa);

         var it = mem.tokenize(u8, act.phrase, " ");
@@ -137,6 +137,8 @@ const Action = struct {
                 try op_stack.append(.sub);
             } else if (mem.eql(u8, next, "%")) {
                 try op_stack.append(.mod);
+            } else if (mem.eql(u8, next, "*")) {
+                try op_stack.append(.mul);
             } else {
                 const val = std.fmt.parseInt(u64, next, 0) catch blk: {
                     break :blk global_vars.get(next) orelse {
@@ -167,6 +169,9 @@ const Action = struct {
                 .mod => {
                     reduced %= other;
                 },
+                .mul => {
+                    reduced *= other;
+                },
             }
             op_i += 1;
         }

View file

@@ -3358,27 +3358,36 @@ fn writeDyldInfoData(self: *MachO) !void {
     try self.collectExportData(&trie);

     const link_seg = self.getLinkeditSegmentPtr();
-    const rebase_off = mem.alignForwardGeneric(u64, link_seg.fileoff, @alignOf(u64));
-    assert(rebase_off == link_seg.fileoff);
+    assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
+    const rebase_off = link_seg.fileoff;
     const rebase_size = try bind.rebaseInfoSize(rebase_pointers.items);
-    log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size });
+    const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+    log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });

-    const bind_off = mem.alignForwardGeneric(u64, rebase_off + rebase_size, @alignOf(u64));
+    const bind_off = rebase_off + rebase_size_aligned;
     const bind_size = try bind.bindInfoSize(bind_pointers.items);
-    log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size });
+    const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+    log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });

-    const lazy_bind_off = mem.alignForwardGeneric(u64, bind_off + bind_size, @alignOf(u64));
+    const lazy_bind_off = bind_off + bind_size_aligned;
     const lazy_bind_size = try bind.lazyBindInfoSize(lazy_bind_pointers.items);
-    log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size });
+    const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+    log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
+        lazy_bind_off,
+        lazy_bind_off + lazy_bind_size_aligned,
+    });

-    const export_off = mem.alignForwardGeneric(u64, lazy_bind_off + lazy_bind_size, @alignOf(u64));
+    const export_off = lazy_bind_off + lazy_bind_size_aligned;
     const export_size = trie.size;
-    log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size });
+    const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+    log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });

-    const needed_size = export_off + export_size - rebase_off;
+    const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
+        return error.Overflow;
     link_seg.filesize = needed_size;
+    assert(mem.isAlignedGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64)));

-    var buffer = try gpa.alloc(u8, math.cast(usize, needed_size) orelse return error.Overflow);
+    var buffer = try gpa.alloc(u8, needed_size);
     defer gpa.free(buffer);
     mem.set(u8, buffer, 0);
@@ -3407,13 +3416,13 @@ fn writeDyldInfoData(self: *MachO) !void {
     try self.populateLazyBindOffsetsInStubHelper(buffer[start..end]);

     self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
-    self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size);
+    self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
     self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
-    self.dyld_info_cmd.bind_size = @intCast(u32, bind_size);
+    self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
     self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
-    self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size);
+    self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
     self.dyld_info_cmd.export_off = @intCast(u32, export_off);
-    self.dyld_info_cmd.export_size = @intCast(u32, export_size);
+    self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
 }

 fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
@@ -3569,13 +3578,11 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
     const nsyms = nlocals + nexports + nimports;

     const seg = self.getLinkeditSegmentPtr();
-    const offset = mem.alignForwardGeneric(
-        u64,
-        seg.fileoff + seg.filesize,
-        @alignOf(macho.nlist_64),
-    );
+    const offset = seg.fileoff + seg.filesize;
+    assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
     const needed_size = nsyms * @sizeOf(macho.nlist_64);
     seg.filesize = offset + needed_size - seg.fileoff;
+    assert(mem.isAlignedGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64)));

     var buffer = std.ArrayList(u8).init(gpa);
     defer buffer.deinit();
@@ -3599,17 +3606,25 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
 }

 fn writeStrtab(self: *MachO) !void {
     const gpa = self.base.allocator;
     const seg = self.getLinkeditSegmentPtr();
-    const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+    const offset = seg.fileoff + seg.filesize;
+    assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
     const needed_size = self.strtab.buffer.items.len;
-    seg.filesize = offset + needed_size - seg.fileoff;
+    const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+    seg.filesize = offset + needed_size_aligned - seg.fileoff;

-    log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+    log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

-    try self.base.file.?.pwriteAll(self.strtab.buffer.items, offset);
+    const buffer = try gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+    defer gpa.free(buffer);
+    mem.set(u8, buffer, 0);
+    mem.copy(u8, buffer, self.strtab.buffer.items);
+
+    try self.base.file.?.pwriteAll(buffer, offset);

     self.symtab_cmd.stroff = @intCast(u32, offset);
-    self.symtab_cmd.strsize = @intCast(u32, needed_size);
+    self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
 }

 const SymtabCtx = struct {
@@ -3628,15 +3643,17 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
     const iundefsym = iextdefsym + ctx.nextdefsym;

     const seg = self.getLinkeditSegmentPtr();
-    const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+    const offset = seg.fileoff + seg.filesize;
+    assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
     const needed_size = nindirectsyms * @sizeOf(u32);
-    seg.filesize = offset + needed_size - seg.fileoff;
+    const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+    seg.filesize = offset + needed_size_aligned - seg.fileoff;

-    log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+    log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

     var buf = std.ArrayList(u8).init(gpa);
     defer buf.deinit();
-    try buf.ensureTotalCapacity(needed_size);
+    try buf.ensureTotalCapacity(math.cast(usize, needed_size_aligned) orelse return error.Overflow);

     const writer = buf.writer();

     if (self.stubs_section_index) |sect_id| {
@@ -3675,7 +3692,12 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
         }
     }

-    assert(buf.items.len == needed_size);
+    const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+    if (padding > 0) {
+        buf.appendNTimesAssumeCapacity(0, padding);
+    }
+    assert(buf.items.len == needed_size_aligned);

     try self.base.file.?.pwriteAll(buf.items, offset);

     self.dysymtab_cmd.nlocalsym = ctx.nlocalsym;

View file

@@ -2178,25 +2178,34 @@ pub const Zld = struct {
         try self.collectExportData(&trie);

         const link_seg = self.getLinkeditSegmentPtr();
-        const rebase_off = mem.alignForwardGeneric(u64, link_seg.fileoff, @alignOf(u64));
-        assert(rebase_off == link_seg.fileoff);
+        assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
+        const rebase_off = link_seg.fileoff;
         const rebase_size = try bind.rebaseInfoSize(rebase_pointers.items);
-        log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size });
+        const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+        log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });

-        const bind_off = mem.alignForwardGeneric(u64, rebase_off + rebase_size, @alignOf(u64));
+        const bind_off = rebase_off + rebase_size_aligned;
         const bind_size = try bind.bindInfoSize(bind_pointers.items);
-        log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size });
+        const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+        log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });

-        const lazy_bind_off = mem.alignForwardGeneric(u64, bind_off + bind_size, @alignOf(u64));
+        const lazy_bind_off = bind_off + bind_size_aligned;
         const lazy_bind_size = try bind.lazyBindInfoSize(lazy_bind_pointers.items);
-        log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size });
+        const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+        log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
+            lazy_bind_off,
+            lazy_bind_off + lazy_bind_size_aligned,
+        });

-        const export_off = mem.alignForwardGeneric(u64, lazy_bind_off + lazy_bind_size, @alignOf(u64));
+        const export_off = lazy_bind_off + lazy_bind_size_aligned;
         const export_size = trie.size;
-        log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size });
+        const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+        log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });

-        const needed_size = math.cast(usize, export_off + export_size - rebase_off) orelse return error.Overflow;
+        const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
+            return error.Overflow;
         link_seg.filesize = needed_size;
+        assert(mem.isAlignedGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64)));

         var buffer = try gpa.alloc(u8, needed_size);
         defer gpa.free(buffer);
@@ -2228,13 +2237,13 @@ pub const Zld = struct {
         try self.populateLazyBindOffsetsInStubHelper(buffer[offset..][0..size]);

         self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
-        self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size);
+        self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
         self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
-        self.dyld_info_cmd.bind_size = @intCast(u32, bind_size);
+        self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
         self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
-        self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size);
+        self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
         self.dyld_info_cmd.export_off = @intCast(u32, export_off);
-        self.dyld_info_cmd.export_size = @intCast(u32, export_size);
+        self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
     }

     fn populateLazyBindOffsetsInStubHelper(self: *Zld, buffer: []const u8) !void {
@@ -2403,16 +2412,23 @@ pub const Zld = struct {
         }

         const link_seg = self.getLinkeditSegmentPtr();
-        const offset = mem.alignForwardGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64));
+        const offset = link_seg.fileoff + link_seg.filesize;
+        assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
         const needed_size = buffer.items.len;
-        link_seg.filesize = offset + needed_size - link_seg.fileoff;
+        const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+        const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+        if (padding > 0) {
+            try buffer.ensureUnusedCapacity(padding);
+            buffer.appendNTimesAssumeCapacity(0, padding);
+        }
+        link_seg.filesize = offset + needed_size_aligned - link_seg.fileoff;

-        log.debug("writing function starts info from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+        log.debug("writing function starts info from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

         try self.file.pwriteAll(buffer.items, offset);

         self.function_starts_cmd.dataoff = @intCast(u32, offset);
-        self.function_starts_cmd.datasize = @intCast(u32, needed_size);
+        self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned);
     }

     fn filterDataInCode(
@@ -2477,16 +2493,23 @@ pub const Zld = struct {
         }

         const seg = self.getLinkeditSegmentPtr();
-        const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+        const offset = seg.fileoff + seg.filesize;
+        assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
         const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry);
-        seg.filesize = offset + needed_size - seg.fileoff;
+        const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+        seg.filesize = offset + needed_size_aligned - seg.fileoff;

-        log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+        log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

-        try self.file.pwriteAll(mem.sliceAsBytes(out_dice.items), offset);
+        const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+        defer self.gpa.free(buffer);
+        mem.set(u8, buffer, 0);
+        mem.copy(u8, buffer, mem.sliceAsBytes(out_dice.items));
+
+        try self.file.pwriteAll(buffer, offset);

         self.data_in_code_cmd.dataoff = @intCast(u32, offset);
-        self.data_in_code_cmd.datasize = @intCast(u32, needed_size);
+        self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned);
     }

     fn writeSymtabs(self: *Zld) !void {
@@ -2561,13 +2584,11 @@ pub const Zld = struct {
         const nsyms = nlocals + nexports + nimports;

         const seg = self.getLinkeditSegmentPtr();
-        const offset = mem.alignForwardGeneric(
-            u64,
-            seg.fileoff + seg.filesize,
-            @alignOf(macho.nlist_64),
-        );
+        const offset = seg.fileoff + seg.filesize;
+        assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
         const needed_size = nsyms * @sizeOf(macho.nlist_64);
         seg.filesize = offset + needed_size - seg.fileoff;
+        assert(mem.isAlignedGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64)));

         var buffer = std.ArrayList(u8).init(gpa);
         defer buffer.deinit();
@@ -2592,16 +2613,23 @@ pub const Zld = struct {
     fn writeStrtab(self: *Zld) !void {
         const seg = self.getLinkeditSegmentPtr();
-        const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+        const offset = seg.fileoff + seg.filesize;
+        assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
         const needed_size = self.strtab.buffer.items.len;
-        seg.filesize = offset + needed_size - seg.fileoff;
+        const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+        seg.filesize = offset + needed_size_aligned - seg.fileoff;

-        log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+        log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

-        try self.file.pwriteAll(self.strtab.buffer.items, offset);
+        const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
+        defer self.gpa.free(buffer);
+        mem.set(u8, buffer, 0);
+        mem.copy(u8, buffer, self.strtab.buffer.items);
+
+        try self.file.pwriteAll(buffer, offset);

         self.symtab_cmd.stroff = @intCast(u32, offset);
-        self.symtab_cmd.strsize = @intCast(u32, needed_size);
+        self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
     }

     const SymtabCtx = struct {
@@ -2620,15 +2648,17 @@ pub const Zld = struct {
         const iundefsym = iextdefsym + ctx.nextdefsym;

         const seg = self.getLinkeditSegmentPtr();
-        const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64));
+        const offset = seg.fileoff + seg.filesize;
+        assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
         const needed_size = nindirectsyms * @sizeOf(u32);
-        seg.filesize = offset + needed_size - seg.fileoff;
+        const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+        seg.filesize = offset + needed_size_aligned - seg.fileoff;

-        log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
+        log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });

         var buf = std.ArrayList(u8).init(gpa);
         defer buf.deinit();
-        try buf.ensureTotalCapacity(needed_size);
+        try buf.ensureTotalCapacityPrecise(math.cast(usize, needed_size_aligned) orelse return error.Overflow);

         const writer = buf.writer();

         if (self.getSectionByName("__TEXT", "__stubs")) |sect_id| {
@@ -2664,7 +2694,12 @@ pub const Zld = struct {
             }
         }

-        assert(buf.items.len == needed_size);
+        const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
+        if (padding > 0) {
+            buf.appendNTimesAssumeCapacity(0, padding);
+        }
+        assert(buf.items.len == needed_size_aligned);

         try self.file.pwriteAll(buf.items, offset);

         self.dysymtab_cmd.nlocalsym = ctx.nlocalsym;
@@ -2692,7 +2727,8 @@ pub const Zld = struct {
                 conformUuid(&self.uuid_cmd.uuid);
             },
             else => {
-                const max_file_end = self.symtab_cmd.stroff + self.symtab_cmd.strsize;
+                // We set the max file size to the actual strtab buffer length to exclude any strtab padding.
+                const max_file_end = @intCast(u32, self.symtab_cmd.stroff + self.strtab.buffer.items.len);

                 const FileSubsection = struct {
                     start: u32,

View file

@@ -32,13 +32,23 @@ pub fn build(b: *Builder) void {
     check_exe.checkNext("exportoff {exportoff}");
     check_exe.checkNext("exportsize {exportsize}");
+
+    check_exe.checkStart("cmd FUNCTION_STARTS");
+    check_exe.checkNext("dataoff {fstartoff}");
+    check_exe.checkNext("datasize {fstartsize}");
+
+    check_exe.checkStart("cmd DATA_IN_CODE");
+    check_exe.checkNext("dataoff {diceoff}");
+    check_exe.checkNext("datasize {dicesize}");

     check_exe.checkStart("cmd SYMTAB");
     check_exe.checkNext("symoff {symoff}");
+    check_exe.checkNext("nsyms {symnsyms}");
     check_exe.checkNext("stroff {stroff}");
     check_exe.checkNext("strsize {strsize}");

     check_exe.checkStart("cmd DYSYMTAB");
     check_exe.checkNext("indirectsymoff {dysymoff}");
+    check_exe.checkNext("nindirectsyms {dysymnsyms}");

     switch (builtin.cpu.arch) {
         .aarch64 => {
@@ -50,42 +60,51 @@ pub fn build(b: *Builder) void {
         else => unreachable,
     }

-    // Next check: DYLD_INFO_ONLY subsections are in order: rebase < bind < lazy < export
-    check_exe.checkComputeCompare("rebaseoff ", .{ .op = .lt, .value = .{ .variable = "bindoff" } });
-    check_exe.checkComputeCompare("bindoff", .{ .op = .lt, .value = .{ .variable = "lazybindoff" } });
-    check_exe.checkComputeCompare("lazybindoff", .{ .op = .lt, .value = .{ .variable = "exportoff" } });
-
-    // Next check: DYLD_INFO_ONLY subsections do not overlap
-    check_exe.checkComputeCompare("rebaseoff rebasesize +", .{ .op = .lte, .value = .{ .variable = "bindoff" } });
-    check_exe.checkComputeCompare("bindoff bindsize +", .{ .op = .lte, .value = .{ .variable = "lazybindoff" } });
-    check_exe.checkComputeCompare("lazybindoff lazybindsize +", .{ .op = .lte, .value = .{ .variable = "exportoff" } });
+    // DYLD_INFO_ONLY subsections are in order: rebase < bind < lazy < export,
+    // and there are no gaps between them
+    check_exe.checkComputeCompare("rebaseoff rebasesize +", .{ .op = .eq, .value = .{ .variable = "bindoff" } });
+    check_exe.checkComputeCompare("bindoff bindsize +", .{ .op = .eq, .value = .{ .variable = "lazybindoff" } });
+    check_exe.checkComputeCompare("lazybindoff lazybindsize +", .{ .op = .eq, .value = .{ .variable = "exportoff" } });

-    // Next check: we maintain order: symtab < dysymtab < strtab
-    check_exe.checkComputeCompare("symoff", .{ .op = .lt, .value = .{ .variable = "dysymoff" } });
-    check_exe.checkComputeCompare("dysymoff", .{ .op = .lt, .value = .{ .variable = "stroff" } });
+    // FUNCTION_STARTS directly follows DYLD_INFO_ONLY (no gap)
+    check_exe.checkComputeCompare("exportoff exportsize +", .{ .op = .eq, .value = .{ .variable = "fstartoff" } });
+
+    // DATA_IN_CODE directly follows FUNCTION_STARTS (no gap)
+    check_exe.checkComputeCompare("fstartoff fstartsize +", .{ .op = .eq, .value = .{ .variable = "diceoff" } });
+
+    // SYMTAB directly follows DATA_IN_CODE (no gap)
+    check_exe.checkComputeCompare("diceoff dicesize +", .{ .op = .eq, .value = .{ .variable = "symoff" } });
+
+    // DYSYMTAB directly follows SYMTAB (no gap)
+    check_exe.checkComputeCompare("symnsyms 16 symoff * +", .{ .op = .eq, .value = .{ .variable = "dysymoff" } });
+
+    // STRTAB follows DYSYMTAB with possible gap
+    check_exe.checkComputeCompare("dysymnsyms 4 dysymoff * +", .{ .op = .lte, .value = .{ .variable = "stroff" } });

-    // Next check: all LINKEDIT sections apart from CODE_SIGNATURE are 8-bytes aligned
+    // all LINKEDIT sections apart from CODE_SIGNATURE are 8-bytes aligned
     check_exe.checkComputeCompare("rebaseoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("bindoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("lazybindoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("exportoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+    check_exe.checkComputeCompare("fstartoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
+    check_exe.checkComputeCompare("diceoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("symoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("stroff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });
     check_exe.checkComputeCompare("dysymoff 8 %", .{ .op = .eq, .value = .{ .literal = 0 } });

     switch (builtin.cpu.arch) {
         .aarch64 => {
-            // Next check: LINKEDIT segment does not extend beyond, or does not include, CODE_SIGNATURE data
+            // LINKEDIT segment does not extend beyond, or does not include, CODE_SIGNATURE data
             check_exe.checkComputeCompare("fileoff filesz codesigoff codesigsize + - -", .{
                 .op = .eq,
                 .value = .{ .literal = 0 },
             });
-            // Next check: CODE_SIGNATURE data offset is 16-bytes aligned
+            // CODE_SIGNATURE data offset is 16-bytes aligned
             check_exe.checkComputeCompare("codesigoff 16 %", .{ .op = .eq, .value = .{ .literal = 0 } });
         },
         .x86_64 => {
-            // Next check: LINKEDIT segment does not extend beyond, or does not include, strtab data
+            // LINKEDIT segment does not extend beyond, or does not include, strtab data
             check_exe.checkComputeCompare("fileoff filesz stroff strsize + - -", .{
                 .op = .eq,
                 .value = .{ .literal = 0 },

View file

@@ -5,23 +5,45 @@ const LibExeObjectStep = std.build.LibExeObjStep;
 pub fn build(b: *Builder) void {
     const test_step = b.step("test", "Test");
     test_step.dependOn(b.getInstallStep());

-    testUuid(b, test_step, .ReleaseSafe, "eb1203019e453d808d4f1e71053af9af");
-    testUuid(b, test_step, .ReleaseFast, "eb1203019e453d808d4f1e71053af9af");
-    testUuid(b, test_step, .ReleaseSmall, "eb1203019e453d808d4f1e71053af9af");
+    // We force cross-compilation to ensure we always pick a generic CPU with constant set of CPU features.
+    const aarch64_macos = std.zig.CrossTarget{
+        .cpu_arch = .aarch64,
+        .os_tag = .macos,
+    };
+    testUuid(b, test_step, .ReleaseSafe, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+    testUuid(b, test_step, .ReleaseFast, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+    testUuid(b, test_step, .ReleaseSmall, aarch64_macos, "46b333df88f5314686fc0cba3b939ca8");
+
+    const x86_64_macos = std.zig.CrossTarget{
+        .cpu_arch = .x86_64,
+        .os_tag = .macos,
+    };
+    testUuid(b, test_step, .ReleaseSafe, x86_64_macos, "342ac765194131e1bad5692b9e0e54a4");
+    testUuid(b, test_step, .ReleaseFast, x86_64_macos, "342ac765194131e1bad5692b9e0e54a4");
+    testUuid(b, test_step, .ReleaseSmall, x86_64_macos, "f119310e24773ecf8ec42e09d0379dad");
 }

-fn testUuid(b: *Builder, test_step: *std.build.Step, mode: std.builtin.Mode, comptime exp: []const u8) void {
+fn testUuid(
+    b: *Builder,
+    test_step: *std.build.Step,
+    mode: std.builtin.Mode,
+    target: std.zig.CrossTarget,
+    comptime exp: []const u8,
+) void {
     // The calculated UUID value is independent of debug info and so it should
     // stay the same across builds.
     {
-        const dylib = simpleDylib(b, mode);
+        const dylib = simpleDylib(b, mode, target);
         const check_dylib = dylib.checkObject(.macho);
         check_dylib.checkStart("cmd UUID");
         check_dylib.checkNext("uuid " ++ exp);
         test_step.dependOn(&check_dylib.step);
     }
     {
-        const dylib = simpleDylib(b, mode);
+        const dylib = simpleDylib(b, mode, target);
         dylib.strip = true;
         const check_dylib = dylib.checkObject(.macho);
         check_dylib.checkStart("cmd UUID");
@@ -30,10 +52,10 @@ fn testUuid(b: *Builder, test_step: *std.build.Step, mode: std.builtin.Mode, com
     }
 }

-fn simpleDylib(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
+fn simpleDylib(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
     const dylib = b.addSharedLibrary("test", null, b.version(1, 0, 0));
+    dylib.setTarget(target);
     dylib.setBuildMode(mode);
-    dylib.setTarget(.{ .cpu_arch = .aarch64, .os_tag = .macos });
     dylib.addCSourceFile("test.c", &.{});
     dylib.linkLibC();
     return dylib;