fuzzer: account for runtime address slide
This is relevant to PIEs, which are notably enabled by default on macOS. The build system must only see virtual addresses, i.e. addresses without the slide applied, but the fuzzer itself naturally sees relocated addresses (with the slide applied). We just need to subtract the slide when communicating addresses to the build system.
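As a quick illustration of what "unsliding" means here, the following minimal Zig sketch uses made-up 64-bit numbers (not taken from this commit): the slide is the difference between where the loader actually mapped the image and the virtual address recorded in the binary, and subtracting it recovers the on-disk address.

const std = @import("std");

test "unslide a runtime address (illustrative numbers only)" {
    const vmaddr_on_disk: usize = 0x100000000; // hypothetical __TEXT vmaddr recorded in the binary
    const runtime_base: usize = 0x102340000; // hypothetical address where the loader mapped the image
    const slide = runtime_base - vmaddr_on_disk; // 0x2340000
    const runtime_pc: usize = 0x102345678; // an address observed by the fuzzer at runtime
    const unslid_pc = runtime_pc - slide; // back to the on-disk virtual address
    try std.testing.expectEqual(@as(usize, 0x100005678), unslid_pc);
}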
parent 0a330d4f94
commit 010dcd6a9b
7 changed files with 56 additions and 8 deletions
@@ -184,7 +184,7 @@ fn mainServer() !void {
             const test_fn = builtin.test_functions[index];
             const entry_addr = @intFromPtr(test_fn.func);

-            try server.serveU64Message(.fuzz_start_addr, entry_addr);
+            try server.serveU64Message(.fuzz_start_addr, fuzz_abi.fuzzer_unslide_address(entry_addr));
             defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
             is_fuzz_test = false;
             fuzz_test_index = index;
@@ -116,13 +116,18 @@ const Executable = struct {
             "failed to init memory map for coverage file '{s}': {t}",
             .{ &coverage_file_name, e },
         );
-        map.appendSliceAssumeCapacity(mem.asBytes(&abi.SeenPcsHeader{
+        map.appendSliceAssumeCapacity(@ptrCast(&abi.SeenPcsHeader{
             .n_runs = 0,
             .unique_runs = 0,
             .pcs_len = pcs.len,
         }));
         map.appendNTimesAssumeCapacity(0, pc_bitset_usizes * @sizeOf(usize));
-        map.appendSliceAssumeCapacity(mem.sliceAsBytes(pcs));
+        // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+        // applied). We need to translate these to the virtual addresses as on disk.
+        for (pcs) |pc| {
+            const pc_vaddr = fuzzer_unslide_address(pc);
+            map.appendSliceAssumeCapacity(@ptrCast(&pc_vaddr));
+        }
         return map;
     } else {
         const size = coverage_file.getEndPos() catch |e| panic(
@@ -215,7 +220,16 @@ const Executable = struct {
             .{ self.pc_counters.len, pcs.len },
         );

-        self.pc_digest = std.hash.Wyhash.hash(0, mem.sliceAsBytes(pcs));
+        self.pc_digest = digest: {
+            // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+            // applied). We need to translate these to the virtual addresses as on disk.
+            var h: std.hash.Wyhash = .init(0);
+            for (pcs) |pc| {
+                const pc_vaddr = fuzzer_unslide_address(pc);
+                h.update(@ptrCast(&pc_vaddr));
+            }
+            break :digest h.final();
+        };
         self.shared_seen_pcs = getCoverageFile(cache_dir, pcs, self.pc_digest);

         return self;
@@ -622,6 +636,14 @@ export fn fuzzer_main(limit_kind: abi.LimitKind, amount: u64) void {
     }
 }

+export fn fuzzer_unslide_address(addr: usize) usize {
+    const si = std.debug.getSelfDebugInfo() catch @compileError("unsupported");
+    const slide = si.getModuleSlide(std.debug.getDebugInfoAllocator(), addr) catch |err| {
+        std.debug.panic("failed to find virtual address slide: {t}", .{err});
+    };
+    return addr - slide;
+}
+
 /// Helps determine run uniqueness in the face of recursion.
 /// Currently not used by the fuzzer.
 export threadlocal var __sancov_lowest_stack: usize = 0;
@@ -1185,13 +1207,13 @@ const Mutation = enum {
                 const j = rng.uintAtMostBiased(usize, corpus[splice_i].len - len);
                 out.appendSliceAssumeCapacity(corpus[splice_i][j..][0..len]);
             },
-            .@"const" => out.appendSliceAssumeCapacity(mem.asBytes(
+            .@"const" => out.appendSliceAssumeCapacity(@ptrCast(
                 &data_ctx[rng.uintLessThanBiased(usize, data_ctx.len)],
             )),
-            .small => out.appendSliceAssumeCapacity(mem.asBytes(
+            .small => out.appendSliceAssumeCapacity(@ptrCast(
                 &mem.nativeTo(data_ctx[0], rng.int(SmallValue), data_ctx[1]),
             )),
-            .few => out.appendSliceAssumeCapacity(mem.asBytes(
+            .few => out.appendSliceAssumeCapacity(@ptrCast(
                 &fewValue(rng, data_ctx[0], data_ctx[1]),
             )),
         }
@@ -145,6 +145,7 @@ pub const fuzz = struct {
     pub extern fn fuzzer_init_test(test_one: TestOne, unit_test_name: Slice) void;
     pub extern fn fuzzer_new_input(bytes: Slice) void;
     pub extern fn fuzzer_main(limit_kind: LimitKind, amount: u64) void;
+    pub extern fn fuzzer_unslide_address(addr: usize) usize;

     pub const Slice = extern struct {
         ptr: [*]const u8,
@@ -1367,7 +1367,7 @@ test printLineFromFile {

 /// The returned allocator should be thread-safe if the compilation is multi-threaded, because
 /// multiple threads could capture and/or print stack traces simultaneously.
-fn getDebugInfoAllocator() Allocator {
+pub fn getDebugInfoAllocator() Allocator {
     // Allow overriding the debug info allocator by exposing `root.debug.getDebugInfoAllocator`.
     if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getDebugInfoAllocator")) {
         return root.debug.getDebugInfoAllocator();
@@ -80,6 +80,11 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
     if (module.name.len == 0) return error.MissingDebugInfo;
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    const module = try si.findModule(gpa, address, .shared);
+    defer si.rwlock.unlockShared();
+    return module.load_offset;
+}

 pub const can_unwind: bool = s: {
     // The DWARF code can't deal with ILP32 ABIs yet: https://github.com/ziglang/zig/issues/25447
@@ -82,6 +82,20 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
     defer si.mutex.unlock();
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    const module = try si.findModule(gpa, address);
+    defer si.mutex.unlock();
+    const header: *std.macho.mach_header_64 = @ptrFromInt(module.text_base);
+    const raw_macho: [*]u8 = @ptrCast(header);
+    var it = macho.LoadCommandIterator.init(header, raw_macho[@sizeOf(macho.mach_header_64)..][0..header.sizeofcmds]) catch unreachable;
+    const text_vmaddr = while (it.next() catch unreachable) |load_cmd| {
+        if (load_cmd.hdr.cmd != .SEGMENT_64) continue;
+        const segment_cmd = load_cmd.cast(macho.segment_command_64).?;
+        if (!mem.eql(u8, segment_cmd.segName(), "__TEXT")) continue;
+        break segment_cmd.vmaddr;
+    } else unreachable;
+    return module.text_base - text_vmaddr;
+}

 pub const can_unwind: bool = true;
 pub const UnwindContext = std.debug.Dwarf.SelfUnwinder;
@@ -33,6 +33,12 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
     const module = try si.findModule(gpa, address);
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    si.mutex.lock();
+    defer si.mutex.unlock();
+    const module = try si.findModule(gpa, address);
+    return module.base_address;
+}

 pub const can_unwind: bool = switch (builtin.cpu.arch) {
     else => true,