fuzzer web UI: receive coverage information

* libfuzzer: track unique runs instead of deduplicated runs
  - easier for consumers to notice when to recheck the covered bits.
* move common definitions to `std.Build.Fuzz.abi`.

The build runner now sends all the information that the fuzzer web interface
client needs in order to display inline coverage information alongside the
source code.
This commit is contained in:
Andrew Kelley 2024-08-04 15:27:13 -07:00
parent 22925636f7
commit dec7e45f7c
7 changed files with 877 additions and 648 deletions

View file

@ -3,6 +3,7 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader;
pub const std_options = .{
.logFn = logOverride,
@ -120,13 +121,6 @@ const Fuzzer = struct {
/// information, available to other processes.
coverage_id: u64,
/// Header of the shared coverage file; counters are updated atomically by the
/// fuzzer and read by other processes, so the layout must stay stable.
const SeenPcsHeader = extern struct {
/// Total number of fuzz runs executed so far.
n_runs: usize,
/// Number of runs whose coverage matched an already-seen run.
deduplicated_runs: usize,
/// Number of program counters tracked after this header.
pcs_len: usize,
/// Lowest stack value observed; initialized to maxInt(usize) — presumably a
/// stack-depth watermark, TODO confirm semantics.
lowest_stack: usize,
};
const RunMap = std.ArrayHashMapUnmanaged(Run, void, Run.HashContext, false);
const Coverage = struct {
@ -247,7 +241,7 @@ const Fuzzer = struct {
} else {
const header: SeenPcsHeader = .{
.n_runs = 0,
.deduplicated_runs = 0,
.unique_runs = 0,
.pcs_len = flagged_pcs.len,
.lowest_stack = std.math.maxInt(usize),
};
@ -292,8 +286,6 @@ const Fuzzer = struct {
});
if (gop.found_existing) {
//std.log.info("duplicate analysis: score={d} id={d}", .{ analysis.score, analysis.id });
const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
_ = @atomicRmw(usize, &header.deduplicated_runs, .Add, 1, .monotonic);
if (f.input.items.len < gop.key_ptr.input.len or gop.key_ptr.score == 0) {
gpa.free(gop.key_ptr.input);
gop.key_ptr.input = try gpa.dupe(u8, f.input.items);
@ -325,6 +317,9 @@ const Fuzzer = struct {
_ = @atomicRmw(u8, elem, .Or, mask, .monotonic);
}
}
const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
_ = @atomicRmw(usize, &header.unique_runs, .Add, 1, .monotonic);
}
if (f.recent_cases.entries.len >= 100) {

View file

@ -124,6 +124,7 @@
</style>
</head>
<body>
<p id="status">Loading JavaScript...</p>
<div id="sectSource" class="hidden">
<h2>Source Code</h2>
<pre><code id="sourceText"></code></pre>

View file

@ -1,95 +1,148 @@
(function() {
const domSectSource = document.getElementById("sectSource");
const domSourceText = document.getElementById("sourceText");
const domStatus = document.getElementById("status");
const domSectSource = document.getElementById("sectSource");
const domSourceText = document.getElementById("sourceText");
let wasm_promise = fetch("main.wasm");
let sources_promise = fetch("sources.tar").then(function(response) {
if (!response.ok) throw new Error("unable to download sources");
return response.arrayBuffer();
});
var wasm_exports = null;
let wasm_promise = fetch("main.wasm");
let sources_promise = fetch("sources.tar").then(function(response) {
if (!response.ok) throw new Error("unable to download sources");
return response.arrayBuffer();
});
var wasm_exports = null;
const text_decoder = new TextDecoder();
const text_encoder = new TextEncoder();
const text_decoder = new TextDecoder();
const text_encoder = new TextEncoder();
const eventSource = new EventSource("events");
eventSource.addEventListener('message', onMessage, false);
WebAssembly.instantiateStreaming(wasm_promise, {
js: {
log: function(ptr, len) {
const msg = decodeString(ptr, len);
console.log(msg);
},
panic: function (ptr, len) {
const msg = decodeString(ptr, len);
throw new Error("panic: " + msg);
},
domStatus.textContent = "Loading WebAssembly...";
WebAssembly.instantiateStreaming(wasm_promise, {
js: {
log: function(ptr, len) {
const msg = decodeString(ptr, len);
console.log(msg);
},
}).then(function(obj) {
wasm_exports = obj.instance.exports;
window.wasm = obj; // for debugging
panic: function (ptr, len) {
const msg = decodeString(ptr, len);
throw new Error("panic: " + msg);
},
emitSourceIndexChange: onSourceIndexChange,
emitCoverageUpdate: onCoverageUpdate,
},
}).then(function(obj) {
wasm_exports = obj.instance.exports;
window.wasm = obj; // for debugging
domStatus.textContent = "Loading sources tarball...";
sources_promise.then(function(buffer) {
const js_array = new Uint8Array(buffer);
const ptr = wasm_exports.alloc(js_array.length);
const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
wasm_array.set(js_array);
wasm_exports.unpack(ptr, js_array.length);
sources_promise.then(function(buffer) {
domStatus.textContent = "Parsing sources...";
const js_array = new Uint8Array(buffer);
const ptr = wasm_exports.alloc(js_array.length);
const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
wasm_array.set(js_array);
wasm_exports.unpack(ptr, js_array.length);
render();
});
domStatus.textContent = "Waiting for server to send source location metadata...";
connectWebSocket();
});
});
// EventSource "message" handler; currently just logs the payload.
function onMessage(e) {
console.log("Message", e.data);
}
// Opens a WebSocket back to the page's own host and path, using wss: when the
// page was served over https. Binary frames arrive as ArrayBuffers.
function connectWebSocket() {
const host = window.document.location.host;
const pathname = window.document.location.pathname;
const isHttps = window.document.location.protocol === 'https:';
// Split an explicit ":port" suffix off the host, defaulting to 443/80.
const match = host.match(/^(.+):(\d+)$/);
const defaultPort = isHttps ? 443 : 80;
const port = match ? parseInt(match[2], 10) : defaultPort;
const hostName = match ? match[1] : host;
const wsProto = isHttps ? "wss:" : "ws:";
const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
ws = new WebSocket(wsUrl);
ws.binaryType = "arraybuffer";
ws.addEventListener('message', onWebSocketMessage, false);
// Both error and close trigger a delayed reconnect.
ws.addEventListener('error', timeoutThenCreateNew, false);
ws.addEventListener('close', timeoutThenCreateNew, false);
ws.addEventListener('open', onWebSocketOpen, false);
}
function render() {
domSectSource.classList.add("hidden");
// Connection-established callback; logging only.
function onWebSocketOpen() {
console.log("web socket opened");
}
// TODO this is temporary debugging data
renderSource("/home/andy/dev/zig/lib/std/zig/tokenizer.zig");
}
// Forwards each binary WebSocket frame to the wasm module for decoding.
function onWebSocketMessage(ev) {
wasmOnMessage(ev.data);
}
function renderSource(path) {
const decl_index = findFileRoot(path);
if (decl_index == null) throw new Error("file not found: " + path);
// Tears down the current WebSocket's listeners and schedules a reconnect
// attempt one second later.
function timeoutThenCreateNew() {
ws.removeEventListener('message', onWebSocketMessage, false);
ws.removeEventListener('error', timeoutThenCreateNew, false);
ws.removeEventListener('close', timeoutThenCreateNew, false);
ws.removeEventListener('open', onWebSocketOpen, false);
ws = null;
setTimeout(connectWebSocket, 1000);
}
const h2 = domSectSource.children[0];
h2.innerText = path;
domSourceText.innerHTML = declSourceHtml(decl_index);
// Copies a received message (ArrayBuffer) into wasm memory and hands control
// to the module: message_begin sizes the wasm-side buffer and returns its
// address; message_end parses the copied bytes.
function wasmOnMessage(data) {
const jsArray = new Uint8Array(data);
const ptr = wasm_exports.message_begin(jsArray.length);
// NOTE(review): memory.buffer is re-read after message_begin on purpose —
// growing wasm memory detaches previously obtained ArrayBuffers.
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
wasmArray.set(jsArray);
wasm_exports.message_end();
}
domSectSource.classList.remove("hidden");
}
function onSourceIndexChange() {
console.log("source location index metadata updated");
render();
}
function findFileRoot(path) {
setInputString(path);
const result = wasm_exports.find_file_root();
if (result === -1) return null;
return result;
}
function onCoverageUpdate() {
console.log("coverage update");
}
function decodeString(ptr, len) {
if (len === 0) return "";
return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
}
function render() {
domStatus.classList.add("hidden");
domSectSource.classList.add("hidden");
function setInputString(s) {
const jsArray = text_encoder.encode(s);
const len = jsArray.length;
const ptr = wasm_exports.set_input_string(len);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, len);
wasmArray.set(jsArray);
}
// TODO this is temporary debugging data
renderSource("/home/andy/dev/zig/lib/std/zig/tokenizer.zig");
}
function declSourceHtml(decl_index) {
return unwrapString(wasm_exports.decl_source_html(decl_index));
}
function renderSource(path) {
const decl_index = findFileRoot(path);
if (decl_index == null) throw new Error("file not found: " + path);
function unwrapString(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
return decodeString(ptr, len);
}
const h2 = domSectSource.children[0];
h2.innerText = path;
domSourceText.innerHTML = declSourceHtml(decl_index);
domSectSource.classList.remove("hidden");
}
// Returns the wasm decl index for the root of the file at `path`, or null
// when the wasm module does not know the file (find_file_root returns -1).
function findFileRoot(path) {
setInputString(path);
const result = wasm_exports.find_file_root();
if (result === -1) return null;
return result;
}
// Decodes `len` UTF-8 bytes at `ptr` in wasm memory into a JS string.
function decodeString(ptr, len) {
if (len === 0) return "";
return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
}
// UTF-8-encodes `s` and copies it into the wasm module's input buffer;
// set_input_string sizes the buffer and returns its address.
function setInputString(s) {
const jsArray = text_encoder.encode(s);
const len = jsArray.length;
const ptr = wasm_exports.set_input_string(len);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, len);
wasmArray.set(jsArray);
}
// Renders the source of `decl_index` as an HTML string via the wasm module.
function declSourceHtml(decl_index) {
return unwrapString(wasm_exports.decl_source_html(decl_index));
}
// Unpacks a wasm-returned u64 (as a BigInt): pointer in the low 32 bits,
// length in the high 32 bits; decodes the referenced bytes into a string.
function unwrapString(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
return decodeString(ptr, len);
}
})();

View file

@ -1,16 +1,19 @@
const std = @import("std");
const assert = std.debug.assert;
const abi = std.Build.Fuzz.abi;
const gpa = std.heap.wasm_allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const Walk = @import("Walk");
const Decl = Walk.Decl;
const html_render = @import("html_render");
const gpa = std.heap.wasm_allocator;
const log = std.log;
const js = struct {
extern "js" fn log(ptr: [*]const u8, len: usize) void;
extern "js" fn panic(ptr: [*]const u8, len: usize) noreturn;
extern "js" fn emitSourceIndexChange() void;
extern "js" fn emitCoverageUpdate() void;
};
pub const std_options: std.Options = .{
@ -45,6 +48,26 @@ export fn alloc(n: usize) [*]u8 {
return slice.ptr;
}
var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{};
/// Resizes the message buffer to be the correct length; returns the pointer to
/// the query string.
/// The JS side fills the buffer, then calls `message_end` to process it.
export fn message_begin(len: usize) [*]u8 {
message_buffer.resize(gpa, len) catch @panic("OOM");
return message_buffer.items.ptr;
}
/// Processes the message previously copied into `message_buffer`.
/// Byte 0 is the `abi.ToClientTag` discriminator; the remainder is payload.
export fn message_end() void {
const msg_bytes = message_buffer.items;
const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
switch (tag) {
.source_index => return sourceIndexMessage(msg_bytes) catch @panic("OOM"),
.coverage_update => return coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
_ => unreachable,
}
}
export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
const tar_bytes = tar_ptr[0..tar_len];
log.debug("received {d} bytes of tar file", .{tar_bytes.len});
@ -141,3 +164,57 @@ fn fatal(comptime format: []const u8, args: anytype) noreturn {
};
js.panic(line.ptr, line.len);
}
/// Parses a `source_index` message: an `abi.SourceIndexHeader` followed by the
/// directory table, file table, source-location table, and string bytes, in
/// that order. Rebuilds the local coverage index, then notifies the JS side.
fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const Header = abi.SourceIndexHeader;
const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*);
// Byte offsets of each trailing section, derived from the header counts.
const directories_start = @sizeOf(Header);
const directories_end = directories_start + header.directories_len * @sizeOf(Coverage.String);
const files_start = directories_end;
const files_end = files_start + header.files_len * @sizeOf(Coverage.File);
const source_locations_start = files_end;
const source_locations_end = source_locations_start + header.source_locations_len * @sizeOf(Coverage.SourceLocation);
const string_bytes = msg_bytes[source_locations_end..][0..header.string_bytes_len];
const directories: []const Coverage.String = @alignCast(std.mem.bytesAsSlice(Coverage.String, msg_bytes[directories_start..directories_end]));
const files: []const Coverage.File = @alignCast(std.mem.bytesAsSlice(Coverage.File, msg_bytes[files_start..files_end]));
const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end]));
try updateCoverage(directories, files, source_locations, string_bytes);
js.emitSourceIndexChange();
}
/// Stores the raw coverage-update message, unmodified, into
/// `recent_coverage_update`, then notifies the JS side that new coverage data
/// is available.
/// Propagates `error.OutOfMemory` (rather than panicking inline) so that it
/// matches the declared error set; the caller in `message_end` decides how to
/// react (it currently panics).
fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
recent_coverage_update.clearRetainingCapacity();
try recent_coverage_update.appendSlice(gpa, msg_bytes);
js.emitCoverageUpdate();
}
var coverage = Coverage.init;
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .{};
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListUnmanaged(u8) = .{};
/// Replaces the contents of the global `coverage` index (directories, files,
/// string bytes) and `coverage_source_locations` with freshly received data.
fn updateCoverage(
directories: []const Coverage.String,
files: []const Coverage.File,
source_locations: []const Coverage.SourceLocation,
string_bytes: []const u8,
) !void {
coverage.directories.clearRetainingCapacity();
coverage.files.clearRetainingCapacity();
coverage.string_bytes.clearRetainingCapacity();
coverage_source_locations.clearRetainingCapacity();
try coverage_source_locations.appendSlice(gpa, source_locations);
// string_bytes must be populated before re-indexing below: both hash
// contexts resolve their keys through `coverage.string_bytes.items`.
try coverage.string_bytes.appendSlice(gpa, string_bytes);
try coverage.files.entries.resize(gpa, files.len);
@memcpy(coverage.files.entries.items(.key), files);
try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
try coverage.directories.entries.resize(gpa, directories.len);
@memcpy(coverage.directories.entries.items(.key), directories);
try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
}

View file

@ -6,11 +6,13 @@ const assert = std.debug.assert;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const Fuzz = @This();
const build_runner = @import("root");
pub const WebServer = @import("Fuzz/WebServer.zig");
pub const abi = @import("Fuzz/abi.zig");
pub fn start(
gpa: Allocator,
arena: Allocator,
@ -97,565 +99,6 @@ pub fn start(
log.err("all fuzz workers crashed", .{});
}
pub const WebServer = struct {
gpa: Allocator,
global_cache_directory: Build.Cache.Directory,
zig_lib_directory: Build.Cache.Directory,
zig_exe_path: []const u8,
listen_address: std.net.Address,
fuzz_run_steps: []const *Step.Run,
/// Messages from fuzz workers. Protected by mutex.
msg_queue: std.ArrayListUnmanaged(Msg),
/// Protects `msg_queue` only.
mutex: std.Thread.Mutex,
/// Signaled when there is a message in `msg_queue`.
condition: std.Thread.Condition,
coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
/// Protects `coverage_files` only.
coverage_mutex: std.Thread.Mutex,
/// Signaled when `coverage_files` changes.
coverage_condition: std.Thread.Condition,
const CoverageMap = struct {
mapped_memory: []align(std.mem.page_size) const u8,
coverage: Coverage,
fn deinit(cm: *CoverageMap, gpa: Allocator) void {
std.posix.munmap(cm.mapped_memory);
cm.coverage.deinit(gpa);
cm.* = undefined;
}
};
const Msg = union(enum) {
coverage: struct {
id: u64,
run: *Step.Run,
},
};
fn run(ws: *WebServer) void {
var http_server = ws.listen_address.listen(.{
.reuse_address = true,
}) catch |err| {
log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.in.getPort(), @errorName(err) });
return;
};
const port = http_server.listen_address.in.getPort();
log.info("web interface listening at http://127.0.0.1:{d}/", .{port});
while (true) {
const connection = http_server.accept() catch |err| {
log.err("failed to accept connection: {s}", .{@errorName(err)});
return;
};
_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
connection.stream.close();
continue;
};
}
}
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
var read_buffer: [8000]u8 = undefined;
var server = std.http.Server.init(connection, &read_buffer);
while (server.state == .ready) {
var request = server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
log.err("closing http connection: {s}", .{@errorName(err)});
return;
},
};
serveRequest(ws, &request) catch |err| switch (err) {
error.AlreadyReported => return,
else => |e| {
log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
return;
},
};
}
}
fn serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void {
if (std.mem.eql(u8, request.head.target, "/") or
std.mem.eql(u8, request.head.target, "/debug") or
std.mem.eql(u8, request.head.target, "/debug/"))
{
try serveFile(ws, request, "fuzzer/index.html", "text/html");
} else if (std.mem.eql(u8, request.head.target, "/main.js") or
std.mem.eql(u8, request.head.target, "/debug/main.js"))
{
try serveFile(ws, request, "fuzzer/main.js", "application/javascript");
} else if (std.mem.eql(u8, request.head.target, "/main.wasm")) {
try serveWasm(ws, request, .ReleaseFast);
} else if (std.mem.eql(u8, request.head.target, "/debug/main.wasm")) {
try serveWasm(ws, request, .Debug);
} else if (std.mem.eql(u8, request.head.target, "/sources.tar") or
std.mem.eql(u8, request.head.target, "/debug/sources.tar"))
{
try serveSourcesTar(ws, request);
} else if (std.mem.eql(u8, request.head.target, "/events") or
std.mem.eql(u8, request.head.target, "/debug/events"))
{
try serveEvents(ws, request);
} else {
try request.respond("not found", .{
.status = .not_found,
.extra_headers = &.{
.{ .name = "content-type", .value = "text/plain" },
},
});
}
}
fn serveFile(
ws: *WebServer,
request: *std.http.Server.Request,
name: []const u8,
content_type: []const u8,
) !void {
const gpa = ws.gpa;
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| {
log.err("failed to read '{}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
return error.AlreadyReported;
};
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
.{ .name = "content-type", .value = content_type },
cache_control_header,
},
});
}
fn serveWasm(
ws: *WebServer,
request: *std.http.Server.Request,
optimize_mode: std.builtin.OptimizeMode,
) !void {
const gpa = ws.gpa;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
// Do the compilation every request, so that the user can edit the files
// and see the changes without restarting the server.
const wasm_binary_path = try buildWasmBinary(ws, arena, optimize_mode);
// std.http.Server does not have a sendfile API yet.
const file_contents = try std.fs.cwd().readFileAlloc(gpa, wasm_binary_path, 10 * 1024 * 1024);
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
.{ .name = "content-type", .value = "application/wasm" },
cache_control_header,
},
});
}
fn buildWasmBinary(
ws: *WebServer,
arena: Allocator,
optimize_mode: std.builtin.OptimizeMode,
) ![]const u8 {
const gpa = ws.gpa;
const main_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "fuzzer/wasm/main.zig",
};
const walk_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "docs/wasm/Walk.zig",
};
const html_render_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "docs/wasm/html_render.zig",
};
var argv: std.ArrayListUnmanaged([]const u8) = .{};
try argv.appendSlice(arena, &.{
ws.zig_exe_path, "build-exe", //
"-fno-entry", //
"-O", @tagName(optimize_mode), //
"-target", "wasm32-freestanding", //
"-mcpu", "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext", //
"--cache-dir", ws.global_cache_directory.path orelse ".", //
"--global-cache-dir", ws.global_cache_directory.path orelse ".", //
"--name", "fuzzer", //
"-rdynamic", //
"--dep", "Walk", //
"--dep", "html_render", //
try std.fmt.allocPrint(arena, "-Mroot={}", .{main_src_path}), //
try std.fmt.allocPrint(arena, "-MWalk={}", .{walk_src_path}), //
"--dep", "Walk", //
try std.fmt.allocPrint(arena, "-Mhtml_render={}", .{html_render_src_path}), //
"--listen=-",
});
var child = std.process.Child.init(argv.items, gpa);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
try sendMessage(child.stdin.?, .update);
try sendMessage(child.stdin.?, .exit);
const Header = std.zig.Server.Message.Header;
var result: ?[]const u8 = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
const stdout = poller.fifo(.stdout);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) {
if (!(try poller.poll())) break :poll;
}
const header = stdout.reader().readStruct(Header) catch unreachable;
while (stdout.readableLength() < header.bytes_len) {
if (!(try poller.poll())) break :poll;
}
const body = stdout.readableSliceOfLen(header.bytes_len);
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
return error.ZigProtocolVersionMismatch;
}
},
.error_bundle => {
const EbHdr = std.zig.Server.Message.ErrorBundle;
const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
const extra_bytes =
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
const string_bytes =
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
// TODO: use @ptrCast when the compiler supports it
const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
const extra_array = try arena.alloc(u32, unaligned_extra.len);
@memcpy(extra_array, unaligned_extra);
result_error_bundle = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
},
.emit_bin_path => {
const EbpHdr = std.zig.Server.Message.EmitBinPath;
const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
if (!ebp_hdr.flags.cache_hit) {
log.info("source changes detected; rebuilt wasm component", .{});
}
result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
},
else => {}, // ignore other messages
}
stdout.discard(body.len);
}
const stderr = poller.fifo(.stderr);
if (stderr.readableLength() > 0) {
const owned_stderr = try stderr.toOwnedSlice();
defer gpa.free(owned_stderr);
std.debug.print("{s}", .{owned_stderr});
}
// Send EOF to stdin.
child.stdin.?.close();
child.stdin = null;
switch (try child.wait()) {
.Exited => |code| {
if (code != 0) {
log.err(
"the following command exited with error code {d}:\n{s}",
.{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
);
return error.WasmCompilationFailed;
}
},
.Signal, .Stopped, .Unknown => {
log.err(
"the following command terminated unexpectedly:\n{s}",
.{try Build.Step.allocPrintCmd(arena, null, argv.items)},
);
return error.WasmCompilationFailed;
},
}
if (result_error_bundle.errorMessageCount() > 0) {
const color = std.zig.Color.auto;
result_error_bundle.renderToStdErr(color.renderOptions());
log.err("the following command failed with {d} compilation errors:\n{s}", .{
result_error_bundle.errorMessageCount(),
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
}
return result orelse {
log.err("child process failed to report result\n{s}", .{
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
};
}
/// Writes a zero-length client protocol message with the given tag to the
/// compiler subprocess's stdin.
fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
};
try file.writeAll(std.mem.asBytes(&header));
}
fn serveEvents(ws: *WebServer, request: *std.http.Server.Request) !void {
var send_buffer: [0x4000]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
.respond_options = .{
.extra_headers = &.{
.{ .name = "content-type", .value = "text/event-stream" },
},
.transfer_encoding = .none,
},
});
ws.coverage_mutex.lock();
defer ws.coverage_mutex.unlock();
if (getStats(ws)) |stats| {
try response.writer().print("data: {d}\n\n", .{stats.n_runs});
} else {
try response.writeAll("data: loading debug information\n\n");
}
try response.flush();
while (true) {
ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {};
if (getStats(ws)) |stats| {
try response.writer().print("data: {d}\n\n", .{stats.n_runs});
try response.flush();
}
}
}
const Stats = struct {
n_runs: u64,
};
/// Reads run statistics out of the first mapped coverage file, or returns
/// null when no coverage map has been loaded yet.
/// Caller must hold `ws.coverage_mutex` (as `serveEvents` does).
fn getStats(ws: *WebServer) ?Stats {
const coverage_maps = ws.coverage_files.values();
if (coverage_maps.len == 0) return null;
// TODO: make each events URL correspond to one coverage map
const ptr = coverage_maps[0].mapped_memory;
// Mirrors the header layout the fuzzer writes at the start of the shared
// coverage file.
const SeenPcsHeader = extern struct {
n_runs: usize,
deduplicated_runs: usize,
pcs_len: usize,
lowest_stack: usize,
};
const header: *const SeenPcsHeader = @ptrCast(ptr[0..@sizeOf(SeenPcsHeader)]);
return .{
.n_runs = @atomicLoad(usize, &header.n_runs, .monotonic),
};
}
fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
const gpa = ws.gpa;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var send_buffer: [0x4000]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
.respond_options = .{
.extra_headers = &.{
.{ .name = "content-type", .value = "application/x-tar" },
cache_control_header,
},
},
});
const w = response.writer();
const DedupeTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
var dedupe_table: DedupeTable = .{};
defer dedupe_table.deinit(gpa);
for (ws.fuzz_run_steps) |run_step| {
const compile_step_inputs = run_step.producer.?.step.inputs.table;
for (compile_step_inputs.keys(), compile_step_inputs.values()) |dir_path, *file_list| {
try dedupe_table.ensureUnusedCapacity(gpa, file_list.items.len);
for (file_list.items) |sub_path| {
// Special file "." means the entire directory.
if (std.mem.eql(u8, sub_path, ".")) continue;
const joined_path = try dir_path.join(arena, sub_path);
_ = dedupe_table.getOrPutAssumeCapacity(joined_path);
}
}
}
const deduped_paths = dedupe_table.keys();
const SortContext = struct {
pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
_ = this;
return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
.lt => true,
.gt => false,
.eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
};
}
};
std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);
for (deduped_paths) |joined_path| {
var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
log.err("failed to open {}: {s}", .{ joined_path, @errorName(err) });
continue;
};
defer file.close();
const stat = file.stat() catch |err| {
log.err("failed to stat {}: {s}", .{ joined_path, @errorName(err) });
continue;
};
if (stat.kind != .file)
continue;
const padding = p: {
const remainder = stat.size % 512;
break :p if (remainder > 0) 512 - remainder else 0;
};
var file_header = std.tar.output.Header.init();
file_header.typeflag = .regular;
try file_header.setPath(joined_path.root_dir.path orelse ".", joined_path.sub_path);
try file_header.setSize(stat.size);
try file_header.updateChecksum();
try w.writeAll(std.mem.asBytes(&file_header));
try w.writeFile(file);
try w.writeByteNTimes(0, padding);
}
// intentionally omitting the pointless trailer
//try w.writeByteNTimes(0, 512 * 2);
try response.end();
}
const cache_control_header: std.http.Header = .{
.name = "cache-control",
.value = "max-age=0, must-revalidate",
};
fn coverageRun(ws: *WebServer) void {
ws.mutex.lock();
defer ws.mutex.unlock();
while (true) {
ws.condition.wait(&ws.mutex);
for (ws.msg_queue.items) |msg| switch (msg) {
.coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) {
error.AlreadyReported => continue,
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
},
};
ws.msg_queue.clearRetainingCapacity();
}
}
fn prepareTables(
ws: *WebServer,
run_step: *Step.Run,
coverage_id: u64,
) error{ OutOfMemory, AlreadyReported }!void {
const gpa = ws.gpa;
ws.coverage_mutex.lock();
defer ws.coverage_mutex.unlock();
const gop = try ws.coverage_files.getOrPut(gpa, coverage_id);
if (gop.found_existing) {
// We are fuzzing the same executable with multiple threads.
// Perhaps the same unit test; perhaps a different one. In any
// case, since the coverage file is the same, we only have to
// notice changes to that one file in order to learn coverage for
// this particular executable.
return;
}
errdefer _ = ws.coverage_files.pop();
gop.value_ptr.* = .{
.coverage = std.debug.Coverage.init,
.mapped_memory = undefined, // populated below
};
errdefer gop.value_ptr.coverage.deinit(gpa);
const rebuilt_exe_path: Build.Cache.Path = .{
.root_dir = Build.Cache.Directory.cwd(),
.sub_path = run_step.rebuilt_executable.?,
};
var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
log.err("step '{s}': failed to load debug information for '{}': {s}", .{
run_step.step.name, rebuilt_exe_path, @errorName(err),
});
return error.AlreadyReported;
};
defer debug_info.deinit(gpa);
const coverage_file_path: Build.Cache.Path = .{
.root_dir = run_step.step.owner.cache_root,
.sub_path = "v/" ++ std.fmt.hex(coverage_id),
};
var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
log.err("step '{s}': failed to load coverage file '{}': {s}", .{
run_step.step.name, coverage_file_path, @errorName(err),
});
return error.AlreadyReported;
};
defer coverage_file.close();
const file_size = coverage_file.getEndPos() catch |err| {
log.err("unable to check len of coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
const mapped_memory = std.posix.mmap(
null,
file_size,
std.posix.PROT.READ,
.{ .TYPE = .SHARED },
coverage_file.handle,
0,
) catch |err| {
log.err("failed to map coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
gop.value_ptr.mapped_memory = mapped_memory;
ws.coverage_condition.broadcast();
}
};
fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
const gpa = run.step.owner.allocator;
const stderr = std.io.getStdErr();

View file

@ -0,0 +1,605 @@
const builtin = @import("builtin");
const std = @import("../../std.zig");
const Allocator = std.mem.Allocator;
const Build = std.Build;
const Step = std.Build.Step;
const Coverage = std.debug.Coverage;
const abi = std.Build.Fuzz.abi;
const log = std.log;
const WebServer = @This();
gpa: Allocator,
global_cache_directory: Build.Cache.Directory,
zig_lib_directory: Build.Cache.Directory,
zig_exe_path: []const u8,
listen_address: std.net.Address,
fuzz_run_steps: []const *Step.Run,
/// Messages from fuzz workers. Protected by mutex.
msg_queue: std.ArrayListUnmanaged(Msg),
/// Protects `msg_queue` only.
mutex: std.Thread.Mutex,
/// Signaled when there is a message in `msg_queue`.
condition: std.Thread.Condition,
coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
/// Protects `coverage_files` only.
coverage_mutex: std.Thread.Mutex,
/// Signaled when `coverage_files` changes.
coverage_condition: std.Thread.Condition,
const CoverageMap = struct {
mapped_memory: []align(std.mem.page_size) const u8,
coverage: Coverage,
source_locations: []Coverage.SourceLocation,
fn deinit(cm: *CoverageMap, gpa: Allocator) void {
std.posix.munmap(cm.mapped_memory);
cm.coverage.deinit(gpa);
cm.* = undefined;
}
};
const Msg = union(enum) {
coverage: struct {
id: u64,
run: *Step.Run,
},
};
/// Accept loop for the fuzzer web UI: binds the configured listen address,
/// then spawns one thread per accepted connection. Runs until a listen or
/// accept error occurs; errors are logged rather than returned.
pub fn run(ws: *WebServer) void {
var http_server = ws.listen_address.listen(.{
.reuse_address = true,
}) catch |err| {
log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.in.getPort(), @errorName(err) });
return;
};
const port = http_server.listen_address.in.getPort();
log.info("web interface listening at http://127.0.0.1:{d}/", .{port});
while (true) {
const connection = http_server.accept() catch |err| {
log.err("failed to accept connection: {s}", .{@errorName(err)});
return;
};
// Detached worker thread; on spawn failure the connection is dropped
// but the accept loop keeps going.
_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
connection.stream.close();
continue;
};
}
}
/// Per-connection worker: services HTTP requests on `connection` until it
/// closes, upgrading to a websocket session when the client requests one.
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
    defer connection.stream.close();

    var read_buffer: [0x4000]u8 = undefined;
    var http_server = std.http.Server.init(connection, &read_buffer);
    var web_socket: std.http.WebSocket = undefined;
    var send_buffer: [0x4000]u8 = undefined;
    var ws_recv_buffer: [0x4000]u8 align(4) = undefined;

    while (http_server.state == .ready) {
        var request = http_server.receiveHead() catch |err| switch (err) {
            error.HttpConnectionClosing => return,
            else => {
                log.err("closing http connection: {s}", .{@errorName(err)});
                return;
            },
        };
        // `init` returns true when the request is a websocket upgrade.
        const upgraded = web_socket.init(&request, &send_buffer, &ws_recv_buffer) catch |err| {
            log.err("initializing web socket: {s}", .{@errorName(err)});
            return;
        };
        if (upgraded) {
            serveWebSocket(ws, &web_socket) catch |err| {
                log.err("unable to serve web socket connection: {s}", .{@errorName(err)});
                return;
            };
        } else {
            serveRequest(ws, &request) catch |err| switch (err) {
                error.AlreadyReported => return,
                else => |e| {
                    log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
                    return;
                },
            };
        }
    }
}
/// Routes a plain HTTP request by exact target path; unknown paths get a 404.
/// "/debug/..." variants serve the Debug build of the wasm client.
fn serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void {
    const target = request.head.target;
    const is = struct {
        fn is(t: []const u8, s: []const u8) bool {
            return std.mem.eql(u8, t, s);
        }
    }.is;

    if (is(target, "/") or is(target, "/debug") or is(target, "/debug/")) {
        try serveFile(ws, request, "fuzzer/index.html", "text/html");
    } else if (is(target, "/main.js") or is(target, "/debug/main.js")) {
        try serveFile(ws, request, "fuzzer/main.js", "application/javascript");
    } else if (is(target, "/main.wasm")) {
        try serveWasm(ws, request, .ReleaseFast);
    } else if (is(target, "/debug/main.wasm")) {
        try serveWasm(ws, request, .Debug);
    } else if (is(target, "/sources.tar") or is(target, "/debug/sources.tar")) {
        try serveSourcesTar(ws, request);
    } else {
        try request.respond("not found", .{
            .status = .not_found,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
        });
    }
}
/// Reads `name` from the Zig lib directory and serves it with the given
/// `content_type`. The file is re-read on every request so the user can edit
/// it and refresh the page without restarting this server.
fn serveFile(
    ws: *WebServer,
    request: *std.http.Server.Request,
    name: []const u8,
    content_type: []const u8,
) !void {
    const gpa = ws.gpa;
    // The desired API is actually sendfile, which will require enhancing
    // std.http.Server.
    const contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| {
        log.err("failed to read '{}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
        return error.AlreadyReported;
    };
    defer gpa.free(contents);

    try request.respond(contents, .{
        .extra_headers = &.{
            .{ .name = "content-type", .value = content_type },
            cache_control_header,
        },
    });
}
/// Compiles the fuzzer web client to wasm at `optimize_mode` and serves the
/// resulting binary. The compilation runs per request (backed by the cache)
/// so source edits take effect without a server restart.
fn serveWasm(
    ws: *WebServer,
    request: *std.http.Server.Request,
    optimize_mode: std.builtin.OptimizeMode,
) !void {
    const gpa = ws.gpa;

    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    const wasm_binary_path = try buildWasmBinary(ws, arena, optimize_mode);
    // std.http.Server does not have a sendfile API yet.
    const wasm_bytes = try std.fs.cwd().readFileAlloc(gpa, wasm_binary_path, 10 * 1024 * 1024);
    defer gpa.free(wasm_bytes);

    try request.respond(wasm_bytes, .{
        .extra_headers = &.{
            .{ .name = "content-type", .value = "application/wasm" },
            cache_control_header,
        },
    });
}
/// Compiles `fuzzer/wasm/main.zig` (with the Walk and html_render modules
/// from docs/wasm) to a wasm32-freestanding binary by spawning a
/// `zig build-exe --listen=-` child process, and returns the arena-allocated
/// path to the emitted binary. Communicates with the child over the
/// std.zig.Client/Server message protocol on stdin/stdout.
fn buildWasmBinary(
    ws: *WebServer,
    arena: Allocator,
    optimize_mode: std.builtin.OptimizeMode,
) ![]const u8 {
    const gpa = ws.gpa;
    const main_src_path: Build.Cache.Path = .{
        .root_dir = ws.zig_lib_directory,
        .sub_path = "fuzzer/wasm/main.zig",
    };
    const walk_src_path: Build.Cache.Path = .{
        .root_dir = ws.zig_lib_directory,
        .sub_path = "docs/wasm/Walk.zig",
    };
    const html_render_src_path: Build.Cache.Path = .{
        .root_dir = ws.zig_lib_directory,
        .sub_path = "docs/wasm/html_render.zig",
    };
    var argv: std.ArrayListUnmanaged([]const u8) = .{};
    try argv.appendSlice(arena, &.{
        ws.zig_exe_path, "build-exe", //
        "-fno-entry", //
        "-O", @tagName(optimize_mode), //
        "-target", "wasm32-freestanding", //
        "-mcpu", "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext", //
        "--cache-dir", ws.global_cache_directory.path orelse ".", //
        "--global-cache-dir", ws.global_cache_directory.path orelse ".", //
        "--name", "fuzzer", //
        "-rdynamic", //
        "-fsingle-threaded", //
        // These two --dep flags apply to the root module...
        "--dep", "Walk", //
        "--dep", "html_render", //
        try std.fmt.allocPrint(arena, "-Mroot={}", .{main_src_path}), //
        try std.fmt.allocPrint(arena, "-MWalk={}", .{walk_src_path}), //
        // ...while this one makes Walk also a dependency of html_render.
        "--dep", "Walk", //
        try std.fmt.allocPrint(arena, "-Mhtml_render={}", .{html_render_src_path}), //
        "--listen=-",
    });
    var child = std.process.Child.init(argv.items, gpa);
    child.stdin_behavior = .Pipe;
    child.stdout_behavior = .Pipe;
    child.stderr_behavior = .Pipe;
    try child.spawn();
    var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
        .stdout = child.stdout.?,
        .stderr = child.stderr.?,
    });
    defer poller.deinit();
    // Request exactly one build, then a clean exit.
    try sendMessage(child.stdin.?, .update);
    try sendMessage(child.stdin.?, .exit);
    const Header = std.zig.Server.Message.Header;
    var result: ?[]const u8 = null;
    var result_error_bundle = std.zig.ErrorBundle.empty;
    const stdout = poller.fifo(.stdout);
    // Read length-prefixed server messages until the child closes its stdout.
    poll: while (true) {
        // Block until a full header is buffered.
        while (stdout.readableLength() < @sizeOf(Header)) {
            if (!(try poller.poll())) break :poll;
        }
        const header = stdout.reader().readStruct(Header) catch unreachable;
        // Block until the full message body is buffered.
        while (stdout.readableLength() < header.bytes_len) {
            if (!(try poller.poll())) break :poll;
        }
        const body = stdout.readableSliceOfLen(header.bytes_len);
        switch (header.tag) {
            .zig_version => {
                if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
                    return error.ZigProtocolVersionMismatch;
                }
            },
            .error_bundle => {
                // Copy the bundle out of the fifo into arena memory; `body`
                // is invalidated by the discard below.
                const EbHdr = std.zig.Server.Message.ErrorBundle;
                const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
                const extra_bytes =
                    body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
                const string_bytes =
                    body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
                // TODO: use @ptrCast when the compiler supports it
                const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
                const extra_array = try arena.alloc(u32, unaligned_extra.len);
                @memcpy(extra_array, unaligned_extra);
                result_error_bundle = .{
                    .string_bytes = try arena.dupe(u8, string_bytes),
                    .extra = extra_array,
                };
            },
            .emit_bin_path => {
                const EbpHdr = std.zig.Server.Message.EmitBinPath;
                const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
                if (!ebp_hdr.flags.cache_hit) {
                    log.info("source changes detected; rebuilt wasm component", .{});
                }
                result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
            },
            else => {}, // ignore other messages
        }
        stdout.discard(body.len);
    }
    // Forward anything the compiler wrote to stderr.
    const stderr = poller.fifo(.stderr);
    if (stderr.readableLength() > 0) {
        const owned_stderr = try stderr.toOwnedSlice();
        defer gpa.free(owned_stderr);
        std.debug.print("{s}", .{owned_stderr});
    }
    // Send EOF to stdin.
    child.stdin.?.close();
    child.stdin = null;
    switch (try child.wait()) {
        .Exited => |code| {
            if (code != 0) {
                log.err(
                    "the following command exited with error code {d}:\n{s}",
                    .{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
                );
                return error.WasmCompilationFailed;
            }
        },
        .Signal, .Stopped, .Unknown => {
            log.err(
                "the following command terminated unexpectedly:\n{s}",
                .{try Build.Step.allocPrintCmd(arena, null, argv.items)},
            );
            return error.WasmCompilationFailed;
        },
    }
    // A zero exit code can still carry compile errors; render them.
    if (result_error_bundle.errorMessageCount() > 0) {
        const color = std.zig.Color.auto;
        result_error_bundle.renderToStdErr(color.renderOptions());
        log.err("the following command failed with {d} compilation errors:\n{s}", .{
            result_error_bundle.errorMessageCount(),
            try Build.Step.allocPrintCmd(arena, null, argv.items),
        });
        return error.WasmCompilationFailed;
    }
    return result orelse {
        log.err("child process failed to report result\n{s}", .{
            try Build.Step.allocPrintCmd(arena, null, argv.items),
        });
        return error.WasmCompilationFailed;
    };
}
/// Writes a zero-length client protocol message with the given `tag` to the
/// compiler child process's stdin.
fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
    const msg_header: std.zig.Client.Message.Header = .{
        .tag = tag,
        .bytes_len = 0,
    };
    try file.writeAll(std.mem.asBytes(&msg_header));
}
/// Pushes coverage information to one websocket client: the full context on
/// connect, then incremental updates whenever the coverage condition fires
/// (or after a 500ms timeout as a fallback). Holds `coverage_mutex` for the
/// lifetime of the connection; only returns by error when the socket fails.
fn serveWebSocket(ws: *WebServer, web_socket: *std.http.WebSocket) !void {
    ws.coverage_mutex.lock();
    defer ws.coverage_mutex.unlock();

    // Zero tells sendCoverageContext the initial context was not yet sent.
    var last_sent_unique_runs: usize = 0;
    try sendCoverageContext(ws, web_socket, &last_sent_unique_runs);
    while (true) {
        ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {};
        try sendCoverageContext(ws, web_socket, &last_sent_unique_runs);
    }
}
/// Sends coverage data to one websocket client. Caller holds
/// `ws.coverage_mutex`. On the first effective call (`prev_unique_runs.* == 0`
/// with a nonzero update available) the full source index is sent first;
/// afterwards only counters plus the seen-PCs bitset. No-op until a coverage
/// map has been prepared or while `unique_runs` is unchanged.
fn sendCoverageContext(ws: *WebServer, web_socket: *std.http.WebSocket, prev_unique_runs: *usize) !void {
    const coverage_maps = ws.coverage_files.values();
    if (coverage_maps.len == 0) return;
    // TODO: make each events URL correspond to one coverage map
    const coverage_map = &coverage_maps[0];
    const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
    // Mapped file layout: SeenPcsHeader, one usize PC per source location,
    // then the seen-PCs bitset (see abi.SeenPcsHeader's doc comment).
    const seen_pcs = coverage_map.mapped_memory[@sizeOf(abi.SeenPcsHeader) + coverage_map.source_locations.len * @sizeOf(usize) ..];
    // The fuzzer process mutates these fields concurrently; load atomically.
    const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic);
    const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic);
    const lowest_stack = @atomicLoad(usize, &cov_header.lowest_stack, .monotonic);
    if (prev_unique_runs.* != unique_runs) {
        // There has been an update.
        if (prev_unique_runs.* == 0) {
            // We need to send initial context.
            const header: abi.SourceIndexHeader = .{
                .flags = .{},
                .directories_len = @intCast(coverage_map.coverage.directories.entries.len),
                .files_len = @intCast(coverage_map.coverage.files.entries.len),
                .source_locations_len = @intCast(coverage_map.source_locations.len),
                .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
            };
            // One websocket message: header followed by the four trailing
            // arrays described in abi.SourceIndexHeader.
            const iovecs: [5]std.posix.iovec_const = .{
                makeIov(std.mem.asBytes(&header)),
                makeIov(std.mem.sliceAsBytes(coverage_map.coverage.directories.keys())),
                makeIov(std.mem.sliceAsBytes(coverage_map.coverage.files.keys())),
                makeIov(std.mem.sliceAsBytes(coverage_map.source_locations)),
                makeIov(coverage_map.coverage.string_bytes.items),
            };
            try web_socket.writeMessagev(&iovecs, .binary);
        }
        // Incremental update: counters plus the current seen-PCs bitset.
        const header: abi.CoverageUpdateHeader = .{
            .n_runs = n_runs,
            .unique_runs = unique_runs,
            .lowest_stack = lowest_stack,
        };
        const iovecs: [2]std.posix.iovec_const = .{
            makeIov(std.mem.asBytes(&header)),
            makeIov(seen_pcs),
        };
        try web_socket.writeMessagev(&iovecs, .binary);

        prev_unique_runs.* = unique_runs;
    }
}
/// Streams a tar archive of every file that is an input to any fuzzed compile
/// step, so the web client can display annotated source code. Paths are
/// deduplicated across steps and sorted for deterministic archive order.
fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
    const gpa = ws.gpa;

    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    var send_buffer: [0x4000]u8 = undefined;
    var response = request.respondStreaming(.{
        .send_buffer = &send_buffer,
        .respond_options = .{
            .extra_headers = &.{
                .{ .name = "content-type", .value = "application/x-tar" },
                cache_control_header,
            },
        },
    });
    const w = response.writer();

    const DedupeTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
    var dedupe_table: DedupeTable = .{};
    defer dedupe_table.deinit(gpa);

    // Collect the union of all input files across all fuzz run steps.
    for (ws.fuzz_run_steps) |run_step| {
        const compile_step_inputs = run_step.producer.?.step.inputs.table;
        for (compile_step_inputs.keys(), compile_step_inputs.values()) |dir_path, *file_list| {
            try dedupe_table.ensureUnusedCapacity(gpa, file_list.items.len);
            for (file_list.items) |sub_path| {
                // Special file "." means the entire directory.
                if (std.mem.eql(u8, sub_path, ".")) continue;
                const joined_path = try dir_path.join(arena, sub_path);
                _ = dedupe_table.getOrPutAssumeCapacity(joined_path);
            }
        }
    }

    const deduped_paths = dedupe_table.keys();
    // Sort by root directory path, then by sub path, for stable output.
    const SortContext = struct {
        pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
            _ = this;
            return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
                .lt => true,
                .gt => false,
                .eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
            };
        }
    };
    std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);

    for (deduped_paths) |joined_path| {
        // Unreadable or non-regular entries are skipped with a log, not fatal.
        var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
            log.err("failed to open {}: {s}", .{ joined_path, @errorName(err) });
            continue;
        };
        defer file.close();
        const stat = file.stat() catch |err| {
            log.err("failed to stat {}: {s}", .{ joined_path, @errorName(err) });
            continue;
        };
        if (stat.kind != .file)
            continue;

        // Tar file data is padded with zeros to a 512-byte record boundary.
        const padding = p: {
            const remainder = stat.size % 512;
            break :p if (remainder > 0) 512 - remainder else 0;
        };

        var file_header = std.tar.output.Header.init();
        file_header.typeflag = .regular;
        try file_header.setPath(joined_path.root_dir.path orelse ".", joined_path.sub_path);
        try file_header.setSize(stat.size);
        try file_header.updateChecksum();
        try w.writeAll(std.mem.asBytes(&file_header));
        try w.writeFile(file);
        try w.writeByteNTimes(0, padding);
    }

    // intentionally omitting the pointless trailer
    //try w.writeByteNTimes(0, 512 * 2);
    try response.end();
}
/// Forces clients to revalidate on every request so that edits to the served
/// files (and a rebuilt wasm client) are picked up on refresh.
const cache_control_header: std.http.Header = .{
    .name = "cache-control",
    .value = "max-age=0, must-revalidate",
};
/// Dedicated worker thread: drains `msg_queue` and prepares coverage tables
/// for each reported coverage id. Holds `ws.mutex` except while waiting on
/// the condition. Never returns.
pub fn coverageRun(ws: *WebServer) void {
    ws.mutex.lock();
    defer ws.mutex.unlock();

    while (true) {
        // NOTE(review): waits before draining, so a message enqueued before
        // this thread first blocks is only handled after the next signal —
        // confirm that producers signal `condition` after every enqueue.
        ws.condition.wait(&ws.mutex);
        for (ws.msg_queue.items) |msg| switch (msg) {
            // AlreadyReported errors were already logged at the failure site.
            .coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) {
                error.AlreadyReported => continue,
                else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
            },
        };
        ws.msg_queue.clearRetainingCapacity();
    }
}
/// Maps the coverage file for `coverage_id`, loads debug information for the
/// rebuilt executable, and resolves every flagged program counter to a source
/// location, storing the result in `ws.coverage_files`. Idempotent per
/// coverage id: workers fuzzing the same executable share one table.
/// Broadcasts `ws.coverage_condition` on success. Errors other than
/// OutOfMemory are logged here and reported as AlreadyReported.
fn prepareTables(
    ws: *WebServer,
    run_step: *Step.Run,
    coverage_id: u64,
) error{ OutOfMemory, AlreadyReported }!void {
    const gpa = ws.gpa;

    ws.coverage_mutex.lock();
    defer ws.coverage_mutex.unlock();

    const gop = try ws.coverage_files.getOrPut(gpa, coverage_id);
    if (gop.found_existing) {
        // We are fuzzing the same executable with multiple threads.
        // Perhaps the same unit test; perhaps a different one. In any
        // case, since the coverage file is the same, we only have to
        // notice changes to that one file in order to learn coverage for
        // this particular executable.
        return;
    }
    // The entry just inserted is the last one; drop it if anything fails.
    errdefer _ = ws.coverage_files.pop();

    gop.value_ptr.* = .{
        .coverage = std.debug.Coverage.init,
        .mapped_memory = undefined, // populated below
        .source_locations = undefined, // populated below
    };
    errdefer gop.value_ptr.coverage.deinit(gpa);

    const rebuilt_exe_path: Build.Cache.Path = .{
        .root_dir = Build.Cache.Directory.cwd(),
        .sub_path = run_step.rebuilt_executable.?,
    };
    var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
        log.err("step '{s}': failed to load debug information for '{}': {s}", .{
            run_step.step.name, rebuilt_exe_path, @errorName(err),
        });
        return error.AlreadyReported;
    };
    defer debug_info.deinit(gpa);

    // The fuzzer process writes its coverage data to this well-known path.
    const coverage_file_path: Build.Cache.Path = .{
        .root_dir = run_step.step.owner.cache_root,
        .sub_path = "v/" ++ std.fmt.hex(coverage_id),
    };
    var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
        log.err("step '{s}': failed to load coverage file '{}': {s}", .{
            run_step.step.name, coverage_file_path, @errorName(err),
        });
        return error.AlreadyReported;
    };
    defer coverage_file.close();

    const file_size = coverage_file.getEndPos() catch |err| {
        log.err("unable to check len of coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
        return error.AlreadyReported;
    };

    const mapped_memory = std.posix.mmap(
        null,
        file_size,
        std.posix.PROT.READ,
        .{ .TYPE = .SHARED },
        coverage_file.handle,
        0,
    ) catch |err| {
        log.err("failed to map coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
        return error.AlreadyReported;
    };
    // Fix: previously an error below popped the table entry but leaked the
    // mapping, since no cleanup for it ran on the error path.
    errdefer std.posix.munmap(mapped_memory);
    gop.value_ptr.mapped_memory = mapped_memory;

    // File layout: SeenPcsHeader, then pcs_len PC addresses, then the bitset.
    const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
    const pcs_bytes = mapped_memory[@sizeOf(abi.SeenPcsHeader)..][0 .. header.pcs_len * @sizeOf(usize)];
    const pcs = std.mem.bytesAsSlice(usize, pcs_bytes);
    const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
    errdefer gpa.free(source_locations);
    debug_info.resolveAddresses(gpa, pcs, source_locations) catch |err| {
        log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
        return error.AlreadyReported;
    };
    gop.value_ptr.source_locations = source_locations;

    ws.coverage_condition.broadcast();
}
/// Wraps a byte slice as a posix iovec_const for vectored websocket writes.
fn makeIov(bytes: []const u8) std.posix.iovec_const {
    return .{ .base = bytes.ptr, .len = bytes.len };
}

View file

@ -0,0 +1,55 @@
//! This file is shared among Zig code running in wildly different contexts:
//! libfuzzer, compiled alongside unit tests, the build runner, running on the
//! host computer, and the fuzzing web interface webassembly code running in
//! the browser. All of these components interface to some degree via an ABI.
/// libfuzzer uses this and its usize is the one that counts. To match the ABI,
/// make the ints be the size of the target used with libfuzzer.
///
/// Trailing:
/// * pc_addr: usize for each pcs_len
/// * 1 bit per pc_addr, usize elements
pub const SeenPcsHeader = extern struct {
    /// Total number of fuzz runs executed so far. Updated atomically by the
    /// fuzzer process; readers must use atomic loads.
    n_runs: usize,
    /// Number of runs that produced previously-unseen coverage.
    unique_runs: usize,
    /// Number of entries in the trailing pc_addr array.
    pcs_len: usize,
    /// NOTE(review): presumably the lowest stack pointer observed across all
    /// runs (initialized to maxInt(usize)) — confirm against libfuzzer.
    lowest_stack: usize,
};
/// Discriminates messages sent from the build runner to the fuzzer web
/// client; occupies the first byte of each websocket message. Non-exhaustive
/// so old clients tolerate tags added later.
pub const ToClientTag = enum(u8) {
    source_index,
    coverage_update,
    _,
};
/// Sent to the fuzzer web client on first connection to the websocket URL.
///
/// Trailing:
/// * std.debug.Coverage.String for each directories_len
/// * std.debug.Coverage.File for each files_len
/// * std.debug.Coverage.SourceLocation for each source_locations_len
/// * u8 for each string_bytes_len
pub const SourceIndexHeader = extern struct {
    flags: Flags,
    directories_len: u32,
    files_len: u32,
    source_locations_len: u32,
    string_bytes_len: u32,

    /// The low byte of the first u32 doubles as the message tag, letting the
    /// client discriminate before decoding the rest.
    pub const Flags = packed struct(u32) {
        tag: ToClientTag = .source_index,
        /// Reserved; must be zero.
        _: u24 = 0,
    };
};
/// Sent to the fuzzer web client whenever the set of covered source locations
/// changes.
///
/// Trailing:
/// * one bit per source_locations_len, contained in u8 elements
pub const CoverageUpdateHeader = extern struct {
    tag: ToClientTag = .coverage_update,
    // align(1) on the u64 fields: no padding after the u8 tag, so the wire
    // encoding is byte-exact and independent of host alignment.
    /// Copied from `SeenPcsHeader.n_runs`.
    n_runs: u64 align(1),
    /// Copied from `SeenPcsHeader.unique_runs`.
    unique_runs: u64 align(1),
    /// Copied from `SeenPcsHeader.lowest_stack`.
    lowest_stack: u64 align(1),
};