This commit replaces the "fuzzer" UI, previously accessed with the `--fuzz` and `--port` flags, with a more capable web UI which allows richer interaction with the Zig build system. Most notably, it exposes the data emitted by a new "time report" system, which lets users see which parts of a Zig program take the longest to compile.

The option to expose the web UI is `--webui`. By default, it listens on `[::1]` on a random port, but any IPv6 or IPv4 address can be specified, e.g. `--webui=[::1]:8000` or `--webui=127.0.0.1:8000`. The options `--fuzz` and `--time-report` both imply `--webui` if it is not given. Currently, `--webui` is incompatible with `--watch`; specifying both causes `zig build` to exit with a fatal error.

When the web UI is enabled, the build runner spawns the web server as soon as the configure phase completes. The frontend code consists of one HTML file, one JavaScript file, two CSS files, and a few Zig source files which are built into a WASM blob on demand -- this is all very similar to the old fuzzer UI. Also inherited from the fuzzer UI is that the build system communicates with web clients over a WebSocket connection. When the build finishes, if `--webui` was passed (i.e. if the web server is running), the build runner does not terminate; it continues running to serve web requests, allowing interactive control of the build system.

The web interface shows an overall "status" indicating whether a build is currently running, plus a list of all steps in the build, with visual indicators (colors and spinners) for in-progress, succeeded, and failed steps. A "Rebuild" button causes the build system to reset the state of every step (note that this does not affect caching) and evaluate the step graph again.

If `--time-report` is passed to `zig build`, a new section of the interface becomes visible, which associates every build step with a "time report". For most steps, this is just a simple "time taken" value. However, for `Compile` steps, the compiler provides the build system with much more interesting information: time taken for the various pipeline phases, with a per-declaration and per-file breakdown, sorted slowest-first. This feature is still in its early stages: the data can be a little tricky to understand, and there is no way to, for instance, sort by different properties or filter to certain files. However, it has already given us some interesting statistics, and can be useful for spotting particularly complex and slow compile-time logic. Additionally, if a compilation uses LLVM, its time report includes the "LLVM pass timing" information which was previously accessible with the (now removed) `-ftime-report` compiler flag.

To make time reports more useful, the Zig compiler ignores ZIR and compilation caches when time reports are enabled -- in other words, `Compile` steps *always* run, even if their result would otherwise be cached. This means the flag can be used to analyze a project's compile times without having to repeatedly clear the cache directory. However, when using `-fincremental`, updates after the first only show statistics for what changed on that particular update. Notably, this gives us a fairly nice way to see exactly which declarations were re-analyzed by an incremental update.
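For example (illustrative invocations; the address is a placeholder, and `test` is a hypothetical step name):

    zig build --time-report
    zig build test --fuzz --webui=127.0.0.1:8000

The first command serves the web UI on a random `[::1]` port with time reports enabled; the second starts fuzzing and pins the UI to a fixed address.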
If `--fuzz` is passed to `zig build`, another section of the web interface becomes visible, this time exposing the fuzzer. It is quite similar to the fuzzer UI this commit replaces, with only a few cosmetic tweaks. The interface is closer than before to supporting multiple fuzz steps at a time (in line with the overall strategy for this build UI, the goal is for all fuzz steps to be accessible from the same interface), but still doesn't actually support it. The fuzzer UI is quite different under the hood, and as a result various bugs are fixed, although others remain. For instance, viewing the source code of any file other than the root of the main module is completely broken (as on master) due to some bogus file-to-module assignment logic in the fuzzer UI.

Implementation notes:

* The `lib/build-web/` directory holds the client side of the web UI.
* The general server logic is in `std.Build.WebServer`.
* Fuzzing-specific logic is in `std.Build.Fuzz`.
* `std.Build.abi` is the new home of `std.Build.Fuzz.abi`, since it now relates to the build system web UI in general.
* The build runner now has an **actual** general-purpose allocator, because thanks to `--watch` and `--webui`, the process can be arbitrarily long-lived. The gpa is `std.heap.DebugAllocator`, but the arena remains backed by `std.heap.page_allocator` for efficiency (see the sketch after these notes). I fixed several crashes caused by conflation of `gpa` and `arena` in the build runner and `std.Build`, but there may still be some I have missed.
* The I/O logic in `std.Build.WebServer` is pretty gnarly; there are a *lot* of threads involved. I anticipate this situation improving significantly once the `std.Io` interface (with concurrency support) is introduced.
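As a minimal sketch of the allocator split described above (assuming current `std.heap` APIs; names are illustrative, not the exact build-runner code):

    const std = @import("std");

    // Long-lived general-purpose allocations go through a DebugAllocator...
    var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
    const gpa = debug_allocator.allocator();

    // ...while the arena stays backed by page_allocator for cheap bulk allocation.
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    const arena = arena_state.allocator();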
377 lines · 15 KiB · Zig
// Server timestamp.
var start_fuzzing_timestamp: i64 = undefined;

const js = struct {
    extern "fuzz" fn requestSources() void;
    extern "fuzz" fn ready() void;

    extern "fuzz" fn updateStats(html_ptr: [*]const u8, html_len: usize) void;
    extern "fuzz" fn updateEntryPoints(html_ptr: [*]const u8, html_len: usize) void;
    extern "fuzz" fn updateSource(html_ptr: [*]const u8, html_len: usize) void;
    extern "fuzz" fn updateCoverage(covered_ptr: [*]const SourceLocationIndex, covered_len: u32) void;
};

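/// Handles a "source index" message from the build server: resets all
/// per-session state, then decodes the directory, file, and source location
/// tables that trail the header.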
pub fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
    Walk.files.clearRetainingCapacity();
    Walk.decls.clearRetainingCapacity();
    Walk.modules.clearRetainingCapacity();
    recent_coverage_update.clearRetainingCapacity();
    selected_source_location = null;

    js.requestSources();

    const Header = abi.fuzz.SourceIndexHeader;
    const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*);

    const directories_start = @sizeOf(Header);
    const directories_end = directories_start + header.directories_len * @sizeOf(Coverage.String);
    const files_start = directories_end;
    const files_end = files_start + header.files_len * @sizeOf(Coverage.File);
    const source_locations_start = files_end;
    const source_locations_end = source_locations_start + header.source_locations_len * @sizeOf(Coverage.SourceLocation);
    const string_bytes = msg_bytes[source_locations_end..][0..header.string_bytes_len];

    const directories: []const Coverage.String = @alignCast(std.mem.bytesAsSlice(Coverage.String, msg_bytes[directories_start..directories_end]));
    const files: []const Coverage.File = @alignCast(std.mem.bytesAsSlice(Coverage.File, msg_bytes[files_start..files_end]));
    const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end]));

    start_fuzzing_timestamp = header.start_timestamp;
    try updateCoverageSources(directories, files, source_locations, string_bytes);
    js.ready();
}

var coverage = Coverage.init;
/// Index of type `SourceLocationIndex`.
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;

fn updateCoverageSources(
    directories: []const Coverage.String,
    files: []const Coverage.File,
    source_locations: []const Coverage.SourceLocation,
    string_bytes: []const u8,
) !void {
    coverage.directories.clearRetainingCapacity();
    coverage.files.clearRetainingCapacity();
    coverage.string_bytes.clearRetainingCapacity();
    coverage_source_locations.clearRetainingCapacity();

    try coverage_source_locations.appendSlice(gpa, source_locations);
    try coverage.string_bytes.appendSlice(gpa, string_bytes);

    try coverage.files.entries.resize(gpa, files.len);
    @memcpy(coverage.files.entries.items(.key), files);
    try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });

    try coverage.directories.entries.resize(gpa, directories.len);
    @memcpy(coverage.directories.entries.items(.key), directories);
    try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
}

pub fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
    recent_coverage_update.clearRetainingCapacity();
    recent_coverage_update.appendSlice(gpa, msg_bytes) catch @panic("OOM");
    try updateStats();
    try updateCoverage();
}

var entry_points: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;

pub fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
    const header: abi.fuzz.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.fuzz.EntryPointHeader)].*);
    const slis: []align(1) const SourceLocationIndex = @ptrCast(msg_bytes[@sizeOf(abi.fuzz.EntryPointHeader)..]);
    assert(slis.len == header.locsLen());
    try entry_points.resize(gpa, slis.len);
    @memcpy(entry_points.items, slis);
    try updateEntryPoints();
}

/// Index into `coverage_source_locations`.
const SourceLocationIndex = enum(u32) {
    _,

    fn haveCoverage(sli: SourceLocationIndex) bool {
        return @intFromEnum(sli) < coverage_source_locations.items.len;
    }

    fn ptr(sli: SourceLocationIndex) *Coverage.SourceLocation {
        return &coverage_source_locations.items[@intFromEnum(sli)];
    }

    fn sourceLocationLinkHtml(
        sli: SourceLocationIndex,
        out: *std.ArrayListUnmanaged(u8),
        focused: bool,
    ) Allocator.Error!void {
        const sl = sli.ptr();
        try out.writer(gpa).print("<code{s}>", .{
            @as([]const u8, if (focused) " class=\"status-running\"" else ""),
        });
        try sli.appendPath(out);
        try out.writer(gpa).print(":{d}:{d} </code><button class=\"linkish\" onclick=\"wasm_exports.fuzzSelectSli({d});\">View</button>", .{
            sl.line,
            sl.column,
            @intFromEnum(sli),
        });
    }

    fn appendPath(sli: SourceLocationIndex, out: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
        const sl = sli.ptr();
        const file = coverage.fileAt(sl.file);
        const file_name = coverage.stringAt(file.basename);
        const dir_name = coverage.stringAt(coverage.directories.keys()[file.directory_index]);
        try html_render.appendEscaped(out, dir_name);
        try out.appendSlice(gpa, "/");
        try html_render.appendEscaped(out, file_name);
    }

    fn toWalkFile(sli: SourceLocationIndex) ?Walk.File.Index {
        var buf: std.ArrayListUnmanaged(u8) = .empty;
        defer buf.deinit(gpa);
        sli.appendPath(&buf) catch @panic("OOM");
        return @enumFromInt(Walk.files.getIndex(buf.items) orelse return null);
    }

    fn fileHtml(
        sli: SourceLocationIndex,
        out: *std.ArrayListUnmanaged(u8),
    ) error{ OutOfMemory, SourceUnavailable }!void {
        const walk_file_index = sli.toWalkFile() orelse return error.SourceUnavailable;
        const root_node = walk_file_index.findRootDecl().get().ast_node;
        var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .empty;
        defer annotations.deinit(gpa);
        try computeSourceAnnotations(sli.ptr().file, walk_file_index, &annotations, coverage_source_locations.items);
        html_render.fileSourceHtml(walk_file_index, out, root_node, .{
            .source_location_annotations = annotations.items,
        }) catch |err| {
            fatal("unable to render source: {s}", .{@errorName(err)});
        };
    }
};

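/// Collects the coverage annotations for `cov_file_index`, in source order,
/// so that `html_render.fileSourceHtml` can splice them into the rendered file.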
fn computeSourceAnnotations(
    cov_file_index: Coverage.File.Index,
    walk_file_index: Walk.File.Index,
    annotations: *std.ArrayListUnmanaged(html_render.Annotation),
    source_locations: []const Coverage.SourceLocation,
) !void {
    // Collect all the source locations from only this file into this array
    // first, then sort by line, col, so that we can collect annotations with
    // O(N) time complexity.
    var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
    defer locs.deinit(gpa);

    for (source_locations, 0..) |sl, sli_usize| {
        if (sl.file != cov_file_index) continue;
        const sli: SourceLocationIndex = @enumFromInt(sli_usize);
        try locs.append(gpa, sli);
    }

    std.mem.sortUnstable(SourceLocationIndex, locs.items, {}, struct {
        pub fn lessThan(context: void, lhs: SourceLocationIndex, rhs: SourceLocationIndex) bool {
            _ = context;
            const lhs_ptr = lhs.ptr();
            const rhs_ptr = rhs.ptr();
            if (lhs_ptr.line < rhs_ptr.line) return true;
            if (lhs_ptr.line > rhs_ptr.line) return false;
            return lhs_ptr.column < rhs_ptr.column;
        }
    }.lessThan);

    const source = walk_file_index.get_ast().source;
    var line: usize = 1;
    var column: usize = 1;
    var next_loc_index: usize = 0;
    for (source, 0..) |byte, offset| {
        if (byte == '\n') {
            line += 1;
            column = 1;
        } else {
            column += 1;
        }
        while (true) {
            if (next_loc_index >= locs.items.len) return;
            const next_sli = locs.items[next_loc_index];
            const next_sl = next_sli.ptr();
            if (next_sl.line > line or (next_sl.line == line and next_sl.column >= column)) break;
            try annotations.append(gpa, .{
                .file_byte_offset = offset,
                .dom_id = @intFromEnum(next_sli),
            });
            next_loc_index += 1;
        }
    }
}

export fn fuzzUnpackSources(tar_ptr: [*]u8, tar_len: usize) void {
    const tar_bytes = tar_ptr[0..tar_len];
    log.debug("received {d} bytes of sources.tar", .{tar_bytes.len});

    unpackSourcesInner(tar_bytes) catch |err| {
        fatal("unable to unpack sources.tar: {s}", .{@errorName(err)});
    };
}

fn unpackSourcesInner(tar_bytes: []u8) !void {
    var tar_reader: std.Io.Reader = .fixed(tar_bytes);
    var file_name_buffer: [1024]u8 = undefined;
    var link_name_buffer: [1024]u8 = undefined;
    var it: std.tar.Iterator = .init(&tar_reader, .{
        .file_name_buffer = &file_name_buffer,
        .link_name_buffer = &link_name_buffer,
    });
    while (try it.next()) |tar_file| {
        switch (tar_file.kind) {
            .file => {
                if (tar_file.size == 0 and tar_file.name.len == 0) break;
                if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
                    log.debug("found file: '{s}'", .{tar_file.name});
                    const file_name = try gpa.dupe(u8, tar_file.name);
                    if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
                        const pkg_name = file_name[0..pkg_name_end];
                        const gop = try Walk.modules.getOrPut(gpa, pkg_name);
                        const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
                        // Treat this file as the module root if it is the first file seen
                        // for the module, or if it is `<pkg>/root.zig` or `<pkg>/<pkg>.zig`.
                        if (!gop.found_existing or
                            std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
                            std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
                        {
                            gop.value_ptr.* = file;
                        }
                        const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
                        it.unread_file_bytes = 0; // we have read the whole thing
                        assert(file == try Walk.add_file(file_name, file_bytes));
                    }
                } else {
                    log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
                }
            },
            else => continue,
        }
    }
}

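/// Renders the stats panel (total runs, unique runs, coverage, speed) from
/// the most recent coverage update and hands the HTML to the JS side.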
fn updateStats() error{OutOfMemory}!void {
    @setFloatMode(.optimized);

    if (recent_coverage_update.items.len == 0) return;

    const hdr: *abi.fuzz.CoverageUpdateHeader = @alignCast(@ptrCast(
        recent_coverage_update.items[0..@sizeOf(abi.fuzz.CoverageUpdateHeader)],
    ));

    const covered_src_locs: usize = n: {
        var n: usize = 0;
        const covered_bits = recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..];
        for (covered_bits) |byte| n += @popCount(byte);
        break :n n;
    };
    const total_src_locs = coverage_source_locations.items.len;

    const avg_speed: f64 = speed: {
        const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp));
        const n_runs: f64 = @floatFromInt(hdr.n_runs);
        break :speed n_runs / (ns_elapsed / std.time.ns_per_s);
    };

    const html = try std.fmt.allocPrint(gpa,
        \\<span slot="stat-total-runs">{d}</span>
        \\<span slot="stat-unique-runs">{d} ({d:.1}%)</span>
        \\<span slot="stat-coverage">{d} / {d} ({d:.1}%)</span>
        \\<span slot="stat-speed">{d:.0}</span>
    , .{
        hdr.n_runs,
        hdr.unique_runs,
        @as(f64, @floatFromInt(hdr.unique_runs)) / @as(f64, @floatFromInt(hdr.n_runs)),
        covered_src_locs,
        total_src_locs,
        @as(f64, @floatFromInt(covered_src_locs)) / @as(f64, @floatFromInt(total_src_locs)),
        avg_speed,
    });
    defer gpa.free(html);

    js.updateStats(html.ptr, html.len);
}

fn updateEntryPoints() error{OutOfMemory}!void {
    var html: std.ArrayListUnmanaged(u8) = .empty;
    defer html.deinit(gpa);
    for (entry_points.items) |sli| {
        try html.appendSlice(gpa, "<li>");
        try sli.sourceLocationLinkHtml(&html, selected_source_location == sli);
        try html.appendSlice(gpa, "</li>\n");
    }
    js.updateEntryPoints(html.items.ptr, html.items.len);
}

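/// Sends the JS side the list of covered source locations within the
/// currently selected file.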
fn updateCoverage() error{OutOfMemory}!void {
    if (recent_coverage_update.items.len == 0) return;
    const want_file = (selected_source_location orelse return).ptr().file;

    var covered: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
    defer covered.deinit(gpa);

    // This code assumes 64-bit elements, which is incorrect if the executable
    // being fuzzed does not target a 64-bit CPU. It also assumes little-endian,
    // which can likewise be incorrect.
    comptime assert(abi.fuzz.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
    const n_bitset_elems = (coverage_source_locations.items.len + @bitSizeOf(u64) - 1) / @bitSizeOf(u64);
    const covered_bits = std.mem.bytesAsSlice(
        u64,
        recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
    );
    var sli: SourceLocationIndex = @enumFromInt(0);
    for (covered_bits) |elem| {
        try covered.ensureUnusedCapacity(gpa, 64);
        for (0..@bitSizeOf(u64)) |i| {
            if ((elem & (@as(u64, 1) << @intCast(i))) != 0) {
                if (sli.ptr().file == want_file) {
                    covered.appendAssumeCapacity(sli);
                }
            }
            sli = @enumFromInt(@intFromEnum(sli) + 1);
        }
    }

    js.updateCoverage(covered.items.ptr, covered.items.len);
}

fn updateSource() error{OutOfMemory}!void {
    if (recent_coverage_update.items.len == 0) return;
    const file_sli = selected_source_location.?;
    var html: std.ArrayListUnmanaged(u8) = .empty;
    defer html.deinit(gpa);
    file_sli.fileHtml(&html) catch |err| switch (err) {
        error.OutOfMemory => |e| return e,
        error.SourceUnavailable => {},
    };
    js.updateSource(html.items.ptr, html.items.len);
}

var selected_source_location: ?SourceLocationIndex = null;

/// This function is not used directly by `main.js`, but a reference to it is
/// emitted by `SourceLocationIndex.sourceLocationLinkHtml`.
export fn fuzzSelectSli(sli: SourceLocationIndex) void {
    if (!sli.haveCoverage()) return;
    selected_source_location = sli;
    updateEntryPoints() catch @panic("out of memory"); // highlights the selected one green
    updateSource() catch @panic("out of memory");
    updateCoverage() catch @panic("out of memory");
}

const std = @import("std");
const Allocator = std.mem.Allocator;
const Coverage = std.debug.Coverage;
const abi = std.Build.abi;
const assert = std.debug.assert;
const gpa = std.heap.wasm_allocator;

const Walk = @import("Walk");
const html_render = @import("html_render");

const nsSince = @import("main.zig").nsSince;
const Slice = @import("main.zig").Slice;
const fatal = @import("main.zig").fatal;
const log = std.log;
const String = Slice(u8);