mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-06 13:54:21 +00:00
dwarf: optimize dwarf parsing for speed
This code is run when printing a stack trace in a debug executable, so it has to be fast even without compiler optimizations. Adding a `@panic` to the top of `main` and running an x86_64 backend compiled compiler goes from `1m32.773s` to `0m3.232s`.
This commit is contained in:
parent
57b2b3df52
commit
247e4ac3cc
3 changed files with 699 additions and 723 deletions
1360
lib/std/dwarf.zig
1360
lib/std/dwarf.zig
File diff suppressed because it is too large
Load diff
|
|
@@ -12,8 +12,8 @@ const native_endian = builtin.cpu.arch.endian();
|
|||
/// Callers should specify all the fields relevant to their context. If a field is required
|
||||
/// by the expression and it isn't in the context, error.IncompleteExpressionContext is returned.
|
||||
pub const ExpressionContext = struct {
|
||||
/// This expression is from a DWARF64 section
|
||||
is_64: bool = false,
|
||||
/// The dwarf format of the section this expression is in
|
||||
format: dwarf.Format = .@"32",
|
||||
|
||||
/// If specified, any addresses will pass through this function before being accessed
|
||||
isValidMemory: ?*const fn (address: usize) bool = null,
|
||||
|
|
@@ -190,10 +190,10 @@ pub fn StackMachine(comptime options: ExpressionOptions) type {
|
|||
const reader = stream.reader();
|
||||
return switch (opcode) {
|
||||
OP.addr => generic(try reader.readInt(addr_type, options.endian)),
|
||||
OP.call_ref => if (context.is_64)
|
||||
generic(try reader.readInt(u64, options.endian))
|
||||
else
|
||||
generic(try reader.readInt(u32, options.endian)),
|
||||
OP.call_ref => switch (context.format) {
|
||||
.@"32" => generic(try reader.readInt(u32, options.endian)),
|
||||
.@"64" => generic(try reader.readInt(u64, options.endian)),
|
||||
},
|
||||
OP.const1u,
|
||||
OP.pick,
|
||||
=> generic(try reader.readByte()),
|
||||
|
|
@@ -366,15 +366,15 @@ pub fn StackMachine(comptime options: ExpressionOptions) type {
|
|||
_ = offset;
|
||||
|
||||
switch (context.compile_unit.?.frame_base.?.*) {
|
||||
.ExprLoc => {
|
||||
.exprloc => {
|
||||
// TODO: Run this expression in a nested stack machine
|
||||
return error.UnimplementedOpcode;
|
||||
},
|
||||
.LocListOffset => {
|
||||
.loclistx => {
|
||||
// TODO: Read value from .debug_loclists
|
||||
return error.UnimplementedOpcode;
|
||||
},
|
||||
.SecOffset => {
|
||||
.sec_offset => {
|
||||
// TODO: Read value from .debug_loclists
|
||||
return error.UnimplementedOpcode;
|
||||
},
|
||||
|
|
|
|||
|
|
@@ -62,11 +62,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
|||
if (bytes.len == 0) return 0;
|
||||
if (self.pos >= self.buffer.len) return error.NoSpaceLeft;
|
||||
|
||||
const n = if (self.pos + bytes.len <= self.buffer.len)
|
||||
bytes.len
|
||||
else
|
||||
self.buffer.len - self.pos;
|
||||
|
||||
const n = @min(self.buffer.len - self.pos, bytes.len);
|
||||
@memcpy(self.buffer[self.pos..][0..n], bytes[0..n]);
|
||||
self.pos += n;
|
||||
|
||||
|
|
@@ -76,7 +72,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
|||
}
|
||||
|
||||
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
|
||||
self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len;
|
||||
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
|
||||
}
|
||||
|
||||
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue