The previous float-parsing method was lacking in a number of areas. This
commit introduces a state-of-the-art implementation to std that is both
accurate and fast.
The code is derived from the working repo
https://github.com/tiehuis/zig-parsefloat, which includes more test cases
and performance numbers than are present in this commit.
* Accuracy
The primary testing regime uses the test data found at
https://github.com/tiehuis/parse-number-fxx-test-data, a fork of
upstream with support for f128 test cases added. This data has been
verified against other independent implementations and represents
accurate round-to-even IEEE-754 floating-point semantics.
* Performance
Compared to the existing parseFloat implementation, there is a ~5-10x
performance improvement on the above corpus (f128 parsing is excluded
from the measurements below).
** Old
$ time ./test_all_fxx_data
3520298/5296694 succeeded (1776396 fail)
________________________________________________________
Executed in 28.68 secs fish external
usr time 28.48 secs 0.00 micros 28.48 secs
sys time 0.08 secs 694.00 micros 0.08 secs
** This Implementation
$ time ./test_all_fxx_data
5296693/5296694 succeeded (1 fail)
________________________________________________________
Executed in 4.54 secs fish external
usr time 4.37 secs 515.00 micros 4.37 secs
sys time 0.10 secs 171.00 micros 0.10 secs
Further performance numbers can be seen using the
https://github.com/tiehuis/simple_fastfloat_benchmark/ repository, which
compares against some other well-known string-to-float conversion
functions. A breakdown can be found here:
0d9f020f1a/PERFORMANCE.md (commit-b15406a0d2e18b50a4b62fceb5a6a3bb60ca5706)
In summary, we are within 20% of the C++ reference implementation and
achieve roughly 600-700 MB/s throughput on an Intel i5-6500 @ 3.5GHz.
* F128 Support
Finally, f128 is now completely supported with full accuracy. This
currently uses a slower path, which could be improved in the future.
* Behavioural Changes
There are a few behavioural changes to note.
- `parseHexFloat` is now redundant: hex floats are now parsed directly
  by `parseFloat`.
- We implement round-to-even in all parsing routines, as specified by
  IEEE-754. The previous code used different rounding mechanisms (the
  standard decimal path used round-to-zero and hex parsing appeared to
  use round-up), so there may be subtle differences. See the
  illustrative test after this list.
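As an illustrative sketch (not part of this commit's test suite), the new
behaviour can be exercised through std.fmt.parseFloat directly; the hex
inputs below are hand-picked halfway cases, assuming the round-to-even
semantics described above:

const std = @import("std");

test "parseFloat: hex floats and round-to-even (illustrative)" {
    // Hex floats now go through parseFloat directly (previously parseHexFloat),
    // including for f128.
    try std.testing.expectEqual(@as(f128, 0.5), try std.fmt.parseFloat(f128, "0x1p-1"));

    // 0x1.000001p0 lies exactly halfway between 1.0 and the next f32;
    // round-to-even resolves the tie to 1.0 (even significand).
    try std.testing.expectEqual(@as(f32, 1.0), try std.fmt.parseFloat(f32, "0x1.000001p0"));

    // 0x1.000003p0 lies exactly halfway between 0x1.000002p0 and 0x1.000004p0;
    // round-to-even resolves the tie to 0x1.000004p0 (even significand).
    try std.testing.expectEqual(@as(f32, 0x1.000004p0), try std.fmt.parseFloat(f32, "0x1.000003p0"));
}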
Closes #2207.
Fixes #11169.
91 lines · 2.8 KiB · Zig
const std = @import("std");

/// A custom N-bit floating point type, representing `f * 2^e`.
/// e is biased, so it can be directly shifted into the exponent bits.
/// Negative exponent indicates an invalid result.
pub fn BiasedFp(comptime T: type) type {
    const MantissaT = mantissaType(T);

    return struct {
        const Self = @This();

        /// The significant digits.
        f: MantissaT,
        /// The biased, binary exponent.
        e: i32,

        pub fn zero() Self {
            return .{ .f = 0, .e = 0 };
        }

        pub fn zeroPow2(e: i32) Self {
            return .{ .f = 0, .e = e };
        }

        pub fn inf(comptime FloatT: type) Self {
            return .{ .f = 0, .e = (1 << std.math.floatExponentBits(FloatT)) - 1 };
        }

        pub fn eql(self: Self, other: Self) bool {
            return self.f == other.f and self.e == other.e;
        }

        pub fn toFloat(self: Self, comptime FloatT: type, negative: bool) FloatT {
            var word = self.f;
            word |= @intCast(MantissaT, self.e) << std.math.floatMantissaBits(FloatT);
            var f = floatFromUnsigned(FloatT, MantissaT, word);
            if (negative) f = -f;
            return f;
        }
    };
}
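
// Illustrative sketch, not part of the original file: BiasedFp holds raw
// IEEE-754 fields, so toFloat only has to OR the biased exponent above the
// mantissa bits. For f64, 1.5 has mantissa bits 1 << 51 and a biased
// exponent of 1023.
test "BiasedFp.toFloat reassembles 1.5 (illustrative)" {
    const fp = BiasedFp(f64){ .f = 1 << 51, .e = 1023 };
    try std.testing.expectEqual(@as(f64, 1.5), fp.toFloat(f64, false));
    try std.testing.expectEqual(@as(f64, -1.5), fp.toFloat(f64, true));
}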

pub fn floatFromUnsigned(comptime T: type, comptime MantissaT: type, v: MantissaT) T {
    return switch (T) {
        f16 => @bitCast(f16, @truncate(u16, v)),
        f32 => @bitCast(f32, @truncate(u32, v)),
        f64 => @bitCast(f64, @truncate(u64, v)),
        f128 => @bitCast(f128, v),
        else => unreachable,
    };
}
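
// Illustrative sketch, not part of the original file: floatFromUnsigned is a
// plain bit reinterpretation; 0x3f800000 and 0x3ff0000000000000 are the
// IEEE-754 bit patterns of 1.0 as f32 and f64 respectively.
test "floatFromUnsigned reinterprets bit patterns (illustrative)" {
    try std.testing.expectEqual(@as(f32, 1.0), floatFromUnsigned(f32, u64, 0x3f800000));
    try std.testing.expectEqual(@as(f64, 1.0), floatFromUnsigned(f64, u64, 0x3ff0000000000000));
}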

/// Represents a parsed floating point value as its components.
pub fn Number(comptime T: type) type {
    return struct {
        exponent: i64,
        mantissa: mantissaType(T),
        negative: bool,
        /// More than max_mantissa digits were found during parse
        many_digits: bool,
        /// The number was a hex-float (e.g. 0x1.234p567)
        hex: bool,
    };
}
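
// Illustrative sketch, not part of the original file: the components form a
// scientific representation, e.g. 123.45 can be described as 12345 * 10^-2.
// The struct below is hand-built for illustration, not the output of any
// parse routine shown here.
test "Number components for 123.45 (illustrative)" {
    const n = Number(f64){
        .exponent = -2,
        .mantissa = 12345,
        .negative = false,
        .many_digits = false,
        .hex = false,
    };
    const approx = @intToFloat(f64, n.mantissa) * std.math.pow(f64, 10.0, @intToFloat(f64, n.exponent));
    try std.testing.expectApproxEqAbs(@as(f64, 123.45), approx, 1e-9);
}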

/// Determine if 8 bytes are all decimal digits.
/// This does not care about the order in which the bytes were loaded.
pub fn isEightDigits(v: u64) bool {
    const a = v +% 0x4646_4646_4646_4646;
    const b = v -% 0x3030_3030_3030_3030;
    return ((a | b) & 0x8080_8080_8080_8080) == 0;
}
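
// Illustrative sketch, not part of the original file: the wrapping add and
// subtract flag any byte outside the ASCII digit range '0'..'9' via its high
// bit, so one mask test checks all eight bytes at once.
test "isEightDigits (illustrative)" {
    // 0x3837363534333231 is "12345678" loaded as a little-endian u64.
    try std.testing.expect(isEightDigits(0x3837363534333231));
    // 0x6137363534333231 is "1234567a"; 'a' (0x61) is not a decimal digit.
    try std.testing.expect(!isEightDigits(0x6137363534333231));
}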

pub fn isDigit(c: u8, comptime base: u8) bool {
    std.debug.assert(base == 10 or base == 16);

    return if (base == 10)
        '0' <= c and c <= '9'
    else
        '0' <= c and c <= '9' or 'a' <= c and c <= 'f' or 'A' <= c and c <= 'F';
}
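
// Illustrative sketch, not part of the original file: base is comptime-known,
// so the unused branch folds away at compile time.
test "isDigit (illustrative)" {
    try std.testing.expect(isDigit('7', 10));
    try std.testing.expect(!isDigit('f', 10));
    try std.testing.expect(isDigit('f', 16));
    try std.testing.expect(!isDigit('g', 16));
}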

/// Returns the underlying storage type used for the mantissa of a floating-point type.
/// The output unsigned type must have at least as many bits as the input floating-point type.
pub fn mantissaType(comptime T: type) type {
    return switch (T) {
        f16, f32, f64 => u64,
        f128 => u128,
        else => unreachable,
    };
}
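
// Illustrative sketch, not part of the original file: f16/f32/f64 share a u64
// mantissa type, while f128 needs the wider u128.
test "mantissaType widths (illustrative)" {
    comptime std.debug.assert(mantissaType(f32) == u64);
    comptime std.debug.assert(mantissaType(f128) == u128);
}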