lib: correct unnecessary uses of 'var'

mlugg 2023-11-10 05:27:17 +00:00
parent baabc6013e
commit 51595d6b75
174 changed files with 738 additions and 711 deletions
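
The change throughout is mechanical: every local that is declared 'var' but never mutated becomes 'const', which documents single assignment and satisfies the compiler's never-mutated-local check ("local variable is never mutated"). A minimal sketch of the pattern, with hypothetical names not taken from the diff:

const std = @import("std");

test "never-mutated locals should be const" {
    // Previously written as: var doubled: i32 = 21 * 2;
    // The value is never reassigned, so const is the correct qualifier.
    const doubled: i32 = 21 * 2;
    try std.testing.expectEqual(@as(i32, 42), doubled);
}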

View file

@ -24,7 +24,7 @@ pub fn main() !void {
};
const arena = thread_safe_arena.allocator();
var args = try process.argsAlloc(arena);
const args = try process.argsAlloc(arena);
// skip my own exe name
var arg_idx: usize = 1;

View file

@ -3,7 +3,7 @@ const testing = @import("std").testing;
const __absvdi2 = @import("absvdi2.zig").__absvdi2;
fn test__absvdi2(a: i64, expected: i64) !void {
var result = __absvdi2(a);
const result = __absvdi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -3,7 +3,7 @@ const testing = @import("std").testing;
const __absvsi2 = @import("absvsi2.zig").__absvsi2;
fn test__absvsi2(a: i32, expected: i32) !void {
var result = __absvsi2(a);
const result = __absvsi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -3,7 +3,7 @@ const testing = @import("std").testing;
const __absvti2 = @import("absvti2.zig").__absvti2;
fn test__absvti2(a: i128, expected: i128) !void {
var result = __absvti2(a);
const result = __absvti2(a);
try testing.expectEqual(expected, result);
}

View file

@ -18,7 +18,7 @@ comptime {
inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
@setRuntimeSafety(builtin.is_test);
overflow.* = 0;
var sum: ST = a +% b;
const sum: ST = a +% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
// Let sum = a +% b == a + b + carry == wraparound addition.
// Overflow in a+b+carry occurs, iff a and b have opposite signs
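
For reference, the detection trick this comment describes reduces to checking whether the wrapped sum disagrees in sign with both operands. A standalone sketch, a hypothetical helper rather than the library routine:

const std = @import("std");

// Signed a + b overflows iff sign(sum) differs from sign(a) and sign(b).
fn addOverflows(comptime ST: type, a: ST, b: ST) bool {
    const sum = a +% b;
    return ((sum ^ a) & (sum ^ b)) < 0;
}

test "addOverflows" {
    try std.testing.expect(addOverflows(i32, std.math.maxInt(i32), 1));
    try std.testing.expect(!addOverflows(i32, 1, 2));
}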

View file

@ -6,8 +6,8 @@ const math = std.math;
fn test__addodi4(a: i64, b: i64) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = addv.__addodi4(a, b, &result_ov);
var expected: i64 = simple_addodi4(a, b, &expected_ov);
const result = addv.__addodi4(a, b, &result_ov);
const expected: i64 = simple_addodi4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -4,8 +4,8 @@ const testing = @import("std").testing;
fn test__addosi4(a: i32, b: i32) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = addv.__addosi4(a, b, &result_ov);
var expected: i32 = simple_addosi4(a, b, &expected_ov);
const result = addv.__addosi4(a, b, &result_ov);
const expected: i32 = simple_addosi4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -6,8 +6,8 @@ const math = std.math;
fn test__addoti4(a: i128, b: i128) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = addv.__addoti4(a, b, &result_ov);
var expected: i128 = simple_addoti4(a, b, &expected_ov);
const result = addv.__addoti4(a, b, &result_ov);
const expected: i128 = simple_addoti4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -2,7 +2,7 @@ const bswap = @import("bswap.zig");
const testing = @import("std").testing;
fn test__bswapdi2(a: u64, expected: u64) !void {
var result = bswap.__bswapdi2(a);
const result = bswap.__bswapdi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const bswap = @import("bswap.zig");
const testing = @import("std").testing;
fn test__bswapsi2(a: u32, expected: u32) !void {
var result = bswap.__bswapsi2(a);
const result = bswap.__bswapsi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const bswap = @import("bswap.zig");
const testing = @import("std").testing;
fn test__bswapti2(a: u128, expected: u128) !void {
var result = bswap.__bswapti2(a);
const result = bswap.__bswapti2(a);
try testing.expectEqual(expected, result);
}

View file

@ -32,7 +32,7 @@ pub fn __ceilh(x: f16) callconv(.C) f16 {
pub fn ceilf(x: f32) callconv(.C) f32 {
var u: u32 = @bitCast(x);
var e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
var m: u32 = undefined;
// TODO: Shouldn't need this explicit check.
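
The changed line above extracts the unbiased exponent from the IEEE-754 single bit pattern; a standalone sketch of the same computation, assuming a finite normal input:

const std = @import("std");

fn unbiasedExp(x: f32) i32 {
    const u: u32 = @bitCast(x);
    // Bits 23..30 hold the biased exponent; subtracting the bias 0x7F unbiases it.
    return @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
}

test "unbiasedExp" {
    try std.testing.expectEqual(@as(i32, 3), unbiasedExp(8.0)); // 8.0 == 1.0 * 2^3
    try std.testing.expectEqual(@as(i32, -1), unbiasedExp(0.5));
}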

View file

@ -2,8 +2,8 @@ const clz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__clzdi2(a: u64, expected: i64) !void {
var x: i64 = @bitCast(a);
var result = clz.__clzdi2(x);
const x: i64 = @bitCast(a);
const result = clz.__clzdi2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const clz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__clzti2(a: u128, expected: i64) !void {
var x: i128 = @bitCast(a);
var result = clz.__clzti2(x);
const x: i128 = @bitCast(a);
const result = clz.__clzti2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__cmpdi2(a: i64, b: i64, expected: i64) !void {
var result = cmp.__cmpdi2(a, b);
const result = cmp.__cmpdi2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__cmpsi2(a: i32, b: i32, expected: i32) !void {
var result = cmp.__cmpsi2(a, b);
const result = cmp.__cmpsi2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__cmpti2(a: i128, b: i128, expected: i128) !void {
var result = cmp.__cmpti2(a, b);
const result = cmp.__cmpti2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const ctz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ctzdi2(a: u64, expected: i32) !void {
var x: i64 = @bitCast(a);
var result = ctz.__ctzdi2(x);
const x: i64 = @bitCast(a);
const result = ctz.__ctzdi2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const ctz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ctzsi2(a: u32, expected: i32) !void {
var x: i32 = @bitCast(a);
var result = ctz.__ctzsi2(x);
const x: i32 = @bitCast(a);
const result = ctz.__ctzsi2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const ctz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ctzti2(a: u128, expected: i32) !void {
var x: i128 = @bitCast(a);
var result = ctz.__ctzti2(x);
const x: i128 = @bitCast(a);
const result = ctz.__ctzti2(x);
try testing.expectEqual(expected, result);
}

View file

@ -19,20 +19,20 @@ test {
fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)) !void {
{
var a: T = 1.0;
var b: T = 0.0;
var c: T = -1.0;
var d: T = 0.0;
const a: T = 1.0;
const b: T = 0.0;
const c: T = -1.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == -1.0);
try expect(result.imag == 0.0);
}
{
var a: T = 1.0;
var b: T = 0.0;
var c: T = -4.0;
var d: T = 0.0;
const a: T = 1.0;
const b: T = 0.0;
const c: T = -4.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == -0.25);
@ -41,10 +41,10 @@ fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)
{
// if the first operand is an infinity and the second operand is a finite number, then the
// result of the / operator is an infinity;
var a: T = -math.inf(T);
var b: T = 0.0;
var c: T = -4.0;
var d: T = 1.0;
const a: T = -math.inf(T);
const b: T = 0.0;
const c: T = -4.0;
const d: T = 1.0;
const result = f(a, b, c, d);
try expect(result.real == math.inf(T));
@ -53,10 +53,10 @@ fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)
{
// if the first operand is a finite number and the second operand is an infinity, then the
// result of the / operator is a zero;
var a: T = 17.2;
var b: T = 0.0;
var c: T = -math.inf(T);
var d: T = 0.0;
const a: T = 17.2;
const b: T = 0.0;
const c: T = -math.inf(T);
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == -0.0);
@ -65,10 +65,10 @@ fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)
{
// if the first operand is a nonzero finite number or an infinity and the second operand is
// a zero, then the result of the / operator is an infinity
var a: T = 1.1;
var b: T = 0.1;
var c: T = 0.0;
var d: T = 0.0;
const a: T = 1.1;
const b: T = 0.1;
const c: T = 0.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == math.inf(T));

View file

@ -162,7 +162,7 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
// Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
// Right shift the quotient if it falls in the [1,2) range and adjust the
// exponent accordingly.
var quotient: u64 = if (quotient128 < (integerBit << 1)) b: {
const quotient: u64 = if (quotient128 < (integerBit << 1)) b: {
quotientExponent -= 1;
break :b @intCast(quotient128);
} else @intCast(quotient128 >> 1);
@ -177,7 +177,7 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
//
// If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
// already have the correct result. The exact halfway case cannot occur.
var residual: u64 = -%(quotient *% q63b);
const residual: u64 = -%(quotient *% q63b);
const writtenExponent = quotientExponent + exponentBias;
if (writtenExponent >= maxExponent) {

View file

@ -57,8 +57,8 @@ const simple_allocator = struct {
/// Resize a slice.
pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
var c_ptr: *anyopaque = @ptrCast(slice.ptr);
var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
const c_ptr: *anyopaque = @ptrCast(slice.ptr);
const new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
return new_array[0..len];
}
@ -78,7 +78,7 @@ const ObjectArray = struct {
/// create a new ObjectArray with n slots. must call deinit() to deallocate.
pub fn init(n: usize) *ObjectArray {
var array = simple_allocator.alloc(ObjectArray);
const array = simple_allocator.alloc(ObjectArray);
array.* = ObjectArray{
.slots = simple_allocator.allocSlice(?ObjectPointer, n),
@ -166,7 +166,7 @@ const current_thread_storage = struct {
const size = @max(16, index);
// create a new array and store it.
var array: *ObjectArray = ObjectArray.init(size);
const array: *ObjectArray = ObjectArray.init(size);
current_thread_storage.setspecific(array);
return array;
}
@ -304,13 +304,13 @@ const emutls_control = extern struct {
test "simple_allocator" {
if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
var data1: *[64]u8 = simple_allocator.alloc([64]u8);
const data1: *[64]u8 = simple_allocator.alloc([64]u8);
defer simple_allocator.free(data1);
for (data1) |*c| {
c.* = 0xff;
}
var data2: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
const data2: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
defer simple_allocator.free(data2);
for (data2[0..63]) |*c| {
c.* = 0xff;
@ -324,7 +324,7 @@ test "__emutls_get_address zeroed" {
try expect(ctl.object.index == 0);
// retrieve a variable from ctl
var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
const x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
try expect(ctl.object.index != 0); // index has been allocated for this ctl
try expect(x.* == 0); // storage has been zeroed
@ -332,7 +332,7 @@ test "__emutls_get_address zeroed" {
x.* = 1234;
// retrieve a variable from ctl (same ctl)
var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
const y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
try expect(y.* == 1234); // same content that x.*
try expect(x == y); // same pointer
@ -345,7 +345,7 @@ test "__emutls_get_address with default_value" {
var ctl = emutls_control.init(usize, &value);
try expect(ctl.object.index == 0);
var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
const x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
try expect(ctl.object.index != 0);
try expect(x.* == 5678); // storage initialized with default value
@ -354,7 +354,7 @@ test "__emutls_get_address with default_value" {
try expect(value == 5678); // the default value didn't change
var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
const y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
try expect(y.* == 9012); // the modified storage persists
}
@ -364,7 +364,7 @@ test "test default_value with differents sizes" {
const testType = struct {
fn _testType(comptime T: type, value: T) !void {
var ctl = emutls_control.init(T, &value);
var x = ctl.get_typed_pointer(T);
const x = ctl.get_typed_pointer(T);
try expect(x.* == value);
}
}._testType;

View file

@ -117,7 +117,7 @@ pub fn exp(x_: f64) callconv(.C) f64 {
const P5: f64 = 4.13813679705723846039e-08;
var x = x_;
var ux: u64 = @bitCast(x);
const ux: u64 = @bitCast(x);
var hx = ux >> 32;
const sign: i32 = @intCast(hx >> 31);
hx &= 0x7FFFFFFF;

View file

@ -38,7 +38,7 @@ pub fn exp2f(x: f32) callconv(.C) f32 {
const P3: f32 = 0x1.c6b348p-5;
const P4: f32 = 0x1.3b2c9cp-7;
var u: u32 = @bitCast(x);
const u: u32 = @bitCast(x);
const ix = u & 0x7FFFFFFF;
// |x| > 126

View file

@ -2,8 +2,8 @@ const ffs = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ffsdi2(a: u64, expected: i32) !void {
var x = @as(i64, @bitCast(a));
var result = ffs.__ffsdi2(x);
const x = @as(i64, @bitCast(a));
const result = ffs.__ffsdi2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const ffs = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ffssi2(a: u32, expected: i32) !void {
var x = @as(i32, @bitCast(a));
var result = ffs.__ffssi2(x);
const x = @as(i32, @bitCast(a));
const result = ffs.__ffssi2(x);
try testing.expectEqual(expected, result);
}

View file

@ -2,8 +2,8 @@ const ffs = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__ffsti2(a: u128, expected: i32) !void {
var x = @as(i128, @bitCast(a));
var result = ffs.__ffsti2(x);
const x = @as(i128, @bitCast(a));
const result = ffs.__ffsti2(x);
try testing.expectEqual(expected, result);
}

View file

@ -18,12 +18,12 @@ pub fn floatFromInt(comptime T: type, x: anytype) T {
const max_exp = exp_bias;
// Sign
var abs_val = if (@TypeOf(x) == comptime_int or @typeInfo(@TypeOf(x)).Int.signedness == .signed) @abs(x) else x;
const abs_val = if (@TypeOf(x) == comptime_int or @typeInfo(@TypeOf(x)).Int.signedness == .signed) @abs(x) else x;
const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
var result: uT = sign_bit;
// Compute significand
var exp = int_bits - @clz(abs_val) - 1;
const exp = int_bits - @clz(abs_val) - 1;
if (int_bits <= fractional_bits or exp <= fractional_bits) {
const shift_amt = fractional_bits - @as(math.Log2Int(uT), @intCast(exp));
@ -31,7 +31,7 @@ pub fn floatFromInt(comptime T: type, x: anytype) T {
result = @as(uT, @intCast(abs_val)) << shift_amt;
result ^= implicit_bit; // Remove implicit integer bit
} else {
var shift_amt: math.Log2Int(Z) = @intCast(exp - fractional_bits);
const shift_amt: math.Log2Int(Z) = @intCast(exp - fractional_bits);
const exact_tie: bool = @ctz(abs_val) == shift_amt - 1;
// Shift down result and remove implicit integer bit
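
The exponent computed above is the position of the highest set bit: for a nonzero value, bit_width - @clz(v) - 1 equals floor(log2(v)). A small sketch of that identity, using a hypothetical helper:

const std = @import("std");

fn leadingBitExp(v: u32) u32 {
    std.debug.assert(v != 0); // @clz(0) would make this underflow
    return 31 - @clz(v);
}

test "leadingBitExp" {
    try std.testing.expectEqual(@as(u32, 3), leadingBitExp(12)); // 0b1100
    try std.testing.expectEqual(@as(u32, 0), leadingBitExp(1));
}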

View file

@ -59,13 +59,13 @@ pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 {
}
const x1 = math.frexp(x);
var ex = x1.exponent;
var xs = x1.significand;
const ex = x1.exponent;
const xs = x1.significand;
const x2 = math.frexp(y);
var ey = x2.exponent;
var ys = x2.significand;
const ey = x2.exponent;
const ys = x2.significand;
const x3 = math.frexp(z);
var ez = x3.exponent;
const ez = x3.exponent;
var zs = x3.significand;
var spread = ex + ey - ez;
@ -118,13 +118,13 @@ pub fn fmaq(x: f128, y: f128, z: f128) callconv(.C) f128 {
}
const x1 = math.frexp(x);
var ex = x1.exponent;
var xs = x1.significand;
const ex = x1.exponent;
const xs = x1.significand;
const x2 = math.frexp(y);
var ey = x2.exponent;
var ys = x2.significand;
const ey = x2.exponent;
const ys = x2.significand;
const x3 = math.frexp(z);
var ez = x3.exponent;
const ez = x3.exponent;
var zs = x3.significand;
var spread = ex + ey - ez;
@ -181,15 +181,15 @@ fn dd_mul(a: f64, b: f64) dd {
var p = a * split;
var ha = a - p;
ha += p;
var la = a - ha;
const la = a - ha;
p = b * split;
var hb = b - p;
hb += p;
var lb = b - hb;
const lb = b - hb;
p = ha * hb;
var q = ha * lb + la * hb;
const q = ha * lb + la * hb;
ret.hi = p + q;
ret.lo = p - ret.hi + q + la * lb;
@ -301,15 +301,15 @@ fn dd_mul128(a: f128, b: f128) dd128 {
var p = a * split;
var ha = a - p;
ha += p;
var la = a - ha;
const la = a - ha;
p = b * split;
var hb = b - p;
hb += p;
var lb = b - hb;
const lb = b - hb;
p = ha * hb;
var q = ha * lb + la * hb;
const q = ha * lb + la * hb;
ret.hi = p + q;
ret.lo = p - ret.hi + q + la * lb;
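
The (a - p) + p dance above is the classic Veltkamp split, which writes a double as an exact sum of two narrower halves so that products of halves incur no rounding error. A sketch of the same idea in isolation, assuming the usual f64 split constant:

const std = @import("std");

fn veltkampSplit(a: f64) struct { hi: f64, lo: f64 } {
    const split: f64 = 0x1.0p27 + 1.0; // 2^27 + 1 for f64
    const p = a * split;
    const hi = (a - p) + p; // high half: top ~26 significand bits
    const lo = a - hi; // low half: the exact remainder
    return .{ .hi = hi, .lo = lo };
}

test "veltkampSplit is exact" {
    const s = veltkampSplit(1.0 / 3.0);
    try std.testing.expect(s.hi + s.lo == 1.0 / 3.0);
}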

View file

@ -81,13 +81,13 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
if (expB == 0) expB = normalize(f80, &bRep);
var highA: u64 = 0;
var highB: u64 = 0;
const highB: u64 = 0;
var lowA: u64 = @truncate(aRep);
var lowB: u64 = @truncate(bRep);
const lowB: u64 = @truncate(bRep);
while (expA > expB) : (expA -= 1) {
var high = highA -% highB;
var low = lowA -% lowB;
const low = lowA -% lowB;
if (lowA < lowB) {
high -%= 1;
}
@ -104,7 +104,7 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
}
var high = highA -% highB;
var low = lowA -% lowB;
const low = lowA -% lowB;
if (lowA < lowB) {
high -%= 1;
}
@ -194,13 +194,13 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
// OR in extra non-stored mantissa digit
var highA: u64 = (aPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
var highB: u64 = (bPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
const highB: u64 = (bPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
var lowA: u64 = aPtr_u64[low_index];
var lowB: u64 = bPtr_u64[low_index];
const lowB: u64 = bPtr_u64[low_index];
while (expA > expB) : (expA -= 1) {
var high = highA -% highB;
var low = lowA -% lowB;
const low = lowA -% lowB;
if (lowA < lowB) {
high -%= 1;
}
@ -217,7 +217,7 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
}
var high = highA -% highB;
var low = lowA -% lowB;
const low = lowA -% lowB;
if (lowA < lowB) {
high -= 1;
}

View file

@ -25,7 +25,7 @@ pub inline fn mulc3(comptime T: type, a_in: T, b_in: T, c_in: T, d_in: T) Comple
const zero: T = 0.0;
const one: T = 1.0;
var z = Complex(T){
const z: Complex(T) = .{
.real = ac - bd,
.imag = ad + bc,
};
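
The struct literal above is the textbook complex product, (a + bi)(c + di) = (ac - bd) + (ad + bc)i; the rest of mulc3, not shown in this hunk, exists to repair the inf/NaN special cases that the naive formula gets wrong.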

View file

@ -19,20 +19,20 @@ test {
fn testMul(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)) !void {
{
var a: T = 1.0;
var b: T = 0.0;
var c: T = -1.0;
var d: T = 0.0;
const a: T = 1.0;
const b: T = 0.0;
const c: T = -1.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == -1.0);
try expect(result.imag == 0.0);
}
{
var a: T = 1.0;
var b: T = 0.0;
var c: T = -4.0;
var d: T = 0.0;
const a: T = 1.0;
const b: T = 0.0;
const c: T = -4.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == -4.0);
@ -41,10 +41,10 @@ fn testMul(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)
{
// if one operand is an infinity and the other operand is a nonzero finite number or an infinity,
// then the result of the * operator is an infinity;
var a: T = math.inf(T);
var b: T = -math.inf(T);
var c: T = 1.0;
var d: T = 0.0;
const a: T = math.inf(T);
const b: T = -math.inf(T);
const c: T = 1.0;
const d: T = 0.0;
const result = f(a, b, c, d);
try expect(result.real == math.inf(T));
@ -53,10 +53,10 @@ fn testMul(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)
{
// if one operand is an infinity and the other operand is a nonzero finite number or an infinity,
// then the result of the * operator is an infinity;
var a: T = math.inf(T);
var b: T = -1.0;
var c: T = 1.0;
var d: T = math.inf(T);
const a: T = math.inf(T);
const b: T = -1.0;
const c: T = 1.0;
const d: T = math.inf(T);
const result = f(a, b, c, d);
try expect(result.real == math.inf(T));

View file

@ -20,7 +20,7 @@ comptime {
inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
overflow.* = 0;
const min = math.minInt(ST);
var res: ST = a *% b;
const res: ST = a *% b;
// Hacker's Delight section Overflow subsection Multiplication
// case a=-2^{31}, b=-1 problem, because
// on some machines a*b = -2^{31} with overflow
@ -41,7 +41,7 @@ inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int)
};
const min = math.minInt(ST);
const max = math.maxInt(ST);
var res: EST = @as(EST, a) * @as(EST, b);
const res: EST = @as(EST, a) * @as(EST, b);
//invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1}
if (res < min or max < res)
overflow.* = 1;
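
The fast path above relies on a doubled-width intermediate: compute the product exactly in EST, then flag overflow if it falls outside the narrow type's range. The same strategy in a standalone sketch, a hypothetical helper for i32:

const std = @import("std");

fn mulOverflows32(a: i32, b: i32) bool {
    const wide = @as(i64, a) * @as(i64, b); // exact: 64 bits hold any i32 product
    return wide < std.math.minInt(i32) or wide > std.math.maxInt(i32);
}

test "mulOverflows32" {
    try std.testing.expect(mulOverflows32(std.math.minInt(i32), -1)); // the a*b = -2^31 case
    try std.testing.expect(!mulOverflows32(46340, 46340)); // 46340^2 still fits in i32
}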

View file

@ -2,7 +2,7 @@ const neg = @import("negXi2.zig");
const testing = @import("std").testing;
fn test__negdi2(a: i64, expected: i64) !void {
var result = neg.__negdi2(a);
const result = neg.__negdi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -5,7 +5,7 @@ const testing = std.testing;
const print = std.debug.print;
fn test__negsi2(a: i32, expected: i32) !void {
var result = neg.__negsi2(a);
const result = neg.__negsi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const neg = @import("negXi2.zig");
const testing = @import("std").testing;
fn test__negti2(a: i128, expected: i128) !void {
var result = neg.__negti2(a);
const result = neg.__negti2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const negv = @import("negv.zig");
const testing = @import("std").testing;
fn test__negvdi2(a: i64, expected: i64) !void {
var result = negv.__negvdi2(a);
const result = negv.__negvdi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const negv = @import("negv.zig");
const testing = @import("std").testing;
fn test__negvsi2(a: i32, expected: i32) !void {
var result = negv.__negvsi2(a);
const result = negv.__negvsi2(a);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const negv = @import("negv.zig");
const testing = @import("std").testing;
fn test__negvti2(a: i128, expected: i128) !void {
var result = negv.__negvti2(a);
const result = negv.__negvti2(a);
try testing.expectEqual(expected, result);
}

View file

@ -13,8 +13,8 @@ fn paritydi2Naive(a: i64) i32 {
}
fn test__paritydi2(a: i64) !void {
var x = parity.__paritydi2(a);
var expected: i64 = paritydi2Naive(a);
const x = parity.__paritydi2(a);
const expected: i64 = paritydi2Naive(a);
try testing.expectEqual(expected, x);
}
@ -30,7 +30,7 @@ test "paritydi2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i64);
const rand_num = rnd.random().int(i64);
try test__paritydi2(rand_num);
}
}

View file

@ -13,8 +13,8 @@ fn paritysi2Naive(a: i32) i32 {
}
fn test__paritysi2(a: i32) !void {
var x = parity.__paritysi2(a);
var expected: i32 = paritysi2Naive(a);
const x = parity.__paritysi2(a);
const expected: i32 = paritysi2Naive(a);
try testing.expectEqual(expected, x);
}
@ -30,7 +30,7 @@ test "paritysi2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i32);
const rand_num = rnd.random().int(i32);
try test__paritysi2(rand_num);
}
}

View file

@ -13,8 +13,8 @@ fn parityti2Naive(a: i128) i32 {
}
fn test__parityti2(a: i128) !void {
var x = parity.__parityti2(a);
var expected: i128 = parityti2Naive(a);
const x = parity.__parityti2(a);
const expected: i128 = parityti2Naive(a);
try testing.expectEqual(expected, x);
}
@ -30,7 +30,7 @@ test "parityti2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i128);
const rand_num = rnd.random().int(i128);
try test__parityti2(rand_num);
}
}

View file

@ -29,7 +29,7 @@ test "popcountdi2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i64);
const rand_num = rnd.random().int(i64);
try test__popcountdi2(rand_num);
}
}

View file

@ -29,7 +29,7 @@ test "popcountsi2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i32);
const rand_num = rnd.random().int(i32);
try test__popcountsi2(rand_num);
}
}

View file

@ -29,7 +29,7 @@ test "popcountti2" {
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
var rand_num = rnd.random().int(i128);
const rand_num = rnd.random().int(i128);
try test__popcountti2(rand_num);
}
}

View file

@ -9,27 +9,27 @@ const testing = std.testing;
const math = std.math;
fn test__powihf2(a: f16, b: i32, expected: f16) !void {
var result = powiXf2.__powihf2(a, b);
const result = powiXf2.__powihf2(a, b);
try testing.expectEqual(expected, result);
}
fn test__powisf2(a: f32, b: i32, expected: f32) !void {
var result = powiXf2.__powisf2(a, b);
const result = powiXf2.__powisf2(a, b);
try testing.expectEqual(expected, result);
}
fn test__powidf2(a: f64, b: i32, expected: f64) !void {
var result = powiXf2.__powidf2(a, b);
const result = powiXf2.__powidf2(a, b);
try testing.expectEqual(expected, result);
}
fn test__powitf2(a: f128, b: i32, expected: f128) !void {
var result = powiXf2.__powitf2(a, b);
const result = powiXf2.__powitf2(a, b);
try testing.expectEqual(expected, result);
}
fn test__powixf2(a: f80, b: i32, expected: f80) !void {
var result = powiXf2.__powixf2(a, b);
const result = powiXf2.__powixf2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -27,7 +27,7 @@ pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
overflow.* = 0;
var sum: ST = a -% b;
const sum: ST = a -% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
// Let sum = a -% b == a - b - carry == wraparound subtraction.
// Overflow in a-b-carry occurs, iff a and b have opposite signs
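
As with addition, the check reduces to a sign comparison, but for subtraction the operands must differ in sign and the wrapped difference must disagree with the minuend. A companion sketch, again a hypothetical helper:

const std = @import("std");

// Signed a - b overflows iff a and b have opposite signs and
// the wrapped difference has the opposite sign from a.
fn subOverflows(comptime ST: type, a: ST, b: ST) bool {
    const diff = a -% b;
    return ((a ^ b) & (diff ^ a)) < 0;
}

test "subOverflows" {
    try std.testing.expect(subOverflows(i32, std.math.minInt(i32), 1));
    try std.testing.expect(!subOverflows(i32, 5, 3));
}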

View file

@ -6,8 +6,8 @@ const math = std.math;
fn test__subodi4(a: i64, b: i64) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = subo.__subodi4(a, b, &result_ov);
var expected: i64 = simple_subodi4(a, b, &expected_ov);
const result = subo.__subodi4(a, b, &result_ov);
const expected: i64 = simple_subodi4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -4,8 +4,8 @@ const testing = @import("std").testing;
fn test__subosi4(a: i32, b: i32) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = subo.__subosi4(a, b, &result_ov);
var expected: i32 = simple_subosi4(a, b, &expected_ov);
const result = subo.__subosi4(a, b, &result_ov);
const expected: i32 = simple_subosi4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -6,8 +6,8 @@ const math = std.math;
fn test__suboti4(a: i128, b: i128) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = subo.__suboti4(a, b, &result_ov);
var expected: i128 = simple_suboti4(a, b, &expected_ov);
const result = subo.__suboti4(a, b, &result_ov);
const expected: i128 = simple_suboti4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__ucmpdi2(a: u64, b: u64, expected: i32) !void {
var result = cmp.__ucmpdi2(a, b);
const result = cmp.__ucmpdi2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__ucmpsi2(a: u32, b: u32, expected: i32) !void {
var result = cmp.__ucmpsi2(a, b);
const result = cmp.__ucmpsi2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -2,7 +2,7 @@ const cmp = @import("cmp.zig");
const testing = @import("std").testing;
fn test__ucmpti2(a: u128, b: u128, expected: i32) !void {
var result = cmp.__ucmpti2(a, b);
const result = cmp.__ucmpti2(a, b);
try testing.expectEqual(expected, result);
}

View file

@ -52,7 +52,7 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
if (rhat >= b) break;
}
var un21 = un64 *% b +% un1 -% q1 *% v;
const un21 = un64 *% b +% un1 -% q1 *% v;
// Compute the second quotient digit
var q0 = un21 / vn1;
@ -101,8 +101,8 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
return 0;
}
var a: [2]HalfT = @bitCast(a_);
var b: [2]HalfT = @bitCast(b_);
const a: [2]HalfT = @bitCast(a_);
const b: [2]HalfT = @bitCast(b_);
var q: [2]HalfT = undefined;
var r: [2]HalfT = undefined;
@ -125,7 +125,7 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
}
// 0 <= shift <= 63
var shift: Log2Int(T) = @clz(b[hi]) - @clz(a[hi]);
const shift: Log2Int(T) = @clz(b[hi]) - @clz(a[hi]);
var af: T = @bitCast(a);
var bf = @as(T, @bitCast(b)) << shift;
q = @bitCast(@as(T, 0));

View file

@ -116,7 +116,7 @@ pub fn __udivei4(r_q: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize)
@setRuntimeSafety(builtin.is_test);
const u = u_p[0 .. bits / 32];
const v = v_p[0 .. bits / 32];
var q = r_q[0 .. bits / 32];
const q = r_q[0 .. bits / 32];
@call(.always_inline, divmod, .{ q, null, u, v }) catch unreachable;
}
@ -124,7 +124,7 @@ pub fn __umodei4(r_p: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize)
@setRuntimeSafety(builtin.is_test);
const u = u_p[0 .. bits / 32];
const v = v_p[0 .. bits / 32];
var r = r_p[0 .. bits / 32];
const r = r_p[0 .. bits / 32];
@call(.always_inline, divmod, .{ null, r, u, v }) catch unreachable;
}

View file

@ -141,7 +141,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
var i: u8 = 1; // Start at 1 to skip over checking the null prefix.
while (i < prefixes_slice.len) : (i += 1) {
const p = prefixes_slice[i].path.?;
var sub_path = getPrefixSubpath(gpa, p, resolved_path) catch |err| switch (err) {
const sub_path = getPrefixSubpath(gpa, p, resolved_path) catch |err| switch (err) {
error.NotASubPath => continue,
else => |e| return e,
};

View file

@ -950,7 +950,7 @@ fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void {
fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
var buf: [80]u8 = undefined;
var text = try std.fmt.bufPrint(buf[0..], "{s} {d} bytes ", .{ label, bytes.len });
const text = try std.fmt.bufPrint(buf[0..], "{s} {d} bytes ", .{ label, bytes.len });
try out.writeAll(text);
var i: usize = text.len;
const end = 79;
@ -983,12 +983,12 @@ fn hexDump(out: anytype, bytes: []const u8) !void {
try printDecValue(out, offset, 8);
try out.writeAll(":");
try out.writeAll(" ");
var end1 = @min(offset + n, offset + 8);
const end1 = @min(offset + n, offset + 8);
for (bytes[offset..end1]) |b| {
try out.writeAll(" ");
try printHexValue(out, b, 2);
}
var end2 = offset + n;
const end2 = offset + n;
if (end2 > end1) {
try out.writeAll(" ");
for (bytes[end1..end2]) |b| {

View file

@ -293,7 +293,7 @@ const Check = struct {
/// Creates a new empty sequence of actions.
pub fn checkStart(self: *CheckObject) void {
var new_check = Check.create(self.step.owner.allocator);
const new_check = Check.create(self.step.owner.allocator);
self.checks.append(new_check) catch @panic("OOM");
}

View file

@ -307,8 +307,8 @@ fn render_cmake(
values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
var build = step.owner;
var allocator = build.allocator;
const build = step.owner;
const allocator = build.allocator;
var values_copy = try values.clone();
defer values_copy.deinit();

View file

@ -301,7 +301,7 @@ pub fn addPathDir(self: *Run, search_path: []const u8) void {
const env_map = getEnvMapInternal(self);
const key = "PATH";
var prev_path = env_map.get(key);
const prev_path = env_map.get(key);
if (prev_path) |pp| {
const new_path = b.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });

View file

@ -397,6 +397,7 @@ fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: any
test "basic functionality" {
var disable = true;
_ = &disable;
if (disable) {
// This test is disabled because it uses time.sleep() and is therefore slow. It also
// prints bogus progress data to stderr.

View file

@ -25,7 +25,7 @@ pub fn finish(self: *WaitGroup) void {
}
pub fn wait(self: *WaitGroup) void {
var state = self.state.fetchAdd(is_waiting, .Acquire);
const state = self.state.fetchAdd(is_waiting, .Acquire);
assert(state & is_waiting == 0);
if ((state / one_pending) > 0) {

View file

@ -2076,11 +2076,11 @@ test "iterator hash map" {
try reset_map.putNoClobber(1, 22);
try reset_map.putNoClobber(2, 33);
var keys = [_]i32{
const keys = [_]i32{
0, 2, 1,
};
var values = [_]i32{
const values = [_]i32{
11, 33, 22,
};
@ -2116,7 +2116,7 @@ test "iterator hash map" {
}
it.reset();
var entry = it.next().?;
const entry = it.next().?;
try testing.expect(entry.key_ptr.* == first_entry.key_ptr.*);
try testing.expect(entry.value_ptr.* == first_entry.value_ptr.*);
}

View file

@ -979,7 +979,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
if (self.capacity >= new_capacity) return;
var better_capacity = growCapacity(self.capacity, new_capacity);
const better_capacity = growCapacity(self.capacity, new_capacity);
return self.ensureTotalCapacityPrecise(allocator, better_capacity);
}
@ -1159,7 +1159,7 @@ test "std.ArrayList/ArrayListUnmanaged.init" {
}
{
var list = ArrayListUnmanaged(i32){};
const list = ArrayListUnmanaged(i32){};
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity == 0);

View file

@ -125,7 +125,7 @@ pub fn Atomic(comptime T: type) type {
@compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores");
}
comptime var success_is_stronger = switch (failure) {
const success_is_stronger = switch (failure) {
.SeqCst => success == .SeqCst,
.AcqRel => @compileError(@tagName(failure) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on success"),
.Acquire => success == .SeqCst or success == .AcqRel or success == .Acquire,

View file

@ -175,11 +175,11 @@ const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.Queue" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
const plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
var a = fixed_buffer_allocator.threadSafeAllocator();
const a = fixed_buffer_allocator.threadSafeAllocator();
var queue = Queue(i32).init();
var context = Context{

View file

@ -85,11 +85,11 @@ const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.stack" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
const plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
var a = fixed_buffer_allocator.threadSafeAllocator();
const a = fixed_buffer_allocator.threadSafeAllocator();
var stack = Stack(i32).init();
var context = Context{

View file

@ -239,7 +239,7 @@ pub const Base64Decoder = struct {
if ((bits & invalid_char_tst) != 0) return error.InvalidCharacter;
std.mem.writeInt(u32, dest[dest_idx..][0..4], bits, .little);
}
var remaining = source[fast_src_idx..];
const remaining = source[fast_src_idx..];
for (remaining, fast_src_idx..) |c, src_idx| {
const d = decoder.char_to_index[c];
if (d == invalid_char) {
@ -259,7 +259,7 @@ pub const Base64Decoder = struct {
return error.InvalidPadding;
}
if (leftover_idx == null) return;
var leftover = source[leftover_idx.?..];
const leftover = source[leftover_idx.?..];
if (decoder.pad_char) |pad_char| {
const padding_len = acc_len / 2;
var padding_chars: usize = 0;
@ -338,7 +338,7 @@ pub const Base64DecoderWithIgnore = struct {
if (decoder.pad_char != null and padding_len != 0) return error.InvalidPadding;
return dest_idx;
}
var leftover = source[leftover_idx.?..];
const leftover = source[leftover_idx.?..];
if (decoder.pad_char) |pad_char| {
var padding_chars: usize = 0;
for (leftover) |c| {
@ -483,7 +483,7 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
// Base64Decoder
{
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..try codecs.Decoder.calcSizeForSlice(expected_encoded)];
const decoded = buffer[0..try codecs.Decoder.calcSizeForSlice(expected_encoded)];
try codecs.Decoder.decode(decoded, expected_encoded);
try testing.expectEqualSlices(u8, expected_decoded, decoded);
}
@ -492,8 +492,8 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
{
const decoder_ignore_nothing = codecs.decoderWithIgnore("");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..try decoder_ignore_nothing.calcSizeUpperBound(expected_encoded.len)];
var written = try decoder_ignore_nothing.decode(decoded, expected_encoded);
const decoded = buffer[0..try decoder_ignore_nothing.calcSizeUpperBound(expected_encoded.len)];
const written = try decoder_ignore_nothing.decode(decoded, expected_encoded);
try testing.expect(written <= decoded.len);
try testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]);
}
@ -502,8 +502,8 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
fn testDecodeIgnoreSpace(codecs: Codecs, expected_decoded: []const u8, encoded: []const u8) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..try decoder_ignore_space.calcSizeUpperBound(encoded.len)];
var written = try decoder_ignore_space.decode(decoded, encoded);
const decoded = buffer[0..try decoder_ignore_space.calcSizeUpperBound(encoded.len)];
const written = try decoder_ignore_space.decode(decoded, encoded);
try testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]);
}
@ -511,7 +511,7 @@ fn testError(codecs: Codecs, encoded: []const u8, expected_err: anyerror) !void
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
if (codecs.Decoder.calcSizeForSlice(encoded)) |decoded_size| {
var decoded = buffer[0..decoded_size];
const decoded = buffer[0..decoded_size];
if (codecs.Decoder.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != expected_err) return err;
@ -525,7 +525,7 @@ fn testError(codecs: Codecs, encoded: []const u8, expected_err: anyerror) !void
fn testNoSpaceLeftError(codecs: Codecs, encoded: []const u8) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0 .. (try codecs.Decoder.calcSizeForSlice(encoded)) - 1];
const decoded = buffer[0 .. (try codecs.Decoder.calcSizeForSlice(encoded)) - 1];
if (decoder_ignore_space.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != error.NoSpaceLeft) return err;
@ -534,7 +534,7 @@ fn testNoSpaceLeftError(codecs: Codecs, encoded: []const u8) !void {
fn testFourBytesDestNoSpaceLeftError(codecs: Codecs, encoded: []const u8) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..4];
const decoded = buffer[0..4];
if (decoder_ignore_space.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != error.NoSpaceLeft) return err;

View file

@ -15,8 +15,7 @@ pub const BufMap = struct {
/// That allocator will be used for both backing allocations
/// and string deduplication.
pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
return .{ .hash_map = BufMapHashMap.init(allocator) };
}
/// Free the backing storage of the map, as well as all

View file

@ -17,8 +17,7 @@ pub const BufSet = struct {
/// be used internally for both backing allocations and
/// string duplication.
pub fn init(a: Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
return .{ .hash_map = BufSetHashMap.init(a) };
}
/// Free a BufSet along with all stored keys.
@ -76,8 +75,8 @@ pub const BufSet = struct {
self: *const BufSet,
new_allocator: Allocator,
) Allocator.Error!BufSet {
var cloned_hashmap = try self.hash_map.cloneWithAllocator(new_allocator);
var cloned = BufSet{ .hash_map = cloned_hashmap };
const cloned_hashmap = try self.hash_map.cloneWithAllocator(new_allocator);
const cloned = BufSet{ .hash_map = cloned_hashmap };
var it = cloned.hash_map.keyIterator();
while (it.next()) |key_ptr| {
key_ptr.* = try cloned.copy(key_ptr.*);
@ -134,7 +133,7 @@ test "BufSet clone" {
}
test "BufSet.clone with arena" {
var allocator = std.testing.allocator;
const allocator = std.testing.allocator;
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();

View file

@ -777,9 +777,8 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
}
var fmt: [256]u8 = undefined;
var slice = try std.fmt.bufPrint(&fmt, "\r\nerr: {s}\r\n", .{exit_msg});
var len = try std.unicode.utf8ToUtf16Le(utf16, slice);
const slice = try std.fmt.bufPrint(&fmt, "\r\nerr: {s}\r\n", .{exit_msg});
const len = try std.unicode.utf8ToUtf16Le(utf16, slice);
utf16[len] = 0;
@ -790,7 +789,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
};
var exit_size: usize = 0;
var exit_data = ExitData.create_exit_data(msg, &exit_size) catch null;
const exit_data = ExitData.create_exit_data(msg, &exit_size) catch null;
if (exit_data) |data| {
if (uefi.system_table.std_err) |out| {

View file

@ -847,7 +847,7 @@ pub const ChildProcess = struct {
}
windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
var original_err = switch (no_path_err) {
const original_err = switch (no_path_err) {
error.FileNotFound, error.InvalidExe, error.AccessDenied => |e| e,
error.UnrecoverableInvalidExe => return error.InvalidExe,
else => |e| return e,

View file

@ -1075,7 +1075,7 @@ pub const Coff = struct {
var stream = std.io.fixedBufferStream(data);
const reader = stream.reader();
try stream.seekTo(pe_pointer_offset);
var coff_header_offset = try reader.readInt(u32, .little);
const coff_header_offset = try reader.readInt(u32, .little);
try stream.seekTo(coff_header_offset);
var buf: [4]u8 = undefined;
try reader.readNoEof(&buf);

View file

@ -15,7 +15,7 @@ test "bitReverse" {
out: u16,
};
var reverse_bits_tests = [_]ReverseBitsTest{
const reverse_bits_tests = [_]ReverseBitsTest{
.{ .in = 1, .bit_count = 1, .out = 1 },
.{ .in = 1, .bit_count = 2, .out = 2 },
.{ .in = 1, .bit_count = 3, .out = 4 },
@ -27,7 +27,7 @@ test "bitReverse" {
};
for (reverse_bits_tests) |h| {
var v = bitReverse(u16, h.in, h.bit_count);
const v = bitReverse(u16, h.in, h.bit_count);
try std.testing.expectEqual(h.out, v);
}
}

View file

@ -156,8 +156,8 @@ fn levels(compression: Compression) CompressionLevel {
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
fn matchLen(a: []u8, b: []u8, max: u32) u32 {
var bounded_a = a[0..max];
var bounded_b = b[0..max];
const bounded_a = a[0..max];
const bounded_b = b[0..max];
for (bounded_a, 0..) |av, i| {
if (bounded_b[i] != av) {
return @as(u32, @intCast(i));
@ -191,7 +191,7 @@ fn bulkHash4(b: []u8, dst: []u32) u32 {
@as(u32, b[0]) << 24;
dst[0] = (hb *% hash_mul) >> (32 - hash_bits);
var end = b.len - min_match_length + 1;
const end = b.len - min_match_length + 1;
var i: u32 = 1;
while (i < end) : (i += 1) {
hb = (hb << 8) | @as(u32, b[i + 3]);
@ -305,7 +305,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
}
self.hash_offset += window_size;
if (self.hash_offset > max_hash_offset) {
var delta = self.hash_offset - 1;
const delta = self.hash_offset - 1;
self.hash_offset -= delta;
self.chain_head -|= delta;
@ -369,31 +369,31 @@ pub fn Compressor(comptime WriterType: anytype) type {
}
// Add all to window.
@memcpy(self.window[0..b.len], b);
var n = b.len;
const n = b.len;
// Calculate 256 hashes at the time (more L1 cache hits)
var loops = (n + 256 - min_match_length) / 256;
const loops = (n + 256 - min_match_length) / 256;
var j: usize = 0;
while (j < loops) : (j += 1) {
var index = j * 256;
const index = j * 256;
var end = index + 256 + min_match_length - 1;
if (end > n) {
end = n;
}
var to_check = self.window[index..end];
var dst_size = to_check.len - min_match_length + 1;
const to_check = self.window[index..end];
const dst_size = to_check.len - min_match_length + 1;
if (dst_size <= 0) {
continue;
}
var dst = self.hash_match[0..dst_size];
const dst = self.hash_match[0..dst_size];
_ = self.bulk_hasher(to_check, dst);
var new_h: u32 = 0;
for (dst, 0..) |val, i| {
var di = i + index;
const di = i + index;
new_h = val;
var hh = &self.hash_head[new_h & hash_mask];
const hh = &self.hash_head[new_h & hash_mask];
// Get previous value with the same hash.
// Our chain should point to the previous value.
self.hash_prev[di & window_mask] = hh.*;
@ -447,13 +447,13 @@ pub fn Compressor(comptime WriterType: anytype) type {
}
var w_end = win[pos + length];
var w_pos = win[pos..];
var min_index = pos -| window_size;
const w_pos = win[pos..];
const min_index = pos -| window_size;
var i = prev_head;
while (tries > 0) : (tries -= 1) {
if (w_end == win[i + length]) {
var n = matchLen(win[i..], w_pos, min_match_look);
const n = matchLen(win[i..], w_pos, min_match_look);
if (n > length and (n > min_match_length or pos - i <= 4096)) {
length = n;
@ -565,7 +565,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
while (true) {
assert(self.index <= self.window_end);
var lookahead = self.window_end -| self.index;
const lookahead = self.window_end -| self.index;
if (lookahead < min_match_length + max_match_length) {
if (!self.sync) {
break;
@ -590,16 +590,16 @@ pub fn Compressor(comptime WriterType: anytype) type {
if (self.index < self.max_insert_index) {
// Update the hash
self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
var hh = &self.hash_head[self.hash & hash_mask];
const hh = &self.hash_head[self.hash & hash_mask];
self.chain_head = @as(u32, @intCast(hh.*));
self.hash_prev[self.index & window_mask] = @as(u32, @intCast(self.chain_head));
hh.* = @as(u32, @intCast(self.index + self.hash_offset));
}
var prev_length = self.length;
var prev_offset = self.offset;
const prev_length = self.length;
const prev_offset = self.offset;
self.length = min_match_length - 1;
self.offset = 0;
var min_index = self.index -| window_size;
const min_index = self.index -| window_size;
if (self.hash_offset <= self.chain_head and
self.chain_head - self.hash_offset >= min_index and
@ -610,7 +610,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
prev_length < self.compression_level.lazy))
{
{
var fmatch = self.findMatch(
const fmatch = self.findMatch(
self.index,
self.chain_head -| self.hash_offset,
min_match_length - 1,
@ -658,7 +658,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
self.hash = hash4(self.window[index .. index + min_match_length]);
// Get previous value with the same hash.
// Our chain should point to the previous value.
var hh = &self.hash_head[self.hash & hash_mask];
const hh = &self.hash_head[self.hash & hash_mask];
self.hash_prev[index & window_mask] = hh.*;
// Set the head of the hash chain to us.
hh.* = @as(u32, @intCast(index + self.hash_offset));
@ -740,7 +740,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
// compressed form of data to its underlying writer.
while (buf.len > 0) {
try self.step();
var filled = self.fill(buf);
const filled = self.fill(buf);
buf = buf[filled..];
}
@ -1097,12 +1097,12 @@ test "bulkHash4" {
while (j < out.len) : (j += 1) {
var y = out[0..j];
var dst = try testing.allocator.alloc(u32, y.len - min_match_length + 1);
const dst = try testing.allocator.alloc(u32, y.len - min_match_length + 1);
defer testing.allocator.free(dst);
_ = bulkHash4(y, dst);
for (dst, 0..) |got, i| {
var want = hash4(y[i..]);
const want = hash4(y[i..]);
try testing.expectEqual(want, got);
}
}

View file

@ -27,7 +27,7 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
var whole_buf = std.ArrayList(u8).init(testing.allocator);
defer whole_buf.deinit();
var multi_writer = io.multiWriter(.{
const multi_writer = io.multiWriter(.{
divided_buf.writer(),
whole_buf.writer(),
}).writer();
@ -48,7 +48,7 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
defer decomp.deinit();
// Write first half of the input and flush()
var half: usize = (input.len + 1) / 2;
const half: usize = (input.len + 1) / 2;
var half_len: usize = half - 0;
{
_ = try comp.writer().writeAll(input[0..half]);
@ -57,10 +57,10 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
try comp.flush();
// Read back
var decompressed = try testing.allocator.alloc(u8, half_len);
const decompressed = try testing.allocator.alloc(u8, half_len);
defer testing.allocator.free(decompressed);
var read = try decomp.reader().readAll(decompressed); // read at least half
const read = try decomp.reader().readAll(decompressed); // read at least half
try testing.expectEqual(half_len, read);
try testing.expectEqualSlices(u8, input[0..half], decompressed);
}
@ -74,7 +74,7 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
try comp.close();
// Read back
var decompressed = try testing.allocator.alloc(u8, half_len);
const decompressed = try testing.allocator.alloc(u8, half_len);
defer testing.allocator.free(decompressed);
var read = try decomp.reader().readAll(decompressed);
@ -94,11 +94,11 @@ fn testSync(level: deflate.Compression, input: []const u8) !void {
try comp.close();
// stream should work for ordinary reader too (reading whole_buf in one go)
var whole_buf_reader = io.fixedBufferStream(whole_buf.items).reader();
const whole_buf_reader = io.fixedBufferStream(whole_buf.items).reader();
var decomp = try decompressor(testing.allocator, whole_buf_reader, null);
defer decomp.deinit();
var decompressed = try testing.allocator.alloc(u8, input.len);
const decompressed = try testing.allocator.alloc(u8, input.len);
defer testing.allocator.free(decompressed);
_ = try decomp.reader().readAll(decompressed);
@ -125,10 +125,10 @@ fn testToFromWithLevelAndLimit(level: deflate.Compression, input: []const u8, li
var decomp = try decompressor(testing.allocator, fib.reader(), null);
defer decomp.deinit();
var decompressed = try testing.allocator.alloc(u8, input.len);
const decompressed = try testing.allocator.alloc(u8, input.len);
defer testing.allocator.free(decompressed);
var read: usize = try decomp.reader().readAll(decompressed);
const read: usize = try decomp.reader().readAll(decompressed);
try testing.expectEqual(input.len, read);
try testing.expectEqualSlices(u8, input, decompressed);
@ -153,7 +153,7 @@ fn testToFromWithLimit(input: []const u8, limit: [11]u32) !void {
}
test "deflate/inflate" {
var limits = [_]u32{0} ** 11;
const limits = [_]u32{0} ** 11;
var test0 = [_]u8{};
var test1 = [_]u8{0x11};
@ -313,7 +313,7 @@ test "decompressor dictionary" {
try comp.writer().writeAll(text);
try comp.close();
var decompressed = try testing.allocator.alloc(u8, text.len);
const decompressed = try testing.allocator.alloc(u8, text.len);
defer testing.allocator.free(decompressed);
var decomp = try decompressor(
@ -432,7 +432,7 @@ test "deflate/inflate string" {
};
inline for (deflate_inflate_string_tests) |t| {
var golden = @embedFile("testdata/" ++ t.filename);
const golden = @embedFile("testdata/" ++ t.filename);
try testToFromWithLimit(golden, t.limit);
}
}
@ -466,14 +466,14 @@ test "inflate reset" {
var decomp = try decompressor(testing.allocator, fib.reader(), null);
defer decomp.deinit();
var decompressed_0: []u8 = try decomp.reader()
const decompressed_0: []u8 = try decomp.reader()
.readAllAlloc(testing.allocator, math.maxInt(usize));
defer testing.allocator.free(decompressed_0);
fib = io.fixedBufferStream(compressed_strings[1].items);
try decomp.reset(fib.reader(), null);
var decompressed_1: []u8 = try decomp.reader()
const decompressed_1: []u8 = try decomp.reader()
.readAllAlloc(testing.allocator, math.maxInt(usize));
defer testing.allocator.free(decompressed_1);
@ -513,14 +513,14 @@ test "inflate reset dictionary" {
var decomp = try decompressor(testing.allocator, fib.reader(), dict);
defer decomp.deinit();
var decompressed_0: []u8 = try decomp.reader()
const decompressed_0: []u8 = try decomp.reader()
.readAllAlloc(testing.allocator, math.maxInt(usize));
defer testing.allocator.free(decompressed_0);
fib = io.fixedBufferStream(compressed_strings[1].items);
try decomp.reset(fib.reader(), dict);
var decompressed_1: []u8 = try decomp.reader()
const decompressed_1: []u8 = try decomp.reader()
.readAllAlloc(testing.allocator, math.maxInt(usize));
defer testing.allocator.free(decompressed_1);

View file

@ -136,11 +136,11 @@ const HuffmanDecoder = struct {
self.min = min;
if (max > huffman_chunk_bits) {
var num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits));
const num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits));
self.link_mask = @as(u32, @intCast(num_links - 1));
// create link tables
var link = next_code[huffman_chunk_bits + 1] >> 1;
const link = next_code[huffman_chunk_bits + 1] >> 1;
self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link);
self.sub_chunks = ArrayList(u32).init(self.allocator);
self.initialized = true;
@ -148,7 +148,7 @@ const HuffmanDecoder = struct {
while (j < huffman_num_chunks) : (j += 1) {
var reverse = @as(u32, @intCast(bu.bitReverse(u16, @as(u16, @intCast(j)), 16)));
reverse >>= @as(u32, @intCast(16 - huffman_chunk_bits));
var off = j - @as(u32, @intCast(link));
const off = j - @as(u32, @intCast(link));
if (sanity) {
// check we are not overwriting an existing chunk
assert(self.chunks[reverse] == 0);
@ -168,9 +168,9 @@ const HuffmanDecoder = struct {
if (n == 0) {
continue;
}
var ncode = next_code[n];
const ncode = next_code[n];
next_code[n] += 1;
var chunk = @as(u16, @intCast((li << huffman_value_shift) | n));
const chunk = @as(u16, @intCast((li << huffman_value_shift) | n));
var reverse = @as(u16, @intCast(bu.bitReverse(u16, @as(u16, @intCast(ncode)), 16)));
reverse >>= @as(u4, @intCast(16 - n));
if (n <= huffman_chunk_bits) {
@ -187,14 +187,14 @@ const HuffmanDecoder = struct {
self.chunks[off] = chunk;
}
} else {
var j = reverse & (huffman_num_chunks - 1);
const j = reverse & (huffman_num_chunks - 1);
if (sanity) {
// Expect an indirect chunk
assert(self.chunks[j] & huffman_count_mask == huffman_chunk_bits + 1);
// Longer codes should have been
// associated with a link table above.
}
var value = self.chunks[j] >> huffman_value_shift;
const value = self.chunks[j] >> huffman_value_shift;
var link_tab = self.links[value];
reverse >>= huffman_chunk_bits;
var off = reverse;
@ -354,8 +354,8 @@ pub fn Decompressor(comptime ReaderType: type) type {
fn init(allocator: Allocator, in_reader: ReaderType, dict: ?[]const u8) !Self {
fixed_huffman_decoder = try fixedHuffmanDecoderInit(allocator);
var bits = try allocator.create([max_num_lit + max_num_dist]u32);
var codebits = try allocator.create([num_codes]u32);
const bits = try allocator.create([max_num_lit + max_num_dist]u32);
const codebits = try allocator.create([num_codes]u32);
var dd = ddec.DictDecoder{};
try dd.init(allocator, max_match_offset, dict);
@ -416,7 +416,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
}
self.final = self.b & 1 == 1;
self.b >>= 1;
var typ = self.b & 3;
const typ = self.b & 3;
self.b >>= 2;
self.nb -= 1 + 2;
switch (typ) {
@ -494,21 +494,21 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < 5 + 5 + 4) {
try self.moreBits();
}
var nlit = @as(u32, @intCast(self.b & 0x1F)) + 257;
const nlit = @as(u32, @intCast(self.b & 0x1F)) + 257;
if (nlit > max_num_lit) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
}
self.b >>= 5;
var ndist = @as(u32, @intCast(self.b & 0x1F)) + 1;
const ndist = @as(u32, @intCast(self.b & 0x1F)) + 1;
if (ndist > max_num_dist) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
}
self.b >>= 5;
var nclen = @as(u32, @intCast(self.b & 0xF)) + 4;
const nclen = @as(u32, @intCast(self.b & 0xF)) + 4;
// num_codes is 19, so nclen is always valid.
self.b >>= 4;
self.nb -= 5 + 5 + 4;
@ -536,9 +536,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
// HLIT + 257 code lengths, HDIST + 1 code lengths,
// using the code length Huffman code.
i = 0;
var n = nlit + ndist;
const n = nlit + ndist;
while (i < n) {
var x = try self.huffSym(&self.hd1);
const x = try self.huffSym(&self.hd1);
if (x < 16) {
// Actual length.
self.bits[i] = x;
@ -618,7 +618,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
switch (self.step_state) {
.init => {
// Read literal and/or (length, distance) according to RFC section 3.2.3.
var v = try self.huffSym(self.hl.?);
const v = try self.huffSym(self.hl.?);
var n: u32 = 0; // number of bits extra
var length: u32 = 0;
switch (v) {
@ -699,7 +699,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
switch (dist) {
0...3 => dist += 1,
4...max_num_dist - 1 => { // 4...29
var nb = @as(u32, @intCast(dist - 2)) >> 1;
const nb = @as(u32, @intCast(dist - 2)) >> 1;
// have 1 bit in bottom of dist, need nb more.
var extra = (dist & 1) << @as(u5, @intCast(nb));
while (self.nb < nb) {
@ -757,14 +757,14 @@ pub fn Decompressor(comptime ReaderType: type) type {
self.b = 0;
// Length then ones-complement of length.
var nr: u32 = 4;
const nr: u32 = 4;
self.inner_reader.readNoEof(self.buf[0..nr]) catch {
self.err = InflateError.UnexpectedEndOfStream;
return InflateError.UnexpectedEndOfStream;
};
self.roffset += @as(u64, @intCast(nr));
var n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8;
var nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8;
const n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8;
const nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8;
if (@as(u16, @intCast(nn)) != @as(u16, @truncate(~n))) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
@ -789,7 +789,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
buf = buf[0..self.copy_len];
}
var cnt = try self.inner_reader.read(buf);
const cnt = try self.inner_reader.read(buf);
if (cnt < buf.len) {
self.err = InflateError.UnexpectedEndOfStream;
}
@ -819,7 +819,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
}
fn moreBits(self: *Self) InflateError!void {
var c = self.inner_reader.readByte() catch |e| {
const c = self.inner_reader.readByte() catch |e| {
if (e == error.EndOfStream) {
return InflateError.UnexpectedEndOfStream;
}
@ -845,7 +845,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
var b = self.b;
while (true) {
while (nb < n) {
var c = self.inner_reader.readByte() catch |e| {
const c = self.inner_reader.readByte() catch |e| {
self.b = b;
self.nb = nb;
if (e == error.EndOfStream) {
@ -1053,7 +1053,7 @@ test "inflate A Tale of Two Cities (1859) intro" {
defer decomp.deinit();
var got: [700]u8 = undefined;
var got_len = try decomp.reader().read(&got);
const got_len = try decomp.reader().read(&got);
try testing.expectEqual(@as(usize, 616), got_len);
try testing.expectEqualSlices(u8, expected, got[0..expected.len]);
}
@ -1117,6 +1117,6 @@ fn decompress(input: []const u8) !void {
const reader = fib.reader();
var decomp = try decompressor(allocator, reader, null);
defer decomp.deinit();
var output = try decomp.reader().readAllAlloc(allocator, math.maxInt(usize));
const output = try decomp.reader().readAllAlloc(allocator, math.maxInt(usize));
defer std.testing.allocator.free(output);
}
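Nearly every hunk in this file is the same mechanical change: a local that is initialized once and never reassigned becomes 'const'. Recent Zig compilers reject a 'var' that is never mutated, which is presumably what motivated this commit. A minimal sketch of the rule, with hypothetical names (not code from this diff):

const std = @import("std");

test "single-assignment locals are const" {
    const result = computeOnce(); // assigned once, never reassigned: const
    var acc: u32 = result; // mutated below, so var is correct
    acc += 1;
    try std.testing.expectEqual(result + 1, acc);
}

fn computeOnce() u32 {
    return 41;
}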

View file

@ -30,7 +30,7 @@ const table_size = 1 << table_bits; // Size of the table.
const buffer_reset = math.maxInt(i32) - max_store_block_size * 2;
fn load32(b: []u8, i: i32) u32 {
var s = b[@as(usize, @intCast(i)) .. @as(usize, @intCast(i)) + 4];
const s = b[@as(usize, @intCast(i)) .. @as(usize, @intCast(i)) + 4];
return @as(u32, @intCast(s[0])) |
@as(u32, @intCast(s[1])) << 8 |
@as(u32, @intCast(s[2])) << 16 |
@ -38,7 +38,7 @@ fn load32(b: []u8, i: i32) u32 {
}
fn load64(b: []u8, i: i32) u64 {
var s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))];
const s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))];
return @as(u64, @intCast(s[0])) |
@as(u64, @intCast(s[1])) << 8 |
@as(u64, @intCast(s[2])) << 16 |
@ -117,7 +117,7 @@ pub const DeflateFast = struct {
// s_limit is when to stop looking for offset/length copies. The input_margin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
var s_limit = @as(i32, @intCast(src.len - input_margin));
const s_limit = @as(i32, @intCast(src.len - input_margin));
// next_emit is where in src the next emitLiteral should start from.
var next_emit: i32 = 0;
@ -147,18 +147,18 @@ pub const DeflateFast = struct {
var candidate: TableEntry = undefined;
while (true) {
s = next_s;
var bytes_between_hash_lookups = skip >> 5;
const bytes_between_hash_lookups = skip >> 5;
next_s = s + bytes_between_hash_lookups;
skip += bytes_between_hash_lookups;
if (next_s > s_limit) {
break :outer;
}
candidate = self.table[next_hash & table_mask];
var now = load32(src, next_s);
const now = load32(src, next_s);
self.table[next_hash & table_mask] = .{ .offset = s + self.cur, .val = cv };
next_hash = hash(now);
var offset = s - (candidate.offset - self.cur);
const offset = s - (candidate.offset - self.cur);
if (offset > max_match_offset or cv != candidate.val) {
// Out of range or not matched.
cv = now;
@ -187,8 +187,8 @@ pub const DeflateFast = struct {
// Extend the 4-byte match as long as possible.
//
s += 4;
var t = candidate.offset - self.cur + 4;
var l = self.matchLen(s, t, src);
const t = candidate.offset - self.cur + 4;
const l = self.matchLen(s, t, src);
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
dst[tokens_count.*] = token.matchToken(
@ -209,20 +209,20 @@ pub const DeflateFast = struct {
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
var x = load64(src, s - 1);
var prev_hash = hash(@as(u32, @truncate(x)));
const prev_hash = hash(@as(u32, @truncate(x)));
self.table[prev_hash & table_mask] = TableEntry{
.offset = self.cur + s - 1,
.val = @as(u32, @truncate(x)),
};
x >>= 8;
var curr_hash = hash(@as(u32, @truncate(x)));
const curr_hash = hash(@as(u32, @truncate(x)));
candidate = self.table[curr_hash & table_mask];
self.table[curr_hash & table_mask] = TableEntry{
.offset = self.cur + s,
.val = @as(u32, @truncate(x)),
};
var offset = s - (candidate.offset - self.cur);
const offset = s - (candidate.offset - self.cur);
if (offset > max_match_offset or @as(u32, @truncate(x)) != candidate.val) {
cv = @as(u32, @truncate(x >> 8));
next_hash = hash(cv);
@ -261,7 +261,7 @@ pub const DeflateFast = struct {
// If we are inside the current block
if (t >= 0) {
var b = src[@as(usize, @intCast(t))..];
var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
const a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
b = b[0..a.len];
// Extend the match to be as long as possible.
for (a, 0..) |_, i| {
@ -273,7 +273,7 @@ pub const DeflateFast = struct {
}
// We found a match in the previous block.
var tp = @as(i32, @intCast(self.prev_len)) + t;
const tp = @as(i32, @intCast(self.prev_len)) + t;
if (tp < 0) {
return 0;
}
@ -293,7 +293,7 @@ pub const DeflateFast = struct {
// If we reached our limit, we matched everything we are
// allowed to in the previous block and we return.
var n = @as(i32, @intCast(b.len));
const n = @as(i32, @intCast(b.len));
if (@as(u32, @intCast(s + n)) == s1) {
return n;
}
@ -366,7 +366,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 3, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
const got: i32 = e.matchLen(3, -3, &current);
try expectEqual(@as(i32, 6), got);
}
{
@ -379,7 +379,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 2, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
const got: i32 = e.matchLen(3, -3, &current);
try expectEqual(@as(i32, 3), got);
}
{
@ -392,7 +392,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 3, 4, 5, 0, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(3, -3, &current);
const got: i32 = e.matchLen(3, -3, &current);
try expectEqual(@as(i32, 2), got);
}
{
@ -405,7 +405,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(0, -1, &current);
const got: i32 = e.matchLen(0, -1, &current);
try expectEqual(@as(i32, 4), got);
}
{
@ -418,7 +418,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(4, -7, &current);
const got: i32 = e.matchLen(4, -7, &current);
try expectEqual(@as(i32, 5), got);
}
{
@ -431,7 +431,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(0, -1, &current);
const got: i32 = e.matchLen(0, -1, &current);
try expectEqual(@as(i32, 0), got);
}
{
@ -444,7 +444,7 @@ test "best speed match 1/3" {
.cur = 0,
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, 0, &current);
const got: i32 = e.matchLen(1, 0, &current);
try expectEqual(@as(i32, 0), got);
}
}
@ -462,7 +462,7 @@ test "best speed match 2/3" {
.cur = 0,
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, -5, &current);
const got: i32 = e.matchLen(1, -5, &current);
try expectEqual(@as(i32, 0), got);
}
{
@ -475,7 +475,7 @@ test "best speed match 2/3" {
.cur = 0,
};
var current = [_]u8{ 9, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, -1, &current);
const got: i32 = e.matchLen(1, -1, &current);
try expectEqual(@as(i32, 0), got);
}
{
@ -488,7 +488,7 @@ test "best speed match 2/3" {
.cur = 0,
};
var current = [_]u8{ 2, 2, 2, 2, 1, 2, 3, 4, 5 };
var got: i32 = e.matchLen(1, 0, &current);
const got: i32 = e.matchLen(1, 0, &current);
try expectEqual(@as(i32, 3), got);
}
{
@ -501,7 +501,7 @@ test "best speed match 2/3" {
.cur = 0,
};
var current = [_]u8{ 3, 4, 5 };
var got: i32 = e.matchLen(0, -3, &current);
const got: i32 = e.matchLen(0, -3, &current);
try expectEqual(@as(i32, 3), got);
}
}
@ -564,11 +564,11 @@ test "best speed match 2/2" {
};
for (cases) |c| {
var previous = try testing.allocator.alloc(u8, c.previous);
const previous = try testing.allocator.alloc(u8, c.previous);
defer testing.allocator.free(previous);
@memset(previous, 0);
var current = try testing.allocator.alloc(u8, c.current);
const current = try testing.allocator.alloc(u8, c.current);
defer testing.allocator.free(current);
@memset(current, 0);
@ -579,7 +579,7 @@ test "best speed match 2/2" {
.allocator = undefined,
.cur = 0,
};
var got: i32 = e.matchLen(c.s, c.t, current);
const got: i32 = e.matchLen(c.s, c.t, current);
try expectEqual(@as(i32, c.expected), got);
}
}
@ -609,10 +609,10 @@ test "best speed shift offsets" {
// Second part should pick up matches from the first block.
tokens_count = 0;
enc.encode(&tokens, &tokens_count, &test_data);
var want_first_tokens = tokens_count;
const want_first_tokens = tokens_count;
tokens_count = 0;
enc.encode(&tokens, &tokens_count, &test_data);
var want_second_tokens = tokens_count;
const want_second_tokens = tokens_count;
try expect(want_first_tokens > want_second_tokens);
@ -657,7 +657,7 @@ test "best speed reset" {
const ArrayList = std.ArrayList;
const input_size = 65536;
var input = try testing.allocator.alloc(u8, input_size);
const input = try testing.allocator.alloc(u8, input_size);
defer testing.allocator.free(input);
var i: usize = 0;
@ -699,7 +699,7 @@ test "best speed reset" {
// Reset until we are right before the wraparound.
// Each reset adds max_match_offset to the offset.
i = 0;
var limit = (buffer_reset - input.len - o - max_match_offset) / max_match_offset;
const limit = (buffer_reset - input.len - o - max_match_offset) / max_match_offset;
while (i < limit) : (i += 1) {
// skip ahead to where we are close to wrap around...
comp.reset(discard.writer());
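The load32/load64 helpers changed above assemble a little-endian word from individual bytes with shifts and ORs. For reference, the same read expressed through std.mem.readInt, assuming a Zig version where readInt takes an endianness argument; this is a restatement of what the shift chain computes, not code from this commit:

const std = @import("std");

test "shift-or load matches a little-endian readInt" {
    const b = [_]u8{ 0x78, 0x56, 0x34, 0x12 };
    // Same byte-assembly pattern as load32 above.
    const manual = @as(u32, b[0]) |
        @as(u32, b[1]) << 8 |
        @as(u32, b[2]) << 16 |
        @as(u32, b[3]) << 24;
    try std.testing.expectEqual(@as(u32, 0x12345678), manual);
    try std.testing.expectEqual(manual, std.mem.readInt(u32, b[0..4], .little));
}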

View file

@ -39,18 +39,18 @@ test "best speed" {
var tc_15 = [_]u32{ 65536, 129 };
var tc_16 = [_]u32{ 65536, 65536, 256 };
var tc_17 = [_]u32{ 65536, 65536, 65536 };
var test_cases = [_][]u32{
const test_cases = [_][]u32{
&tc_01, &tc_02, &tc_03, &tc_04, &tc_05, &tc_06, &tc_07, &tc_08, &tc_09, &tc_10,
&tc_11, &tc_12, &tc_13, &tc_14, &tc_15, &tc_16, &tc_17,
};
for (test_cases) |tc| {
var firsts = [_]u32{ 1, 65534, 65535, 65536, 65537, 131072 };
const firsts = [_]u32{ 1, 65534, 65535, 65536, 65537, 131072 };
for (firsts) |first_n| {
tc[0] = first_n;
var to_flush = [_]bool{ false, true };
const to_flush = [_]bool{ false, true };
for (to_flush) |flush| {
var compressed = ArrayList(u8).init(testing.allocator);
defer compressed.deinit();
@ -75,14 +75,14 @@ test "best speed" {
try comp.close();
var decompressed = try testing.allocator.alloc(u8, want.items.len);
const decompressed = try testing.allocator.alloc(u8, want.items.len);
defer testing.allocator.free(decompressed);
var fib = io.fixedBufferStream(compressed.items);
var decomp = try inflate.decompressor(testing.allocator, fib.reader(), null);
defer decomp.deinit();
var read = try decomp.reader().readAll(decompressed);
const read = try decomp.reader().readAll(decompressed);
_ = decomp.close();
try testing.expectEqual(want.items.len, read);
@ -109,7 +109,7 @@ test "best speed max match offset" {
for (extras) |extra| {
var offset_adj: i32 = -5;
while (offset_adj <= 5) : (offset_adj += 1) {
var offset = deflate_const.max_match_offset + offset_adj;
const offset = deflate_const.max_match_offset + offset_adj;
// Make src a []u8 of the form
// fmt("{s}{s}{s}{s}{s}", .{abc, zeros0, xyzMaybe, abc, zeros1})
@ -119,7 +119,7 @@ test "best speed max match offset" {
// zeros1 is between 0 and 30 zeros.
// The difference between the two abc's will be offset, which
// is max_match_offset plus or minus a small adjustment.
var src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra))));
const src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra))));
var src = try testing.allocator.alloc(u8, src_len);
defer testing.allocator.free(src);
@ -143,13 +143,13 @@ test "best speed max match offset" {
try comp.writer().writeAll(src);
_ = try comp.close();
var decompressed = try testing.allocator.alloc(u8, src.len);
const decompressed = try testing.allocator.alloc(u8, src.len);
defer testing.allocator.free(decompressed);
var fib = io.fixedBufferStream(compressed.items);
var decomp = try inflate.decompressor(testing.allocator, fib.reader(), null);
defer decomp.deinit();
var read = try decomp.reader().readAll(decompressed);
const read = try decomp.reader().readAll(decompressed);
_ = decomp.close();
try testing.expectEqual(src.len, read);
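The comment in the hunk above describes building src as two copies of "abc" separated by roughly max_match_offset bytes of zeros. A stripped-down sketch of that construction with small stand-in numbers (the real test varies offset_adj from -5 to 5):

const std = @import("std");

test "two markers separated by a chosen offset" {
    const allocator = std.testing.allocator;
    const abc = "abcdefgh";
    const offset: usize = 32; // stand-in for max_match_offset + offset_adj
    const src = try allocator.alloc(u8, offset + abc.len);
    defer allocator.free(src);
    @memset(src, 0);
    @memcpy(src[0..abc.len], abc);
    @memcpy(src[offset..][0..abc.len], abc);
    // The second copy begins exactly `offset` bytes after the first; the
    // compressor may only reuse the first copy if offset <= max_match_offset.
    try std.testing.expectEqualSlices(u8, abc, src[offset..][0..abc.len]);
}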

View file

@ -123,7 +123,7 @@ pub const DictDecoder = struct {
// This invariant must be kept: 0 < dist <= histSize()
pub fn writeCopy(self: *Self, dist: u32, length: u32) u32 {
assert(0 < dist and dist <= self.histSize());
var dst_base = self.wr_pos;
const dst_base = self.wr_pos;
var dst_pos = dst_base;
var src_pos: i32 = @as(i32, @intCast(dst_pos)) - @as(i32, @intCast(dist));
var end_pos = dst_pos + length;
@ -175,12 +175,12 @@ pub const DictDecoder = struct {
// This invariant must be kept: 0 < dist <= histSize()
pub fn tryWriteCopy(self: *Self, dist: u32, length: u32) u32 {
var dst_pos = self.wr_pos;
var end_pos = dst_pos + length;
const end_pos = dst_pos + length;
if (dst_pos < dist or end_pos > self.hist.len) {
return 0;
}
var dst_base = dst_pos;
var src_pos = dst_pos - dist;
const dst_base = dst_pos;
const src_pos = dst_pos - dist;
// Copy possibly overlapping section before destination position.
while (dst_pos < end_pos) {
@ -195,7 +195,7 @@ pub const DictDecoder = struct {
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other DictDecoder methods.
pub fn readFlush(self: *Self) []u8 {
var to_read = self.hist[self.rd_pos..self.wr_pos];
const to_read = self.hist[self.rd_pos..self.wr_pos];
self.rd_pos = self.wr_pos;
if (self.wr_pos == self.hist.len) {
self.wr_pos = 0;
@ -279,7 +279,7 @@ test "dictionary decoder" {
length: u32, // Length of copy or insertion
};
var poem_refs = [_]PoemRefs{
const poem_refs = [_]PoemRefs{
.{ .dist = 0, .length = 38 }, .{ .dist = 33, .length = 3 }, .{ .dist = 0, .length = 48 },
.{ .dist = 79, .length = 3 }, .{ .dist = 0, .length = 11 }, .{ .dist = 34, .length = 5 },
.{ .dist = 0, .length = 6 }, .{ .dist = 23, .length = 7 }, .{ .dist = 0, .length = 8 },
@ -368,7 +368,7 @@ test "dictionary decoder" {
fn writeString(dst_dd: *DictDecoder, dst: anytype, str: []const u8) !void {
var string = str;
while (string.len > 0) {
var cnt = DictDecoder.copy(dst_dd.writeSlice(), string);
const cnt = DictDecoder.copy(dst_dd.writeSlice(), string);
dst_dd.writeMark(cnt);
string = string[cnt..];
if (dst_dd.availWrite() == 0) {
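writeCopy and tryWriteCopy above are careful about the "possibly overlapping section": when dist is smaller than length, the source of the copy overlaps bytes that are still being produced, so the copy must run front to back. A toy illustration with a hypothetical helper (not the DictDecoder API):

const std = @import("std");

// When dist < length, the source range overlaps the bytes being produced,
// so the copy must proceed byte by byte, re-reading bytes just written;
// a single memcpy over the two ranges would be incorrect.
fn lzCopy(hist: []u8, pos: usize, dist: usize, length: usize) void {
    var i: usize = 0;
    while (i < length) : (i += 1) {
        hist[pos + i] = hist[pos + i - dist];
    }
}

test "dist = 1 expands a single byte into a run" {
    var hist = [_]u8{ 'a', 0, 0, 0, 0 };
    lzCopy(&hist, 1, 1, 4);
    try std.testing.expectEqualSlices(u8, "aaaaa", &hist);
}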

View file

@ -134,7 +134,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits));
self.nbits += nb;
if (self.nbits >= 48) {
var bits = self.bits;
const bits = self.bits;
self.bits >>= 48;
self.nbits -= 48;
var n = self.nbytes;
@ -224,7 +224,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
while (size != bad_code) : (in_index += 1) {
// INVARIANT: We have seen "count" copies of size that have not yet
// had output generated for them.
var next_size = codegen[in_index];
const next_size = codegen[in_index];
if (next_size == size) {
count += 1;
continue;
@ -295,12 +295,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
while (num_codegens > 4 and self.codegen_freq[codegen_order[num_codegens - 1]] == 0) {
num_codegens -= 1;
}
var header = 3 + 5 + 5 + 4 + (3 * num_codegens) +
const header = 3 + 5 + 5 + 4 + (3 * num_codegens) +
self.codegen_encoding.bitLength(self.codegen_freq[0..]) +
self.codegen_freq[16] * 2 +
self.codegen_freq[17] * 3 +
self.codegen_freq[18] * 7;
var size = header +
const size = header +
lit_enc.bitLength(self.literal_freq) +
off_enc.bitLength(self.offset_freq) +
extra_bits;
@ -339,7 +339,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
self.nbits += @as(u32, @intCast(c.len));
if (self.nbits >= 48) {
var bits = self.bits;
const bits = self.bits;
self.bits >>= 48;
self.nbits -= 48;
var n = self.nbytes;
@ -386,13 +386,13 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var i: u32 = 0;
while (i < num_codegens) : (i += 1) {
var value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len));
const value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len));
try self.writeBits(@as(u32, @intCast(value)), 3);
}
i = 0;
while (true) {
var code_word: u32 = @as(u32, @intCast(self.codegen[i]));
const code_word: u32 = @as(u32, @intCast(self.codegen[i]));
i += 1;
if (code_word == bad_code) {
break;
@ -458,14 +458,14 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
return;
}
var lit_and_off = self.indexTokens(tokens);
var num_literals = lit_and_off.num_literals;
var num_offsets = lit_and_off.num_offsets;
const lit_and_off = self.indexTokens(tokens);
const num_literals = lit_and_off.num_literals;
const num_offsets = lit_and_off.num_offsets;
var extra_bits: u32 = 0;
var ret = storedSizeFits(input);
var stored_size = ret.size;
var storable = ret.storable;
const ret = storedSizeFits(input);
const stored_size = ret.size;
const storable = ret.storable;
if (storable) {
// We only bother calculating the costs of the extra bits required by
@ -504,12 +504,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
&self.offset_encoding,
);
self.codegen_encoding.generate(self.codegen_freq[0..], 7);
var dynamic_size = self.dynamicSize(
const dynamic_size = self.dynamicSize(
&self.literal_encoding,
&self.offset_encoding,
extra_bits,
);
var dyn_size = dynamic_size.size;
const dyn_size = dynamic_size.size;
num_codegens = dynamic_size.num_codegens;
if (dyn_size < size) {
@ -551,9 +551,9 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
return;
}
var total_tokens = self.indexTokens(tokens);
var num_literals = total_tokens.num_literals;
var num_offsets = total_tokens.num_offsets;
const total_tokens = self.indexTokens(tokens);
const num_literals = total_tokens.num_literals;
const num_offsets = total_tokens.num_offsets;
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literal_encoding and the offset_encoding.
@ -564,15 +564,15 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
&self.offset_encoding,
);
self.codegen_encoding.generate(self.codegen_freq[0..], 7);
var dynamic_size = self.dynamicSize(&self.literal_encoding, &self.offset_encoding, 0);
var size = dynamic_size.size;
var num_codegens = dynamic_size.num_codegens;
const dynamic_size = self.dynamicSize(&self.literal_encoding, &self.offset_encoding, 0);
const size = dynamic_size.size;
const num_codegens = dynamic_size.num_codegens;
// Store bytes, if we don't get a reasonable improvement.
var stored_size = storedSizeFits(input);
var ssize = stored_size.size;
var storable = stored_size.storable;
const stored_size = storedSizeFits(input);
const ssize = stored_size.size;
const storable = stored_size.storable;
if (storable and ssize < (size + (size >> 4))) {
try self.writeStoredHeader(input.?.len, eof);
try self.writeBytes(input.?);
@ -611,8 +611,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.literal_freq[token.literal(t)] += 1;
continue;
}
var length = token.length(t);
var offset = token.offset(t);
const length = token.length(t);
const offset = token.offset(t);
self.literal_freq[length_codes_start + token.lengthCode(length)] += 1;
self.offset_freq[token.offsetCode(offset)] += 1;
}
@ -660,21 +660,21 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
continue;
}
// Write the length
var length = token.length(t);
var length_code = token.lengthCode(length);
const length = token.length(t);
const length_code = token.lengthCode(length);
try self.writeCode(le_codes[length_code + length_codes_start]);
var extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code]));
const extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code]));
if (extra_length_bits > 0) {
var extra_length = @as(u32, @intCast(length - length_base[length_code]));
const extra_length = @as(u32, @intCast(length - length_base[length_code]));
try self.writeBits(extra_length, extra_length_bits);
}
// Write the offset
var offset = token.offset(t);
var offset_code = token.offsetCode(offset);
const offset = token.offset(t);
const offset_code = token.offsetCode(offset);
try self.writeCode(oe_codes[offset_code]);
var extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code]));
const extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code]));
if (extra_offset_bits > 0) {
var extra_offset = @as(u32, @intCast(offset - offset_base[offset_code]));
const extra_offset = @as(u32, @intCast(offset - offset_base[offset_code]));
try self.writeBits(extra_offset, extra_offset_bits);
}
}
@ -718,15 +718,15 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
&self.huff_offset,
);
self.codegen_encoding.generate(self.codegen_freq[0..], 7);
var dynamic_size = self.dynamicSize(&self.literal_encoding, &self.huff_offset, 0);
var size = dynamic_size.size;
const dynamic_size = self.dynamicSize(&self.literal_encoding, &self.huff_offset, 0);
const size = dynamic_size.size;
num_codegens = dynamic_size.num_codegens;
// Store bytes, if we don't get a reasonable improvement.
var stored_size_ret = storedSizeFits(input);
var ssize = stored_size_ret.size;
var storable = stored_size_ret.storable;
const stored_size_ret = storedSizeFits(input);
const ssize = stored_size_ret.size;
const storable = stored_size_ret.storable;
if (storable and ssize < (size + (size >> 4))) {
try self.writeStoredHeader(input.len, eof);
@ -736,18 +736,18 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
// Huffman.
try self.writeDynamicHeader(num_literals, num_offsets, num_codegens, eof);
var encoding = self.literal_encoding.codes[0..257];
const encoding = self.literal_encoding.codes[0..257];
var n = self.nbytes;
for (input) |t| {
// Bitwriting inlined, ~30% speedup
var c = encoding[t];
const c = encoding[t];
self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
self.nbits += @as(u32, @intCast(c.len));
if (self.nbits < 48) {
continue;
}
// Store 6 bytes
var bits = self.bits;
const bits = self.bits;
self.bits >>= 48;
self.nbits -= 48;
var bytes = self.bytes[n..][0..6];
@ -1679,7 +1679,7 @@ fn testWriterEOF(ttype: TestType, ht_tokens: []const token.Token, input: []const
try bw.flush();
var b = buf.items;
const b = buf.items;
try expect(b.len > 0);
try expect(b[0] & 1 == 1);
}
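Several hunks in this file touch the same inlined bit-writing pattern: codes are OR'ed into a 64-bit accumulator at the current bit offset, and once 48 bits are pending, six whole bytes are flushed, leaving at most 15 live bits behind. A self-contained sketch of that pattern (the names and the ArrayList sink are mine, not the writer's internals):

const std = @import("std");

const BitSink = struct {
    bits: u64 = 0,
    nbits: u32 = 0,
    out: std.ArrayList(u8),

    fn writeBits(self: *BitSink, b: u32, nb: u32) !void {
        self.bits |= @as(u64, b) << @as(u6, @intCast(self.nbits));
        self.nbits += nb;
        if (self.nbits >= 48) {
            // Flush the low six bytes; at most 15 bits stay pending.
            const bits = self.bits;
            self.bits >>= 48;
            self.nbits -= 48;
            var i: u6 = 0;
            while (i < 6) : (i += 1) {
                try self.out.append(@as(u8, @truncate(bits >> (8 * i))));
            }
        }
    }
};

test "six bytes are flushed once 48 bits are pending" {
    var sink = BitSink{ .out = std.ArrayList(u8).init(std.testing.allocator) };
    defer sink.out.deinit();
    var i: u32 = 0;
    while (i < 6) : (i += 1) try sink.writeBits(0xAB, 8);
    try std.testing.expectEqual(@as(usize, 6), sink.out.items.len);
    try std.testing.expectEqual(@as(u32, 0), sink.nbits);
}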

View file

@ -96,7 +96,7 @@ pub const HuffmanEncoder = struct {
mem.sort(LiteralNode, self.lfs, {}, byFreq);
// Get the number of literals for each bit count
var bit_count = self.bitCounts(list, max_bits);
const bit_count = self.bitCounts(list, max_bits);
// And do the assignment
self.assignEncodingAndSize(bit_count, list);
}
@ -128,7 +128,7 @@ pub const HuffmanEncoder = struct {
// that should be encoded in i bits.
fn bitCounts(self: *HuffmanEncoder, list: []LiteralNode, max_bits_to_use: usize) []u32 {
var max_bits = max_bits_to_use;
var n = list.len;
const n = list.len;
assert(max_bits < max_bits_limit);
@ -184,10 +184,10 @@ pub const HuffmanEncoder = struct {
continue;
}
var prev_freq = l.last_freq;
const prev_freq = l.last_freq;
if (l.next_char_freq < l.next_pair_freq) {
// The next item on this row is a leaf node.
var next = leaf_counts[level][level] + 1;
const next = leaf_counts[level][level] + 1;
l.last_freq = l.next_char_freq;
// Lower leaf_counts are the same as the previous node.
leaf_counts[level][level] = next;
@ -236,7 +236,7 @@ pub const HuffmanEncoder = struct {
var bit_count = self.bit_count[0 .. max_bits + 1];
var bits: u32 = 1;
var counts = &leaf_counts[max_bits];
const counts = &leaf_counts[max_bits];
{
var level = max_bits;
while (level > 0) : (level -= 1) {
@ -267,7 +267,7 @@ pub const HuffmanEncoder = struct {
// are encoded using "bits" bits, and get the values
// code, code + 1, .... The code values are
// assigned in literal order (not frequency order).
var chunk = list[list.len - @as(u32, @intCast(bits)) ..];
const chunk = list[list.len - @as(u32, @intCast(bits)) ..];
self.lns = chunk;
mem.sort(LiteralNode, self.lns, {}, byLiteral);
@ -303,7 +303,7 @@ pub fn newHuffmanEncoder(allocator: Allocator, size: u32) !HuffmanEncoder {
// Generates a HuffmanCode corresponding to the fixed literal table
pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
var h = try newHuffmanEncoder(allocator, deflate_const.max_num_frequencies);
const h = try newHuffmanEncoder(allocator, deflate_const.max_num_frequencies);
var codes = h.codes;
var ch: u16 = 0;
@ -338,7 +338,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
}
pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder {
var h = try newHuffmanEncoder(allocator, 30);
const h = try newHuffmanEncoder(allocator, 30);
var codes = h.codes;
for (codes, 0..) |_, ch| {
codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 };
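generateFixedOffsetEncoding above bit-reverses each 5-bit code. DEFLATE packs Huffman codes into the stream most-significant bit first, while everything else goes in least-significant bit first, so a code must be reversed within its length before an LSB-first bit writer can emit it; bu.bitReverse(u16, ch, 5) performs that reversal over the low 5 bits. A one-line check of the transform using the @bitReverse builtin on a 5-bit integer:

const std = @import("std");

test "reversing a 5-bit Huffman code" {
    // 0b00110 read back to front is 0b01100: the MSB-first code 6 becomes
    // the LSB-first pattern 12 that the bit writer can emit directly.
    const code: u5 = 0b00110;
    try std.testing.expectEqual(@as(u5, 0b01100), @bitReverse(code));
}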

View file

@ -268,7 +268,7 @@ test "zstandard decompression" {
const compressed3 = @embedFile("testdata/rfc8478.txt.zst.3");
const compressed19 = @embedFile("testdata/rfc8478.txt.zst.19");
var buffer = try std.testing.allocator.alloc(u8, uncompressed.len);
const buffer = try std.testing.allocator.alloc(u8, uncompressed.len);
defer std.testing.allocator.free(buffer);
const res3 = try decompress.decode(buffer, compressed3, true);

View file

@ -54,7 +54,7 @@ fn decodeFseHuffmanTreeSlice(src: []const u8, compressed_size: usize, weights: *
const start_index = std.math.cast(usize, counting_reader.bytes_read) orelse
return error.MalformedHuffmanTree;
var huff_data = src[start_index..compressed_size];
const huff_data = src[start_index..compressed_size];
var huff_bits: readers.ReverseBitReader = undefined;
huff_bits.init(huff_data) catch return error.MalformedHuffmanTree;

View file

@ -304,7 +304,7 @@ pub fn decodeZstandardFrame(
var frame_context = context: {
var fbs = std.io.fixedBufferStream(src[consumed_count..]);
var source = fbs.reader();
const source = fbs.reader();
const frame_header = try decodeZstandardHeader(source);
consumed_count += fbs.pos;
break :context FrameContext.init(
@ -447,7 +447,7 @@ pub fn decodeZstandardFrameArrayList(
var frame_context = context: {
var fbs = std.io.fixedBufferStream(src[consumed_count..]);
var source = fbs.reader();
const source = fbs.reader();
const frame_header = try decodeZstandardHeader(source);
consumed_count += fbs.pos;
break :context try FrameContext.init(frame_header, window_size_max, verify_checksum);

View file

@ -129,7 +129,7 @@ test "non-affine edwards25519 to curve25519 projection" {
const skh = "90e7595fc89e52fdfddce9c6a43d74dbf6047025ee0462d2d172e8b6a2841d6e";
var sk: [32]u8 = undefined;
_ = std.fmt.hexToBytes(&sk, skh) catch unreachable;
var edp = try crypto.ecc.Edwards25519.basePoint.mul(sk);
const edp = try crypto.ecc.Edwards25519.basePoint.mul(sk);
const xp = try Curve25519.fromEdwards25519(edp);
const expected_hex = "cc4f2cdb695dd766f34118eb67b98652fed1d8bc49c330b119bbfa8a64989378";
var expected: [32]u8 = undefined;

View file

@ -416,7 +416,7 @@ pub const Fe = struct {
/// Compute the square root of `x2`, returning `error.NotSquare` if `x2` was not a square
pub fn sqrt(x2: Fe) NotSquareError!Fe {
var x2_copy = x2;
const x2_copy = x2;
const x = x2.uncheckedSqrt();
const check = x.sq().sub(x2_copy);
if (check.isZero()) {

View file

@ -982,7 +982,7 @@ pub const rsa = struct {
if (mgf_len > mgf_out_buf.len) { // Modulus > 4096 bits
return error.InvalidSignature;
}
var mgf_out = mgf_out_buf[0 .. ((mgf_len - 1) / Hash.digest_length + 1) * Hash.digest_length];
const mgf_out = mgf_out_buf[0 .. ((mgf_len - 1) / Hash.digest_length + 1) * Hash.digest_length];
var dbMask = try MGF1(Hash, mgf_out, h, mgf_len);
// 8. Let DB = maskedDB \xor dbMask.
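The slice bound on mgf_out above rounds mgf_len up to a whole number of digest blocks, since MGF1 produces its mask one digest at a time. The arithmetic, checked with stand-in numbers (digest_length 32 as for a hypothetical SHA-256 instantiation):

const std = @import("std");

test "mask length rounds up to whole digest blocks" {
    const digest_length = 32; // stand-in for Hash.digest_length above
    const mgf_len = 50;
    // Same expression as the slice bound: ceil(mgf_len / digest_length) blocks.
    const rounded = ((mgf_len - 1) / digest_length + 1) * digest_length;
    try std.testing.expectEqual(@as(usize, 64), rounded); // ceil(50/32) = 2 blocks
}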

View file

@ -47,7 +47,7 @@ test "ctr" {
};
var out: [exp_out.len]u8 = undefined;
var ctx = Aes128.initEnc(key);
const ctx = Aes128.initEnc(key);
ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, std.builtin.Endian.big);
try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
}

View file

@ -95,7 +95,7 @@ fn AesOcb(comptime Aes: anytype) type {
var ktop_: Block = undefined;
aes_enc_ctx.encrypt(&ktop_, &nx);
const ktop = mem.readInt(u128, &ktop_, .big);
var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56)));
const stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56)));
var offset: Block = undefined;
mem.writeInt(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom)))), .big);
return offset;

View file

@ -565,7 +565,7 @@ const PhcFormatHasher = struct {
const expected_hash = hash_result.hash.constSlice();
var hash_buf: [max_hash_len]u8 = undefined;
if (expected_hash.len > hash_buf.len) return HasherError.InvalidEncoding;
var hash = hash_buf[0..expected_hash.len];
const hash = hash_buf[0..expected_hash.len];
try kdf(allocator, hash, password, hash_result.salt.constSlice(), params, mode);
if (!mem.eql(u8, hash, expected_hash)) return HasherError.PasswordVerificationFailed;

View file

@ -42,8 +42,7 @@ pub fn State(comptime endian: std.builtin.Endian) type {
/// Initialize the state from u64 words in native endianness.
pub fn initFromWords(initial_state: [5]u64) Self {
var state = Self{ .st = initial_state };
return state;
return .{ .st = initial_state };
}
/// Initialize the state for Ascon XOF
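This is one of the few hunks in the commit that does more than swap a keyword: the mutable temporary is removed entirely and the literal is returned directly, with its type inferred from the return type. A sketch of the equivalent shape (S is a stand-in for the Ascon state type):

const std = @import("std");

const S = struct { st: [5]u64 };

// `.{ .st = ... }` infers S from the function's return type, so no named
// temporary is needed.
fn initFromWords(words: [5]u64) S {
    return .{ .st = words };
}

test "literal return carries the words through" {
    const s = initFromWords(.{ 1, 2, 3, 4, 5 });
    try std.testing.expectEqual(@as(u64, 5), s.st[4]);
}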

View file

@ -431,7 +431,7 @@ pub fn bcrypt(
const trimmed_len = @min(password.len, password_buf.len - 1);
@memcpy(password_buf[0..trimmed_len], password[0..trimmed_len]);
password_buf[trimmed_len] = 0;
var passwordZ = password_buf[0 .. trimmed_len + 1];
const passwordZ = password_buf[0 .. trimmed_len + 1];
state.expand(salt[0..], passwordZ);
const rounds: u64 = @as(u64, 1) << params.rounds_log;

View file

@ -241,7 +241,7 @@ const Output = struct {
var out_block_it = ChunkIterator.init(output, 2 * OUT_LEN);
var output_block_counter: usize = 0;
while (out_block_it.next()) |out_block| {
var words = compress(
const words = compress(
self.input_chaining_value,
self.block_words,
self.block_len,

View file

@ -201,7 +201,7 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
const scalar_encoded_length = Curve.scalar.encoded_length;
const h_len = @max(Hash.digest_length, scalar_encoded_length);
var h: [h_len]u8 = [_]u8{0} ** h_len;
var h_slice = h[h_len - Hash.digest_length .. h_len];
const h_slice = h[h_len - Hash.digest_length .. h_len];
self.h.final(h_slice);
std.debug.assert(h.len >= scalar_encoded_length);

View file

@ -255,10 +255,8 @@ test "Very large dk_len" {
const c = 1;
const dk_len = 1 << 33;
var dk = try std.testing.allocator.alloc(u8, dk_len);
defer {
std.testing.allocator.free(dk);
}
const dk = try std.testing.allocator.alloc(u8, dk_len);
defer std.testing.allocator.free(dk);
// Just verify this doesn't crash with an overflow
try pbkdf2(dk, p, s, c, HmacSha1);

View file

@ -71,7 +71,7 @@ pub fn Field(comptime params: FieldParams) type {
/// Unpack a field element.
pub fn fromBytes(s_: [encoded_length]u8, endian: std.builtin.Endian) NonCanonicalError!Fe {
var s = if (endian == .little) s_ else orderSwap(s_);
const s = if (endian == .little) s_ else orderSwap(s_);
try rejectNonCanonical(s, .little);
var limbs_z: NonMontgomeryDomainFieldElement = undefined;
fiat.fromBytes(&limbs_z, s);

View file

@ -90,8 +90,8 @@ pub const Poly1305 = struct {
h2 = t2 & 3;
// Add c*(4+1)
var cclo = t2 & ~@as(u64, 3);
var cchi = t3;
const cclo = t2 & ~@as(u64, 3);
const cchi = t3;
v = @addWithOverflow(h0, cclo);
h0 = v[0];
v = add(h1, cchi, v[1]);
@ -163,7 +163,7 @@ pub const Poly1305 = struct {
var h0 = st.h[0];
var h1 = st.h[1];
var h2 = st.h[2];
const h2 = st.h[2];
// H - (2^130 - 5)
var v = @subWithOverflow(h0, 0xfffffffffffffffb);
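The "Add c*(4+1)" and "H - (2^130 - 5)" comments above are both instances of reduction modulo p = 2^130 - 5: a carry c above bit 130 satisfies c*2^130 ≡ 5c = 4c + c (mod p), and the final step conditionally subtracts p once. The same fold-back with a toy modulus of the same 2^k - 5 shape:

const std = @import("std");

test "fold-back reduction modulo 2^k - 5" {
    const k = 6;
    const p: u32 = (1 << k) - 5; // 59; Poly1305 uses 2^130 - 5
    const x: u32 = 200;
    // Keep the low k bits and fold the carry back in as carry*5
    // (i.e. carry*4 + carry), mirroring the "Add c*(4+1)" step above.
    var r = (x & ((1 << k) - 1)) + (x >> k) * 5;
    while (r >= p) r -= p; // the "H - (2^130 - 5)" conditional subtraction
    try std.testing.expectEqual(x % p, r);
}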

View file

@ -605,8 +605,8 @@ test "xsalsa20poly1305 box" {
crypto.random.bytes(&msg);
crypto.random.bytes(&nonce);
var kp1 = try Box.KeyPair.create(null);
var kp2 = try Box.KeyPair.create(null);
const kp1 = try Box.KeyPair.create(null);
const kp2 = try Box.KeyPair.create(null);
try Box.seal(boxed[0..], msg[0..], nonce, kp1.public_key, kp2.secret_key);
try Box.open(msg2[0..], boxed[0..], nonce, kp2.public_key, kp1.secret_key);
}
@ -617,7 +617,7 @@ test "xsalsa20poly1305 sealedbox" {
var boxed: [msg.len + SealedBox.seal_length]u8 = undefined;
crypto.random.bytes(&msg);
var kp = try Box.KeyPair.create(null);
const kp = try Box.KeyPair.create(null);
try SealedBox.seal(boxed[0..], msg[0..], kp.public_key);
try SealedBox.open(msg2[0..], boxed[0..], kp);
}

Some files were not shown because too many files have changed in this diff.