std.builtin.Endian: make the tags lower case

Let's take the opportunity of this breaking change to fix the style of this enum.
Andrew Kelley 2023-10-31 13:02:38 -07:00 committed by Jacob Young
parent 149200aac5
commit 3fc6fc6812
160 changed files with 1306 additions and 1306 deletions
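For context, here is a minimal sketch (not part of this commit) of what the rename looks like from user code. The buffer contents, the main function, and the print call are illustrative assumptions; std.mem.readInt and builtin.cpu.arch.endian() are the same APIs touched throughout the diff below, and only the enum tag spelling changes (.Big/.Little become .big/.little) with no change in behavior.

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() void {
        // Before this commit: std.mem.readInt(u32, &bytes, .Little)
        // After this commit the Endian tags are lower case: .little / .big
        const bytes = [4]u8{ 0x78, 0x56, 0x34, 0x12 };
        const value = std.mem.readInt(u32, &bytes, .little);
        std.debug.print("value=0x{x} native endian={}\n", .{ value, builtin.cpu.arch.endian() });
    }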


@ -402,7 +402,7 @@ pub fn generateBuiltinMacros(comp: *Compilation) !Source {
\\#define __ORDER_PDP_ENDIAN__ 3412
\\
);
if (comp.target.cpu.arch.endian() == .Little) try w.writeAll(
if (comp.target.cpu.arch.endian() == .little) try w.writeAll(
\\#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
\\#define __LITTLE_ENDIAN__ 1
\\

deps/aro/target.zig vendored

@ -398,8 +398,8 @@ pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian
.thumb,
.thumbeb,
=> switch (arm_endianness orelse target.cpu.arch.endian()) {
.Little => "armelf_linux_eabi",
.Big => "armelfb_linux_eabi",
.little => "armelf_linux_eabi",
.big => "armelfb_linux_eabi",
},
.aarch64 => "aarch64linux",
.aarch64_be => "aarch64linuxb",


@ -3376,11 +3376,11 @@ fn doTheTest() !void {
var ordered: [2]u8 = @bitCast(full);
switch (native_endian) {
.Big => {
.big => {
try expect(ordered[0] == 0x12);
try expect(ordered[1] == 0x34);
},
.Little => {
.little => {
try expect(ordered[0] == 0x34);
try expect(ordered[1] == 0x12);
},


@ -249,7 +249,7 @@ pub fn HalveInt(comptime T: type, comptime signed_half: bool) type {
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
s: if (native_endian == .little)
extern struct { low: HalfT, high: HalfT }
else
extern struct { high: HalfT, low: HalfT },


@ -142,16 +142,16 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
const bPtr_u16: [*]u16 = @ptrCast(&bmod);
const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) {
.Little => 7,
.Big => 0,
.little => 7,
.big => 0,
};
const low_index = comptime switch (builtin.target.cpu.arch.endian()) {
.Little => 0,
.Big => 1,
.little => 0,
.big => 1,
};
const high_index = comptime switch (builtin.target.cpu.arch.endian()) {
.Little => 1,
.Big => 0,
.little => 1,
.big => 0,
};
const signA = aPtr_u16[exp_and_sign_index] & 0x8000;


@ -5,8 +5,8 @@ const Log2Int = std.math.Log2Int;
const HalveInt = @import("common.zig").HalveInt;
const lo = switch (builtin.cpu.arch.endian()) {
.Big => 1,
.Little => 0,
.big => 1,
.little => 0,
};
const hi = 1 - lo;


@ -15,12 +15,12 @@ const endian = builtin.cpu.arch.endian();
/// Get the value of a limb.
inline fn limb(x: []const u32, i: usize) u32 {
return if (endian == .Little) x[i] else x[x.len - 1 - i];
return if (endian == .little) x[i] else x[x.len - 1 - i];
}
/// Change the value of a limb.
inline fn limb_set(x: []u32, i: usize, v: u32) void {
if (endian == .Little) {
if (endian == .little) {
x[i] = v;
} else {
x[x.len - 1 - i] = v;


@ -1624,8 +1624,8 @@ const WasmDumper = struct {
switch (opcode) {
.i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}),
.i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readILEB128(i64, reader)}),
.f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readInt(u32, .Little)))}),
.f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readInt(u64, .Little)))}),
.f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readInt(u32, .little)))}),
.f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readInt(u64, .little)))}),
.global_get => try writer.print("global.get {x}\n", .{try std.leb.readULEB128(u32, reader)}),
else => unreachable,
}


@ -104,14 +104,14 @@ pub const Base64Encoder = struct {
var idx: usize = 0;
var out_idx: usize = 0;
while (idx + 15 < source.len) : (idx += 12) {
const bits = std.mem.readInt(u128, source[idx..][0..16], .Big);
const bits = std.mem.readInt(u128, source[idx..][0..16], .big);
inline for (0..16) |i| {
dest[out_idx + i] = encoder.alphabet_chars[@truncate((bits >> (122 - i * 6)) & 0x3f)];
}
out_idx += 16;
}
while (idx + 3 < source.len) : (idx += 3) {
const bits = std.mem.readInt(u32, source[idx..][0..4], .Big);
const bits = std.mem.readInt(u32, source[idx..][0..4], .big);
dest[out_idx] = encoder.alphabet_chars[(bits >> 26) & 0x3f];
dest[out_idx + 1] = encoder.alphabet_chars[(bits >> 20) & 0x3f];
dest[out_idx + 2] = encoder.alphabet_chars[(bits >> 14) & 0x3f];
@ -226,7 +226,7 @@ pub const Base64Decoder = struct {
if ((new_bits & invalid_char_tst) != 0) return error.InvalidCharacter;
bits |= (new_bits << (24 * i));
}
std.mem.writeInt(u128, dest[dest_idx..][0..16], bits, .Little);
std.mem.writeInt(u128, dest[dest_idx..][0..16], bits, .little);
}
while (fast_src_idx + 4 < source.len and dest_idx + 3 < dest.len) : ({
fast_src_idx += 4;
@ -237,7 +237,7 @@ pub const Base64Decoder = struct {
bits |= decoder.fast_char_to_index[2][source[fast_src_idx + 2]];
bits |= decoder.fast_char_to_index[3][source[fast_src_idx + 3]];
if ((bits & invalid_char_tst) != 0) return error.InvalidCharacter;
std.mem.writeInt(u32, dest[dest_idx..][0..4], bits, .Little);
std.mem.writeInt(u32, dest[dest_idx..][0..4], bits, .little);
}
var remaining = source[fast_src_idx..];
for (remaining, fast_src_idx..) |c, src_idx| {


@ -450,8 +450,8 @@ pub const FloatMode = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Endian = enum {
Big,
Little,
big,
little,
};
/// This data structure is used by the Zig language code generation and


@ -1376,7 +1376,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {
.capable_io_mode = .blocking,
.intended_io_mode = .blocking,
};
file.writer().writeInt(u64, @intCast(value), .Little) catch return error.SystemResources;
file.writer().writeInt(u64, @intCast(value), .little) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
@ -1385,7 +1385,7 @@ fn readIntFd(fd: i32) !ErrInt {
.capable_io_mode = .blocking,
.intended_io_mode = .blocking,
};
return @as(ErrInt, @intCast(file.reader().readInt(u64, .Little) catch return error.SystemResources));
return @as(ErrInt, @intCast(file.reader().readInt(u64, .little) catch return error.SystemResources));
}
/// Caller must free result.


@ -663,7 +663,7 @@ pub const Symbol = struct {
pub fn getNameOffset(self: Symbol) ?u32 {
if (!std.mem.eql(u8, self.name[0..4], "\x00\x00\x00\x00")) return null;
const offset = std.mem.readInt(u32, self.name[4..8], .Little);
const offset = std.mem.readInt(u32, self.name[4..8], .little);
return offset;
}
};
@ -1075,7 +1075,7 @@ pub const Coff = struct {
var stream = std.io.fixedBufferStream(data);
const reader = stream.reader();
try stream.seekTo(pe_pointer_offset);
var coff_header_offset = try reader.readInt(u32, .Little);
var coff_header_offset = try reader.readInt(u32, .little);
try stream.seekTo(coff_header_offset);
var buf: [4]u8 = undefined;
try reader.readNoEof(&buf);
@ -1142,7 +1142,7 @@ pub const Coff = struct {
if (!mem.eql(u8, &cv_signature, "RSDS"))
return error.InvalidPEMagic;
try reader.readNoEof(self.guid[0..]);
self.age = try reader.readInt(u32, .Little);
self.age = try reader.readInt(u32, .little);
// Finally read the null-terminated string.
var byte = try reader.readByte();
@ -1223,7 +1223,7 @@ pub const Coff = struct {
if (coff_header.pointer_to_symbol_table == 0) return null;
const offset = coff_header.pointer_to_symbol_table + Symbol.sizeOf() * coff_header.number_of_symbols;
const size = mem.readInt(u32, self.data[offset..][0..4], .Little);
const size = mem.readInt(u32, self.data[offset..][0..4], .little);
if ((offset + size) > self.data.len) return error.InvalidStrtabSize;
return Strtab{ .buffer = self.data[offset..][0..size] };
@ -1324,9 +1324,9 @@ pub const Symtab = struct {
fn asSymbol(raw: []const u8) Symbol {
return .{
.name = raw[0..8].*,
.value = mem.readInt(u32, raw[8..12], .Little),
.section_number = @as(SectionNumber, @enumFromInt(mem.readInt(u16, raw[12..14], .Little))),
.type = @as(SymType, @bitCast(mem.readInt(u16, raw[14..16], .Little))),
.value = mem.readInt(u32, raw[8..12], .little),
.section_number = @as(SectionNumber, @enumFromInt(mem.readInt(u16, raw[12..14], .little))),
.type = @as(SymType, @bitCast(mem.readInt(u16, raw[14..16], .little))),
.storage_class = @as(StorageClass, @enumFromInt(raw[16])),
.number_of_aux_symbols = raw[17],
};
@ -1335,27 +1335,27 @@ pub const Symtab = struct {
fn asDebugInfo(raw: []const u8) DebugInfoDefinition {
return .{
.unused_1 = raw[0..4].*,
.linenumber = mem.readInt(u16, raw[4..6], .Little),
.linenumber = mem.readInt(u16, raw[4..6], .little),
.unused_2 = raw[6..12].*,
.pointer_to_next_function = mem.readInt(u32, raw[12..16], .Little),
.pointer_to_next_function = mem.readInt(u32, raw[12..16], .little),
.unused_3 = raw[16..18].*,
};
}
fn asFuncDef(raw: []const u8) FunctionDefinition {
return .{
.tag_index = mem.readInt(u32, raw[0..4], .Little),
.total_size = mem.readInt(u32, raw[4..8], .Little),
.pointer_to_linenumber = mem.readInt(u32, raw[8..12], .Little),
.pointer_to_next_function = mem.readInt(u32, raw[12..16], .Little),
.tag_index = mem.readInt(u32, raw[0..4], .little),
.total_size = mem.readInt(u32, raw[4..8], .little),
.pointer_to_linenumber = mem.readInt(u32, raw[8..12], .little),
.pointer_to_next_function = mem.readInt(u32, raw[12..16], .little),
.unused = raw[16..18].*,
};
}
fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
return .{
.tag_index = mem.readInt(u32, raw[0..4], .Little),
.flag = @as(WeakExternalFlag, @enumFromInt(mem.readInt(u32, raw[4..8], .Little))),
.tag_index = mem.readInt(u32, raw[0..4], .little),
.flag = @as(WeakExternalFlag, @enumFromInt(mem.readInt(u32, raw[4..8], .little))),
.unused = raw[8..18].*,
};
}
@ -1368,11 +1368,11 @@ pub const Symtab = struct {
fn asSectDef(raw: []const u8) SectionDefinition {
return .{
.length = mem.readInt(u32, raw[0..4], .Little),
.number_of_relocations = mem.readInt(u16, raw[4..6], .Little),
.number_of_linenumbers = mem.readInt(u16, raw[6..8], .Little),
.checksum = mem.readInt(u32, raw[8..12], .Little),
.number = mem.readInt(u16, raw[12..14], .Little),
.length = mem.readInt(u32, raw[0..4], .little),
.number_of_relocations = mem.readInt(u16, raw[4..6], .little),
.number_of_linenumbers = mem.readInt(u16, raw[6..8], .little),
.checksum = mem.readInt(u32, raw[8..12], .little),
.number = mem.readInt(u16, raw[12..14], .little),
.selection = @as(ComdatSelection, @enumFromInt(raw[14])),
.unused = raw[15..18].*,
};


@ -58,7 +58,7 @@ pub fn Decompress(comptime ReaderType: type) type {
const FLG = header[3];
// Modification time, as a Unix timestamp.
// If zero there's no timestamp available.
const MTIME = mem.readInt(u32, header[4..8], .Little);
const MTIME = mem.readInt(u32, header[4..8], .little);
// Extra flags
const XFL = header[8];
// Operating system where the compression took place
@ -66,7 +66,7 @@ pub fn Decompress(comptime ReaderType: type) type {
_ = XFL;
const extra = if (FLG & FEXTRA != 0) blk: {
const len = try hashed_reader.readInt(u16, .Little);
const len = try hashed_reader.readInt(u16, .little);
const tmp_buf = try allocator.alloc(u8, len);
errdefer allocator.free(tmp_buf);
@ -88,7 +88,7 @@ pub fn Decompress(comptime ReaderType: type) type {
errdefer if (comment) |p| allocator.free(p);
if (FLG & FHCRC != 0) {
const hash = try source.readInt(u16, .Little);
const hash = try source.readInt(u16, .little);
if (hash != @as(u16, @truncate(hasher.hasher.final())))
return error.WrongChecksum;
}
@ -133,12 +133,12 @@ pub fn Decompress(comptime ReaderType: type) type {
}
// We've reached the end of stream, check if the checksum matches
const hash = try self.in_reader.readInt(u32, .Little);
const hash = try self.in_reader.readInt(u32, .little);
if (hash != self.hasher.final())
return error.WrongChecksum;
// The ISIZE field is the size of the uncompressed input modulo 2^32
const input_size = try self.in_reader.readInt(u32, .Little);
const input_size = try self.in_reader.readInt(u32, .little);
if (self.read_amt & 0xffffffff != input_size)
return error.CorruptedData;


@ -58,12 +58,12 @@ pub const Params = struct {
props /= 5;
const pb = @as(u3, @intCast(props));
const dict_size_provided = try reader.readInt(u32, .Little);
const dict_size_provided = try reader.readInt(u32, .little);
const dict_size = @max(0x1000, dict_size_provided);
const unpacked_size = switch (options.unpacked_size) {
.read_from_header => blk: {
const unpacked_size_provided = try reader.readInt(u64, .Little);
const unpacked_size_provided = try reader.readInt(u64, .little);
const marker_mandatory = unpacked_size_provided == 0xFFFF_FFFF_FFFF_FFFF;
break :blk if (marker_mandatory)
null
@ -71,7 +71,7 @@ pub const Params = struct {
unpacked_size_provided;
},
.read_header_but_use_provided => |x| blk: {
_ = try reader.readInt(u64, .Little);
_ = try reader.readInt(u64, .little);
break :blk x;
},
.use_provided => |x| x,


@ -12,7 +12,7 @@ pub const RangeDecoder = struct {
}
return RangeDecoder{
.range = 0xFFFF_FFFF,
.code = try reader.readInt(u32, .Big),
.code = try reader.readInt(u32, .big),
};
}


@ -97,12 +97,12 @@ pub const Decoder = struct {
const unpacked_size = blk: {
var tmp: u64 = status & 0x1F;
tmp <<= 16;
tmp |= try reader.readInt(u16, .Big);
tmp |= try reader.readInt(u16, .big);
break :blk tmp + 1;
};
const packed_size = blk: {
const tmp: u17 = try reader.readInt(u16, .Big);
const tmp: u17 = try reader.readInt(u16, .big);
break :blk tmp + 1;
};
@ -155,7 +155,7 @@ pub const Decoder = struct {
accum: *LzAccumBuffer,
reset_dict: bool,
) !void {
const unpacked_size = @as(u17, try reader.readInt(u16, .Big)) + 1;
const unpacked_size = @as(u17, try reader.readInt(u16, .big)) + 1;
if (reset_dict) {
try accum.reset(writer);


@ -12,7 +12,7 @@ pub const Check = enum(u4) {
};
fn readStreamFlags(reader: anytype, check: *Check) !void {
var bit_reader = std.io.bitReader(.Little, reader);
var bit_reader = std.io.bitReader(.little, reader);
const reserved1 = try bit_reader.readBitsNoEof(u8, 8);
if (reserved1 != 0)
@ -52,7 +52,7 @@ pub fn Decompress(comptime ReaderType: type) type {
break :blk hasher.hasher.final();
};
const hash_b = try source.readInt(u32, .Little);
const hash_b = try source.readInt(u32, .little);
if (hash_a != hash_b)
return error.WrongChecksum;
@ -105,20 +105,20 @@ pub fn Decompress(comptime ReaderType: type) type {
}
const hash_a = hasher.hasher.final();
const hash_b = try counting_reader.readInt(u32, .Little);
const hash_b = try counting_reader.readInt(u32, .little);
if (hash_a != hash_b)
return error.WrongChecksum;
break :blk counter.bytes_read;
};
const hash_a = try self.in_reader.readInt(u32, .Little);
const hash_a = try self.in_reader.readInt(u32, .little);
const hash_b = blk: {
var hasher = std.compress.hashedReader(self.in_reader, Crc32.init());
const hashed_reader = hasher.reader();
const backward_size = (@as(u64, try hashed_reader.readInt(u32, .Little)) + 1) * 4;
const backward_size = (@as(u64, try hashed_reader.readInt(u32, .little)) + 1) * 4;
if (backward_size != index_size)
return error.CorruptInput;


@ -148,7 +148,7 @@ pub fn Decoder(comptime ReaderType: type) type {
}
const hash_a = header_hasher.hasher.final();
const hash_b = try header_reader.readInt(u32, .Little);
const hash_b = try header_reader.readInt(u32, .little);
if (hash_a != hash_b)
return error.WrongChecksum;
}
@ -182,13 +182,13 @@ pub fn Decoder(comptime ReaderType: type) type {
.none => {},
.crc32 => {
const hash_a = Crc32.hash(unpacked_bytes);
const hash_b = try self.inner_reader.readInt(u32, .Little);
const hash_b = try self.inner_reader.readInt(u32, .little);
if (hash_a != hash_b)
return error.WrongChecksum;
},
.crc64 => {
const hash_a = Crc64.hash(unpacked_bytes);
const hash_b = try self.inner_reader.readInt(u64, .Little);
const hash_b = try self.inner_reader.readInt(u64, .little);
if (hash_a != hash_b)
return error.WrongChecksum;
},


@ -36,7 +36,7 @@ pub fn DecompressStream(comptime ReaderType: type) type {
fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// Zlib header format is specified in RFC1950
const header_u16 = try source.readInt(u16, .Big);
const header_u16 = try source.readInt(u16, .big);
// verify the header checksum
if (header_u16 % 31 != 0)
@ -81,7 +81,7 @@ pub fn DecompressStream(comptime ReaderType: type) type {
}
// We've reached the end of stream, check if the checksum matches
const hash = try self.in_reader.readInt(u32, .Big);
const hash = try self.in_reader.readInt(u32, .big);
if (hash != self.hasher.final())
return error.WrongChecksum;
@ -132,7 +132,7 @@ pub fn CompressStream(comptime WriterType: type) type {
};
header.checksum = @as(u5, @truncate(31 - @as(u16, @bitCast(header)) % 31));
try dest.writeInt(u16, @as(u16, @bitCast(header)), .Big);
try dest.writeInt(u16, @as(u16, @bitCast(header)), .big);
const compression_level: deflate.Compression = switch (options.level) {
.no_compression => .no_compression,
@ -171,7 +171,7 @@ pub fn CompressStream(comptime WriterType: type) type {
pub fn finish(self: *Self) !void {
const hash = self.hasher.final();
try self.deflator.close();
try self.in_writer.writeInt(u32, hash, .Big);
try self.in_writer.writeInt(u32, hash, .big);
}
};
}


@ -201,7 +201,7 @@ pub fn DecompressStream(
if (block_header.last_block) {
self.state = .LastBlock;
if (self.frame_context.has_checksum) {
const checksum = source_reader.readInt(u32, .Little) catch
const checksum = source_reader.readInt(u32, .little) catch
return error.MalformedFrame;
if (comptime options.verify_checksum) {
if (self.frame_context.hasher_opt) |*hasher| {


@ -1029,9 +1029,9 @@ fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Stre
if (stream_data.len < 6) return error.MalformedLiteralsSection;
const stream_1_length: usize = std.mem.readInt(u16, stream_data[0..2], .Little);
const stream_2_length: usize = std.mem.readInt(u16, stream_data[2..4], .Little);
const stream_3_length: usize = std.mem.readInt(u16, stream_data[4..6], .Little);
const stream_1_length: usize = std.mem.readInt(u16, stream_data[0..2], .little);
const stream_2_length: usize = std.mem.readInt(u16, stream_data[2..4], .little);
const stream_3_length: usize = std.mem.readInt(u16, stream_data[4..6], .little);
const stream_1_start = 6;
const stream_2_start = stream_1_start + stream_1_length;


@ -28,7 +28,7 @@ pub fn isSkippableMagic(magic: u32) bool {
/// skippable frames.
/// - `error.EndOfStream` if `source` contains fewer than 4 bytes
pub fn decodeFrameType(source: anytype) error{ BadMagic, EndOfStream }!frame.Kind {
const magic = try source.readInt(u32, .Little);
const magic = try source.readInt(u32, .little);
return frameType(magic);
}
@ -62,14 +62,14 @@ pub const HeaderError = error{ BadMagic, EndOfStream, ReservedBitSet };
/// - `error.ReservedBitSet` if the frame is a Zstandard frame and any of the
/// reserved bits are set
pub fn decodeFrameHeader(source: anytype) (@TypeOf(source).Error || HeaderError)!FrameHeader {
const magic = try source.readInt(u32, .Little);
const magic = try source.readInt(u32, .little);
const frame_type = try frameType(magic);
switch (frame_type) {
.zstandard => return FrameHeader{ .zstandard = try decodeZstandardHeader(source) },
.skippable => return FrameHeader{
.skippable = .{
.magic_number = magic,
.frame_size = try source.readInt(u32, .Little),
.frame_size = try source.readInt(u32, .little),
},
},
}
@ -190,7 +190,7 @@ pub fn decodeFrame(
switch (try decodeFrameType(fbs.reader())) {
.zstandard => return decodeZstandardFrame(dest, src, verify_checksum),
.skippable => {
const content_size = try fbs.reader().readInt(u32, .Little);
const content_size = try fbs.reader().readInt(u32, .little);
if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
const read_count = @as(usize, content_size) + 8;
if (read_count > src.len) return error.SkippableSizeTooLarge;
@ -235,7 +235,7 @@ pub fn decodeFrameArrayList(
) (error{ BadMagic, OutOfMemory, SkippableSizeTooLarge } || FrameContext.Error || FrameError)!usize {
var fbs = std.io.fixedBufferStream(src);
const reader = fbs.reader();
const magic = try reader.readInt(u32, .Little);
const magic = try reader.readInt(u32, .little);
switch (try frameType(magic)) {
.zstandard => return decodeZstandardFrameArrayList(
allocator,
@ -245,7 +245,7 @@ pub fn decodeFrameArrayList(
window_size_max,
),
.skippable => {
const content_size = try fbs.reader().readInt(u32, .Little);
const content_size = try fbs.reader().readInt(u32, .little);
if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
const read_count = @as(usize, content_size) + 8;
if (read_count > src.len) return error.SkippableSizeTooLarge;
@ -299,7 +299,7 @@ pub fn decodeZstandardFrame(
WindowSizeUnknown,
DictionaryIdFlagUnsupported,
} || FrameError)!ReadWriteCount {
assert(std.mem.readInt(u32, src[0..4], .Little) == frame.Zstandard.magic_number);
assert(std.mem.readInt(u32, src[0..4], .little) == frame.Zstandard.magic_number);
var consumed_count: usize = 4;
var frame_context = context: {
@ -351,7 +351,7 @@ pub fn decodeZStandardFrameBlocks(
if (written_count != content_size) return error.BadContentSize;
if (frame_context.has_checksum) {
if (src.len < consumed_count + 4) return error.EndOfStream;
const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .Little);
const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .little);
consumed_count += 4;
if (frame_context.hasher_opt) |*hasher| {
if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
@ -442,7 +442,7 @@ pub fn decodeZstandardFrameArrayList(
verify_checksum: bool,
window_size_max: usize,
) (error{OutOfMemory} || FrameContext.Error || FrameError)!usize {
assert(std.mem.readInt(u32, src[0..4], .Little) == frame.Zstandard.magic_number);
assert(std.mem.readInt(u32, src[0..4], .little) == frame.Zstandard.magic_number);
var consumed_count: usize = 4;
var frame_context = context: {
@ -517,7 +517,7 @@ pub fn decodeZstandardFrameBlocksArrayList(
if (frame_context.has_checksum) {
if (src.len < consumed_count + 4) return error.EndOfStream;
const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .Little);
const checksum = std.mem.readInt(u32, src[consumed_count..][0..4], .little);
consumed_count += 4;
if (frame_context.hasher_opt) |*hasher| {
if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
@ -566,9 +566,9 @@ fn decodeFrameBlocksInner(
/// Decode the header of a skippable frame. The first four bytes of `src` must
/// be a valid magic number for a skippable frame.
pub fn decodeSkippableHeader(src: *const [8]u8) SkippableHeader {
const magic = std.mem.readInt(u32, src[0..4], .Little);
const magic = std.mem.readInt(u32, src[0..4], .little);
assert(isSkippableMagic(magic));
const frame_size = std.mem.readInt(u32, src[4..8], .Little);
const frame_size = std.mem.readInt(u32, src[4..8], .little);
return .{
.magic_number = magic,
.frame_size = frame_size,
@ -609,13 +609,13 @@ pub fn decodeZstandardHeader(
if (descriptor.dictionary_id_flag > 0) {
// if flag is 3 then field_size = 4, else field_size = flag
const field_size = (@as(u4, 1) << descriptor.dictionary_id_flag) >> 1;
dictionary_id = try source.readVarInt(u32, .Little, field_size);
dictionary_id = try source.readVarInt(u32, .little, field_size);
}
var content_size: ?u64 = null;
if (descriptor.single_segment_flag or descriptor.content_size_flag > 0) {
const field_size = @as(u4, 1) << descriptor.content_size_flag;
content_size = try source.readVarInt(u64, .Little, field_size);
content_size = try source.readVarInt(u64, .little, field_size);
if (field_size == 2) content_size.? += 256;
}


@ -31,11 +31,11 @@ pub const ReversedByteReader = struct {
/// FSE compressed data.
pub const ReverseBitReader = struct {
byte_reader: ReversedByteReader,
bit_reader: std.io.BitReader(.Big, ReversedByteReader.Reader),
bit_reader: std.io.BitReader(.big, ReversedByteReader.Reader),
pub fn init(self: *ReverseBitReader, bytes: []const u8) error{BitStreamHasNoStartBit}!void {
self.byte_reader = ReversedByteReader.init(bytes);
self.bit_reader = std.io.bitReader(.Big, self.byte_reader.reader());
self.bit_reader = std.io.bitReader(.big, self.byte_reader.reader());
if (bytes.len == 0) return;
var i: usize = 0;
while (i < 8 and 0 == self.readBitsNoEof(u1, 1) catch unreachable) : (i += 1) {}
@ -61,7 +61,7 @@ pub const ReverseBitReader = struct {
pub fn BitReader(comptime Reader: type) type {
return struct {
underlying: std.io.BitReader(.Little, Reader),
underlying: std.io.BitReader(.little, Reader),
pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: usize) !U {
return self.underlying.readBitsNoEof(U, num_bits);
@ -78,5 +78,5 @@ pub fn BitReader(comptime Reader: type) type {
}
pub fn bitReader(reader: anytype) BitReader(@TypeOf(reader)) {
return .{ .underlying = std.io.bitReader(.Little, reader) };
return .{ .underlying = std.io.bitReader(.little, reader) };
}


@ -71,11 +71,11 @@ pub const Fe = struct {
/// Unpack a field element
pub fn fromBytes(s: [32]u8) Fe {
var fe: Fe = undefined;
fe.limbs[0] = std.mem.readInt(u64, s[0..8], .Little) & MASK51;
fe.limbs[1] = (std.mem.readInt(u64, s[6..14], .Little) >> 3) & MASK51;
fe.limbs[2] = (std.mem.readInt(u64, s[12..20], .Little) >> 6) & MASK51;
fe.limbs[3] = (std.mem.readInt(u64, s[19..27], .Little) >> 1) & MASK51;
fe.limbs[4] = (std.mem.readInt(u64, s[24..32], .Little) >> 12) & MASK51;
fe.limbs[0] = std.mem.readInt(u64, s[0..8], .little) & MASK51;
fe.limbs[1] = (std.mem.readInt(u64, s[6..14], .little) >> 3) & MASK51;
fe.limbs[2] = (std.mem.readInt(u64, s[12..20], .little) >> 6) & MASK51;
fe.limbs[3] = (std.mem.readInt(u64, s[19..27], .little) >> 1) & MASK51;
fe.limbs[4] = (std.mem.readInt(u64, s[24..32], .little) >> 12) & MASK51;
return fe;
}
@ -85,10 +85,10 @@ pub const Fe = struct {
var reduced = fe;
reduced.reduce();
var s: [32]u8 = undefined;
std.mem.writeInt(u64, s[0..8], reduced.limbs[0] | (reduced.limbs[1] << 51), .Little);
std.mem.writeInt(u64, s[8..16], (reduced.limbs[1] >> 13) | (reduced.limbs[2] << 38), .Little);
std.mem.writeInt(u64, s[16..24], (reduced.limbs[2] >> 26) | (reduced.limbs[3] << 25), .Little);
std.mem.writeInt(u64, s[24..32], (reduced.limbs[3] >> 39) | (reduced.limbs[4] << 12), .Little);
std.mem.writeInt(u64, s[0..8], reduced.limbs[0] | (reduced.limbs[1] << 51), .little);
std.mem.writeInt(u64, s[8..16], (reduced.limbs[1] >> 13) | (reduced.limbs[2] << 38), .little);
std.mem.writeInt(u64, s[16..24], (reduced.limbs[2] >> 26) | (reduced.limbs[3] << 25), .little);
std.mem.writeInt(u64, s[24..32], (reduced.limbs[3] >> 39) | (reduced.limbs[4] << 12), .little);
return s;
}


@ -15,7 +15,7 @@ pub const zero = [_]u8{0} ** 32;
const field_order_s = s: {
var s: [32]u8 = undefined;
mem.writeInt(u256, &s, field_order, .Little);
mem.writeInt(u256, &s, field_order, .little);
break :s s;
};
@ -127,9 +127,9 @@ pub const Scalar = struct {
var bytes: CompressedScalar = undefined;
var i: usize = 0;
while (i < 4) : (i += 1) {
mem.writeInt(u64, bytes[i * 7 ..][0..8], expanded.limbs[i], .Little);
mem.writeInt(u64, bytes[i * 7 ..][0..8], expanded.limbs[i], .little);
}
mem.writeInt(u32, bytes[i * 7 ..][0..4], @intCast(expanded.limbs[i]), .Little);
mem.writeInt(u32, bytes[i * 7 ..][0..4], @intCast(expanded.limbs[i]), .little);
return bytes;
}
@ -580,7 +580,7 @@ const ScalarDouble = struct {
var limbs: Limbs = undefined;
var i: usize = 0;
while (i < 9) : (i += 1) {
limbs[i] = mem.readInt(u64, bytes[i * 7 ..][0..8], .Little) & 0xffffffffffffff;
limbs[i] = mem.readInt(u64, bytes[i * 7 ..][0..8], .little) & 0xffffffffffffff;
}
limbs[i] = @as(u64, bytes[i * 7]);
return ScalarDouble{ .limbs = limbs };
@ -590,9 +590,9 @@ const ScalarDouble = struct {
var limbs: Limbs = undefined;
var i: usize = 0;
while (i < 4) : (i += 1) {
limbs[i] = mem.readInt(u64, bytes[i * 7 ..][0..8], .Little) & 0xffffffffffffff;
limbs[i] = mem.readInt(u64, bytes[i * 7 ..][0..8], .little) & 0xffffffffffffff;
}
limbs[i] = @as(u64, mem.readInt(u32, bytes[i * 7 ..][0..4], .Little));
limbs[i] = @as(u64, mem.readInt(u32, bytes[i * 7 ..][0..4], .little));
@memset(limbs[5..], 0);
return ScalarDouble{ .limbs = limbs };
}


@ -1075,7 +1075,7 @@ pub const rsa = struct {
// Reject modulus below 512 bits.
// 512-bit RSA was factored in 1999, so this limit barely means anything,
// but establish some limit now to ratchet in what we can.
const _n = Modulus.fromBytes(modulus_bytes, .Big) catch return error.CertificatePublicKeyInvalid;
const _n = Modulus.fromBytes(modulus_bytes, .big) catch return error.CertificatePublicKeyInvalid;
if (_n.bits() < 512) return error.CertificatePublicKeyInvalid;
// Exponent must be odd and greater than 2.
@ -1085,7 +1085,7 @@ pub const rsa = struct {
// Windows commonly does.
// [1] https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/ns-wincrypt-rsapubkey
if (pub_bytes.len > 4) return error.CertificatePublicKeyInvalid;
const _e = Fe.fromBytes(_n, pub_bytes, .Big) catch return error.CertificatePublicKeyInvalid;
const _e = Fe.fromBytes(_n, pub_bytes, .big) catch return error.CertificatePublicKeyInvalid;
if (!_e.isOdd()) return error.CertificatePublicKeyInvalid;
const e_v = _e.toPrimitive(u32) catch return error.CertificatePublicKeyInvalid;
if (e_v < 2) return error.CertificatePublicKeyInvalid;
@ -1116,10 +1116,10 @@ pub const rsa = struct {
};
fn encrypt(comptime modulus_len: usize, msg: [modulus_len]u8, public_key: PublicKey) ![modulus_len]u8 {
const m = Fe.fromBytes(public_key.n, &msg, .Big) catch return error.MessageTooLong;
const m = Fe.fromBytes(public_key.n, &msg, .big) catch return error.MessageTooLong;
const e = public_key.n.powPublic(m, public_key.e) catch unreachable;
var res: [modulus_len]u8 = undefined;
e.toBytes(&res, .Big) catch unreachable;
e.toBytes(&res, .big) catch unreachable;
return res;
}
};


@ -32,7 +32,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
var table_idx: u32 = 0;
while (table_idx < table_list.len) : (table_idx += 1) {
table_list[table_idx] = try reader.readInt(u32, .Big);
table_list[table_idx] = try reader.readInt(u32, .big);
}
const now_sec = std.time.timestamp();
@ -51,7 +51,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
var record_idx: u32 = 0;
while (record_idx < record_list.len) : (record_idx += 1) {
record_list[record_idx] = try reader.readInt(u32, .Big);
record_list[record_idx] = try reader.readInt(u32, .big);
}
for (record_list) |record_offset| {


@ -106,8 +106,8 @@ const State128L = struct {
fn mac(state: *State128L, comptime tag_bits: u9, adlen: usize, mlen: usize) [tag_bits / 8]u8 {
const blocks = &state.blocks;
var sizes: [16]u8 = undefined;
mem.writeInt(u64, sizes[0..8], adlen * 8, .Little);
mem.writeInt(u64, sizes[8..16], mlen * 8, .Little);
mem.writeInt(u64, sizes[0..8], adlen * 8, .little);
mem.writeInt(u64, sizes[8..16], mlen * 8, .little);
const tmp = AesBlock.fromBytes(&sizes).xorBlocks(blocks[2]);
var i: usize = 0;
while (i < 7) : (i += 1) {
@ -284,8 +284,8 @@ const State256 = struct {
fn mac(state: *State256, comptime tag_bits: u9, adlen: usize, mlen: usize) [tag_bits / 8]u8 {
const blocks = &state.blocks;
var sizes: [16]u8 = undefined;
mem.writeInt(u64, sizes[0..8], adlen * 8, .Little);
mem.writeInt(u64, sizes[8..16], mlen * 8, .Little);
mem.writeInt(u64, sizes[0..8], adlen * 8, .little);
mem.writeInt(u64, sizes[8..16], mlen * 8, .little);
const tmp = AesBlock.fromBytes(&sizes).xorBlocks(blocks[3]);
var i: usize = 0;
while (i < 7) : (i += 1) {


@ -50,7 +50,7 @@ test "ctr" {
var out: [exp_out.len]u8 = undefined;
var ctx = Aes128.initEnc(key);
ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, std.builtin.Endian.Big);
ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, std.builtin.Endian.big);
try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
}


@ -15,20 +15,20 @@ pub const Block = struct {
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
const s0 = mem.readInt(u32, bytes[0..4], .Little);
const s1 = mem.readInt(u32, bytes[4..8], .Little);
const s2 = mem.readInt(u32, bytes[8..12], .Little);
const s3 = mem.readInt(u32, bytes[12..16], .Little);
const s0 = mem.readInt(u32, bytes[0..4], .little);
const s1 = mem.readInt(u32, bytes[4..8], .little);
const s2 = mem.readInt(u32, bytes[8..12], .little);
const s3 = mem.readInt(u32, bytes[12..16], .little);
return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
var bytes: [16]u8 = undefined;
mem.writeInt(u32, bytes[0..4], block.repr[0], .Little);
mem.writeInt(u32, bytes[4..8], block.repr[1], .Little);
mem.writeInt(u32, bytes[8..12], block.repr[2], .Little);
mem.writeInt(u32, bytes[12..16], block.repr[3], .Little);
mem.writeInt(u32, bytes[0..4], block.repr[0], .little);
mem.writeInt(u32, bytes[4..8], block.repr[1], .little);
mem.writeInt(u32, bytes[8..12], block.repr[2], .little);
mem.writeInt(u32, bytes[12..16], block.repr[3], .little);
return bytes;
}
@ -123,13 +123,13 @@ pub const Block = struct {
// Last round uses s-box directly and XORs to produce output.
var x: [4]u8 = undefined;
x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 24)));
var t0 = mem.readInt(u32, &x, .Little);
var t0 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 24)));
var t1 = mem.readInt(u32, &x, .Little);
var t1 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 24)));
var t2 = mem.readInt(u32, &x, .Little);
var t2 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 24)));
var t3 = mem.readInt(u32, &x, .Little);
var t3 = mem.readInt(u32, &x, .little);
t0 ^= round_key.repr[0];
t1 ^= round_key.repr[1];
@ -219,13 +219,13 @@ pub const Block = struct {
// Last round uses s-box directly and XORs to produce output.
var x: [4]u8 = undefined;
x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 24)));
var t0 = mem.readInt(u32, &x, .Little);
var t0 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 24)));
var t1 = mem.readInt(u32, &x, .Little);
var t1 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 24)));
var t2 = mem.readInt(u32, &x, .Little);
var t2 = mem.readInt(u32, &x, .little);
x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 24)));
var t3 = mem.readInt(u32, &x, .Little);
var t3 = mem.readInt(u32, &x, .little);
t0 ^= round_key.repr[0];
t1 ^= round_key.repr[1];
@ -349,14 +349,14 @@ fn KeySchedule(comptime Aes: type) type {
// Apply sbox_encrypt to each byte in w.
fn func(w: u32) u32 {
const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(w)), @as(u8, @truncate(w >> 8)), @as(u8, @truncate(w >> 16)), @as(u8, @truncate(w >> 24)));
return mem.readInt(u32, &x, .Little);
return mem.readInt(u32, &x, .little);
}
}.func;
var round_keys: [rounds + 1]Block = undefined;
comptime var i: usize = 0;
inline while (i < words_in_key) : (i += 1) {
round_keys[i / 4].repr[i % 4] = mem.readInt(u32, key[4 * i ..][0..4], .Big);
round_keys[i / 4].repr[i % 4] = mem.readInt(u32, key[4 * i ..][0..4], .big);
}
inline while (i < round_keys.len * 4) : (i += 1) {
var t = round_keys[(i - 1) / 4].repr[(i - 1) % 4];


@ -33,7 +33,7 @@ fn AesGcm(comptime Aes: anytype) type {
var t: [16]u8 = undefined;
var j: [16]u8 = undefined;
j[0..nonce_length].* = npub;
mem.writeInt(u32, j[nonce_length..][0..4], 1, .Big);
mem.writeInt(u32, j[nonce_length..][0..4], 1, .big);
aes.encrypt(&t, &j);
const block_count = (math.divCeil(usize, ad.len, Ghash.block_length) catch unreachable) + (math.divCeil(usize, c.len, Ghash.block_length) catch unreachable) + 1;
@ -41,14 +41,14 @@ fn AesGcm(comptime Aes: anytype) type {
mac.update(ad);
mac.pad();
mem.writeInt(u32, j[nonce_length..][0..4], 2, .Big);
modes.ctr(@TypeOf(aes), aes, c, m, j, std.builtin.Endian.Big);
mem.writeInt(u32, j[nonce_length..][0..4], 2, .big);
modes.ctr(@TypeOf(aes), aes, c, m, j, std.builtin.Endian.big);
mac.update(c[0..m.len][0..]);
mac.pad();
var final_block = h;
mem.writeInt(u64, final_block[0..8], ad.len * 8, .Big);
mem.writeInt(u64, final_block[8..16], m.len * 8, .Big);
mem.writeInt(u64, final_block[0..8], ad.len * 8, .big);
mem.writeInt(u64, final_block[8..16], m.len * 8, .big);
mac.update(&final_block);
mac.final(tag);
for (t, 0..) |x, i| {
@ -75,7 +75,7 @@ fn AesGcm(comptime Aes: anytype) type {
var t: [16]u8 = undefined;
var j: [16]u8 = undefined;
j[0..nonce_length].* = npub;
mem.writeInt(u32, j[nonce_length..][0..4], 1, .Big);
mem.writeInt(u32, j[nonce_length..][0..4], 1, .big);
aes.encrypt(&t, &j);
const block_count = (math.divCeil(usize, ad.len, Ghash.block_length) catch unreachable) + (math.divCeil(usize, c.len, Ghash.block_length) catch unreachable) + 1;
@ -87,8 +87,8 @@ fn AesGcm(comptime Aes: anytype) type {
mac.pad();
var final_block = h;
mem.writeInt(u64, final_block[0..8], ad.len * 8, .Big);
mem.writeInt(u64, final_block[8..16], m.len * 8, .Big);
mem.writeInt(u64, final_block[0..8], ad.len * 8, .big);
mem.writeInt(u64, final_block[8..16], m.len * 8, .big);
mac.update(&final_block);
var computed_tag: [Ghash.mac_length]u8 = undefined;
mac.final(&computed_tag);
@ -103,8 +103,8 @@ fn AesGcm(comptime Aes: anytype) type {
return error.AuthenticationFailed;
}
mem.writeInt(u32, j[nonce_length..][0..4], 2, .Big);
modes.ctr(@TypeOf(aes), aes, m, c, j, std.builtin.Endian.Big);
mem.writeInt(u32, j[nonce_length..][0..4], 2, .big);
modes.ctr(@TypeOf(aes), aes, m, c, j, std.builtin.Endian.big);
}
};
}


@ -29,10 +29,10 @@ fn AesOcb(comptime Aes: anytype) type {
upto: usize,
inline fn double(l: Block) Block {
const l_ = mem.readInt(u128, &l, .Big);
const l_ = mem.readInt(u128, &l, .big);
const l_2 = (l_ << 1) ^ (0x87 & -%(l_ >> 127));
var l2: Block = undefined;
mem.writeInt(u128, &l2, l_2, .Big);
mem.writeInt(u128, &l2, l_2, .big);
return l2;
}
@ -94,10 +94,10 @@ fn AesOcb(comptime Aes: anytype) type {
nx[15] &= 0xc0;
var ktop_: Block = undefined;
aes_enc_ctx.encrypt(&ktop_, &nx);
const ktop = mem.readInt(u128, &ktop_, .Big);
const ktop = mem.readInt(u128, &ktop_, .big);
var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56)));
var offset: Block = undefined;
mem.writeInt(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom)))), .Big);
mem.writeInt(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom)))), .big);
return offset;
}


@ -110,27 +110,27 @@ fn initHash(
var parameters: [24]u8 = undefined;
var tmp: [4]u8 = undefined;
var b2 = Blake2b512.init(.{});
mem.writeInt(u32, parameters[0..4], params.p, .Little);
mem.writeInt(u32, parameters[4..8], @as(u32, @intCast(dk_len)), .Little);
mem.writeInt(u32, parameters[8..12], params.m, .Little);
mem.writeInt(u32, parameters[12..16], params.t, .Little);
mem.writeInt(u32, parameters[16..20], version, .Little);
mem.writeInt(u32, parameters[20..24], @intFromEnum(mode), .Little);
mem.writeInt(u32, parameters[0..4], params.p, .little);
mem.writeInt(u32, parameters[4..8], @as(u32, @intCast(dk_len)), .little);
mem.writeInt(u32, parameters[8..12], params.m, .little);
mem.writeInt(u32, parameters[12..16], params.t, .little);
mem.writeInt(u32, parameters[16..20], version, .little);
mem.writeInt(u32, parameters[20..24], @intFromEnum(mode), .little);
b2.update(&parameters);
mem.writeInt(u32, &tmp, @as(u32, @intCast(password.len)), .Little);
mem.writeInt(u32, &tmp, @as(u32, @intCast(password.len)), .little);
b2.update(&tmp);
b2.update(password);
mem.writeInt(u32, &tmp, @as(u32, @intCast(salt.len)), .Little);
mem.writeInt(u32, &tmp, @as(u32, @intCast(salt.len)), .little);
b2.update(&tmp);
b2.update(salt);
const secret = params.secret orelse "";
std.debug.assert(secret.len <= max_int);
mem.writeInt(u32, &tmp, @as(u32, @intCast(secret.len)), .Little);
mem.writeInt(u32, &tmp, @as(u32, @intCast(secret.len)), .little);
b2.update(&tmp);
b2.update(secret);
const ad = params.ad orelse "";
std.debug.assert(ad.len <= max_int);
mem.writeInt(u32, &tmp, @as(u32, @intCast(ad.len)), .Little);
mem.writeInt(u32, &tmp, @as(u32, @intCast(ad.len)), .little);
b2.update(&tmp);
b2.update(ad);
b2.final(h0[0..Blake2b512.digest_length]);
@ -140,7 +140,7 @@ fn initHash(
fn blake2bLong(out: []u8, in: []const u8) void {
const H = Blake2b512;
var outlen_bytes: [4]u8 = undefined;
mem.writeInt(u32, &outlen_bytes, @as(u32, @intCast(out.len)), .Little);
mem.writeInt(u32, &outlen_bytes, @as(u32, @intCast(out.len)), .little);
var out_buf: [H.digest_length]u8 = undefined;
@ -183,18 +183,18 @@ fn initBlocks(
var lane: u24 = 0;
while (lane < threads) : (lane += 1) {
const j = lane * (memory / threads);
mem.writeInt(u32, h0[Blake2b512.digest_length + 4 ..][0..4], lane, .Little);
mem.writeInt(u32, h0[Blake2b512.digest_length + 4 ..][0..4], lane, .little);
mem.writeInt(u32, h0[Blake2b512.digest_length..][0..4], 0, .Little);
mem.writeInt(u32, h0[Blake2b512.digest_length..][0..4], 0, .little);
blake2bLong(&block0, h0);
for (&blocks.items[j + 0], 0..) |*v, i| {
v.* = mem.readInt(u64, block0[i * 8 ..][0..8], .Little);
v.* = mem.readInt(u64, block0[i * 8 ..][0..8], .little);
}
mem.writeInt(u32, h0[Blake2b512.digest_length..][0..4], 1, .Little);
mem.writeInt(u32, h0[Blake2b512.digest_length..][0..4], 1, .little);
blake2bLong(&block0, h0);
for (&blocks.items[j + 1], 0..) |*v, i| {
v.* = mem.readInt(u64, block0[i * 8 ..][0..8], .Little);
v.* = mem.readInt(u64, block0[i * 8 ..][0..8], .little);
}
}
}
@ -433,7 +433,7 @@ fn finalize(
}
var block: [1024]u8 = undefined;
for (blocks.items[memory - 1], 0..) |v, i| {
mem.writeInt(u64, block[i * 8 ..][0..8], v, .Little);
mem.writeInt(u64, block[i * 8 ..][0..8], v, .little);
}
blake2bLong(out, &block);
}


@ -96,8 +96,8 @@ pub fn State(comptime endian: std.builtin.Endian) type {
/// XOR a byte into the state at a given offset.
pub fn addByte(self: *Self, byte: u8, offset: usize) void {
const z = switch (endian) {
.Big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)),
.Little => 8 * @as(u6, @truncate(offset % 8)),
.big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)),
.little => 8 * @as(u6, @truncate(offset % 8)),
};
self.st[offset / 8] ^= @as(u64, byte) << z;
}
@ -215,7 +215,7 @@ pub fn State(comptime endian: std.builtin.Endian) type {
}
test "ascon" {
const Ascon = State(.Big);
const Ascon = State(.big);
const bytes = [_]u8{0x01} ** Ascon.block_bytes;
var st = Ascon.init(bytes);
var out: [Ascon.block_bytes]u8 = undefined;


@ -451,7 +451,7 @@ pub fn bcrypt(
var ct: [ct_length]u8 = undefined;
for (cdata, 0..) |c, i| {
mem.writeInt(u32, ct[i * 4 ..][0..4], c, .Big);
mem.writeInt(u32, ct[i * 4 ..][0..4], c, .big);
}
return ct[0..dk_length].*;
}
@ -547,7 +547,7 @@ const pbkdf_prf = struct {
// copy out
var out: [32]u8 = undefined;
for (cdata, 0..) |v, i| {
std.mem.writeInt(u32, out[4 * i ..][0..4], v, .Little);
std.mem.writeInt(u32, out[4 * i ..][0..4], v, .little);
}
// zap


@ -85,12 +85,12 @@ pub fn Blake2s(comptime out_bits: usize) type {
d.buf_len = 0;
if (options.salt) |salt| {
d.h[4] ^= mem.readInt(u32, salt[0..4], .Little);
d.h[5] ^= mem.readInt(u32, salt[4..8], .Little);
d.h[4] ^= mem.readInt(u32, salt[0..4], .little);
d.h[5] ^= mem.readInt(u32, salt[4..8], .little);
}
if (options.context) |context| {
d.h[6] ^= mem.readInt(u32, context[0..4], .Little);
d.h[7] ^= mem.readInt(u32, context[4..8], .Little);
d.h[6] ^= mem.readInt(u32, context[0..4], .little);
d.h[7] ^= mem.readInt(u32, context[4..8], .little);
}
if (key_len > 0) {
@memset(d.buf[key_len..], 0);
@ -143,7 +143,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
var v: [16]u32 = undefined;
for (&m, 0..) |*r, i| {
r.* = mem.readInt(u32, b[4 * i ..][0..4], .Little);
r.* = mem.readInt(u32, b[4 * i ..][0..4], .little);
}
var k: usize = 0;
@ -521,12 +521,12 @@ pub fn Blake2b(comptime out_bits: usize) type {
d.buf_len = 0;
if (options.salt) |salt| {
d.h[4] ^= mem.readInt(u64, salt[0..8], .Little);
d.h[5] ^= mem.readInt(u64, salt[8..16], .Little);
d.h[4] ^= mem.readInt(u64, salt[0..8], .little);
d.h[5] ^= mem.readInt(u64, salt[8..16], .little);
}
if (options.context) |context| {
d.h[6] ^= mem.readInt(u64, context[0..8], .Little);
d.h[7] ^= mem.readInt(u64, context[8..16], .Little);
d.h[6] ^= mem.readInt(u64, context[0..8], .little);
d.h[7] ^= mem.readInt(u64, context[8..16], .little);
}
if (key_len > 0) {
@memset(d.buf[key_len..], 0);
@ -579,7 +579,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
var v: [16]u64 = undefined;
for (&m, 0..) |*r, i| {
r.* = mem.readInt(u64, b[8 * i ..][0..8], .Little);
r.* = mem.readInt(u64, b[8 * i ..][0..8], .little);
}
var k: usize = 0;


@ -212,7 +212,7 @@ fn first8Words(words: [16]u32) [8]u32 {
fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
var words: [count]u32 = undefined;
for (&words, 0..) |*word, i| {
word.* = mem.readInt(u32, bytes[4 * i ..][0..4], .Little);
word.* = mem.readInt(u32, bytes[4 * i ..][0..4], .little);
}
return words;
}
@ -252,7 +252,7 @@ const Output = struct {
var word_counter: usize = 0;
while (out_word_it.next()) |out_word| {
var word_bytes: [4]u8 = undefined;
mem.writeInt(u32, &word_bytes, words[word_counter], .Little);
mem.writeInt(u32, &word_bytes, words[word_counter], .little);
@memcpy(out_word, word_bytes[0..out_word.len]);
word_counter += 1;
}


@ -87,10 +87,10 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
switch (degree) {
1 => {
const constant_le = Lane{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
return BlockVec{
constant_le,
@ -101,14 +101,14 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
},
2 => {
const constant_le = Lane{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
const n1 = @addWithOverflow(d[0], 1);
return BlockVec{
@ -123,22 +123,22 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
const n2 = @addWithOverflow(d[0], 2);
const n3 = @addWithOverflow(d[0], 3);
const constant_le = Lane{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
return BlockVec{
constant_le,
@ -218,10 +218,10 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
inline fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void {
for (0..dm) |d| {
for (0..4) |i| {
mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .Little);
mem.writeInt(u32, out[64 * d + 16 * i + 4 ..][0..4], x[i][1 + 4 * d], .Little);
mem.writeInt(u32, out[64 * d + 16 * i + 8 ..][0..4], x[i][2 + 4 * d], .Little);
mem.writeInt(u32, out[64 * d + 16 * i + 12 ..][0..4], x[i][3 + 4 * d], .Little);
mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .little);
mem.writeInt(u32, out[64 * d + 16 * i + 4 ..][0..4], x[i][1 + 4 * d], .little);
mem.writeInt(u32, out[64 * d + 16 * i + 8 ..][0..4], x[i][2 + 4 * d], .little);
mem.writeInt(u32, out[64 * d + 16 * i + 12 ..][0..4], x[i][3 + 4 * d], .little);
}
}
}
@ -309,20 +309,20 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
for (c, 0..) |_, i| {
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .Little);
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .little);
}
const ctx = initContext(keyToWords(key), c);
var x: BlockVec = undefined;
chacha20Core(x[0..], ctx);
var out: [32]u8 = undefined;
mem.writeInt(u32, out[0..4], x[0][0], .Little);
mem.writeInt(u32, out[4..8], x[0][1], .Little);
mem.writeInt(u32, out[8..12], x[0][2], .Little);
mem.writeInt(u32, out[12..16], x[0][3], .Little);
mem.writeInt(u32, out[16..20], x[3][0], .Little);
mem.writeInt(u32, out[20..24], x[3][1], .Little);
mem.writeInt(u32, out[24..28], x[3][2], .Little);
mem.writeInt(u32, out[28..32], x[3][3], .Little);
mem.writeInt(u32, out[0..4], x[0][0], .little);
mem.writeInt(u32, out[4..8], x[0][1], .little);
mem.writeInt(u32, out[8..12], x[0][2], .little);
mem.writeInt(u32, out[12..16], x[0][3], .little);
mem.writeInt(u32, out[16..20], x[3][0], .little);
mem.writeInt(u32, out[20..24], x[3][1], .little);
mem.writeInt(u32, out[24..28], x[3][2], .little);
mem.writeInt(u32, out[28..32], x[3][3], .little);
return out;
}
};
@ -336,10 +336,10 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn initContext(key: [8]u32, d: [4]u32) BlockVec {
const c = "expand 32-byte k";
const constant_le = comptime [4]u32{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
return BlockVec{
constant_le[0], constant_le[1], constant_le[2], constant_le[3],
@ -396,10 +396,10 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
inline fn hashToBytes(out: *[64]u8, x: BlockVec) void {
for (0..4) |i| {
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .Little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .Little);
mem.writeInt(u32, out[16 * i + 8 ..][0..4], x[i * 4 + 2], .Little);
mem.writeInt(u32, out[16 * i + 12 ..][0..4], x[i * 4 + 3], .Little);
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .little);
mem.writeInt(u32, out[16 * i + 8 ..][0..4], x[i * 4 + 2], .little);
mem.writeInt(u32, out[16 * i + 12 ..][0..4], x[i * 4 + 3], .little);
}
}
@ -477,20 +477,20 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
for (c, 0..) |_, i| {
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .Little);
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .little);
}
const ctx = initContext(keyToWords(key), c);
var x: BlockVec = undefined;
chacha20Core(x[0..], ctx);
var out: [32]u8 = undefined;
mem.writeInt(u32, out[0..4], x[0], .Little);
mem.writeInt(u32, out[4..8], x[1], .Little);
mem.writeInt(u32, out[8..12], x[2], .Little);
mem.writeInt(u32, out[12..16], x[3], .Little);
mem.writeInt(u32, out[16..20], x[12], .Little);
mem.writeInt(u32, out[20..24], x[13], .Little);
mem.writeInt(u32, out[24..28], x[14], .Little);
mem.writeInt(u32, out[28..32], x[15], .Little);
mem.writeInt(u32, out[0..4], x[0], .little);
mem.writeInt(u32, out[4..8], x[1], .little);
mem.writeInt(u32, out[8..12], x[2], .little);
mem.writeInt(u32, out[12..16], x[3], .little);
mem.writeInt(u32, out[16..20], x[12], .little);
mem.writeInt(u32, out[20..24], x[13], .little);
mem.writeInt(u32, out[24..28], x[14], .little);
mem.writeInt(u32, out[28..32], x[15], .little);
return out;
}
};
@ -519,7 +519,7 @@ fn ChaChaImpl(comptime rounds_nb: usize) type {
fn keyToWords(key: [32]u8) [8]u32 {
var k: [8]u32 = undefined;
for (0..8) |i| {
k[i] = mem.readInt(u32, key[i * 4 ..][0..4], .Little);
k[i] = mem.readInt(u32, key[i * 4 ..][0..4], .little);
}
return k;
}
@ -552,9 +552,9 @@ fn ChaChaIETF(comptime rounds_nb: usize) type {
var d: [4]u32 = undefined;
d[0] = counter;
d[1] = mem.readInt(u32, nonce[0..4], .Little);
d[2] = mem.readInt(u32, nonce[4..8], .Little);
d[3] = mem.readInt(u32, nonce[8..12], .Little);
d[1] = mem.readInt(u32, nonce[0..4], .little);
d[2] = mem.readInt(u32, nonce[4..8], .little);
d[3] = mem.readInt(u32, nonce[8..12], .little);
ChaChaImpl(rounds_nb).chacha20Xor(out, in, keyToWords(key), d, false);
}
@ -564,9 +564,9 @@ fn ChaChaIETF(comptime rounds_nb: usize) type {
var d: [4]u32 = undefined;
d[0] = counter;
d[1] = mem.readInt(u32, nonce[0..4], .Little);
d[2] = mem.readInt(u32, nonce[4..8], .Little);
d[3] = mem.readInt(u32, nonce[8..12], .Little);
d[1] = mem.readInt(u32, nonce[0..4], .little);
d[2] = mem.readInt(u32, nonce[4..8], .little);
d[3] = mem.readInt(u32, nonce[8..12], .little);
ChaChaImpl(rounds_nb).chacha20Stream(out, keyToWords(key), d, false);
}
};
@ -592,8 +592,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
var c: [4]u32 = undefined;
c[0] = @as(u32, @truncate(counter));
c[1] = @as(u32, @truncate(counter >> 32));
c[2] = mem.readInt(u32, nonce[0..4], .Little);
c[3] = mem.readInt(u32, nonce[4..8], .Little);
c[2] = mem.readInt(u32, nonce[0..4], .little);
c[3] = mem.readInt(u32, nonce[4..8], .little);
ChaChaImpl(rounds_nb).chacha20Xor(out, in, k, c, true);
}
@ -605,8 +605,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
var c: [4]u32 = undefined;
c[0] = @as(u32, @truncate(counter));
c[1] = @as(u32, @truncate(counter >> 32));
c[2] = mem.readInt(u32, nonce[0..4], .Little);
c[3] = mem.readInt(u32, nonce[4..8], .Little);
c[2] = mem.readInt(u32, nonce[0..4], .little);
c[3] = mem.readInt(u32, nonce[4..8], .little);
ChaChaImpl(rounds_nb).chacha20Stream(out, k, c, true);
}
};
@ -672,8 +672,8 @@ fn ChaChaPoly1305(comptime rounds_nb: usize) type {
mac.update(zeros[0..padding]);
}
var lens: [16]u8 = undefined;
mem.writeInt(u64, lens[0..8], ad.len, .Little);
mem.writeInt(u64, lens[8..16], m.len, .Little);
mem.writeInt(u64, lens[0..8], ad.len, .little);
mem.writeInt(u64, lens[8..16], m.len, .little);
mac.update(lens[0..]);
mac.final(tag);
}
@ -708,8 +708,8 @@ fn ChaChaPoly1305(comptime rounds_nb: usize) type {
mac.update(zeros[0..padding]);
}
var lens: [16]u8 = undefined;
mem.writeInt(u64, lens[0..8], ad.len, .Little);
mem.writeInt(u64, lens[8..16], c.len, .Little);
mem.writeInt(u64, lens[0..8], ad.len, .little);
mem.writeInt(u64, lens[8..16], c.len, .little);
mac.update(lens[0..]);
var computed_tag: [16]u8 = undefined;
mac.final(computed_tag[0..]);


@ -76,7 +76,7 @@ pub fn Cmac(comptime BlockCipher: type) type {
fn double(l: Block) Block {
const Int = std.meta.Int(.unsigned, block_length * 8);
const l_ = mem.readInt(Int, &l, .Big);
const l_ = mem.readInt(Int, &l, .big);
const l_2 = switch (block_length) {
8 => (l_ << 1) ^ (0x1b & -%(l_ >> 63)), // mod x^64 + x^4 + x^3 + x + 1
16 => (l_ << 1) ^ (0x87 & -%(l_ >> 127)), // mod x^128 + x^7 + x^2 + x + 1
@ -85,7 +85,7 @@ pub fn Cmac(comptime BlockCipher: type) type {
else => @compileError("unsupported block length"),
};
var l2: Block = undefined;
mem.writeInt(Int, &l2, l_2, .Big);
mem.writeInt(Int, &l2, l_2, .big);
return l2;
}
};
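
The `-%` in double() above is a branch-free mask: wrapping negation of the shifted-out top bit yields either all-ones or zero, so the reduction polynomial is XORed in only when needed. A small sketch of the trick (illustrative, not part of the diff):

const std = @import("std");

test "constant-time reduction mask (sketch)" {
    // 0 -% msb is all ones when the top bit was set, and zero otherwise,
    // so `(l << 1) ^ (0x87 & mask)` conditionally applies the constant without branching.
    try std.testing.expectEqual(~@as(u128, 0), 0 -% @as(u128, 1));
    try std.testing.expectEqual(@as(u128, 0), 0 -% @as(u128, 0));
}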

View file

@ -209,17 +209,17 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
const k = deterministicScalar(h_slice.*, self.secret_key.bytes, self.noise);
const p = try Curve.basePoint.mul(k.toBytes(.Big), .Big);
const xs = p.affineCoordinates().x.toBytes(.Big);
const p = try Curve.basePoint.mul(k.toBytes(.big), .big);
const xs = p.affineCoordinates().x.toBytes(.big);
const r = reduceToScalar(Curve.Fe.encoded_length, xs);
if (r.isZero()) return error.IdentityElement;
const k_inv = k.invert();
const zrs = z.add(r.mul(try Curve.scalar.Scalar.fromBytes(self.secret_key.bytes, .Big)));
const zrs = z.add(r.mul(try Curve.scalar.Scalar.fromBytes(self.secret_key.bytes, .big)));
const s = k_inv.mul(zrs);
if (s.isZero()) return error.IdentityElement;
return Signature{ .r = r.toBytes(.Big), .s = s.toBytes(.Big) };
return Signature{ .r = r.toBytes(.big), .s = s.toBytes(.big) };
}
};
@ -232,8 +232,8 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
public_key: PublicKey,
fn init(sig: Signature, public_key: PublicKey) (IdentityElementError || NonCanonicalError)!Verifier {
const r = try Curve.scalar.Scalar.fromBytes(sig.r, .Big);
const s = try Curve.scalar.Scalar.fromBytes(sig.s, .Big);
const r = try Curve.scalar.Scalar.fromBytes(sig.r, .big);
const s = try Curve.scalar.Scalar.fromBytes(sig.s, .big);
if (r.isZero() or s.isZero()) return error.IdentityElement;
return Verifier{
@ -262,11 +262,11 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
}
const s_inv = self.s.invert();
const v1 = z.mul(s_inv).toBytes(.Little);
const v2 = self.r.mul(s_inv).toBytes(.Little);
const v1g = try Curve.basePoint.mulPublic(v1, .Little);
const v2pk = try self.public_key.p.mulPublic(v2, .Little);
const vxs = v1g.add(v2pk).affineCoordinates().x.toBytes(.Big);
const v1 = z.mul(s_inv).toBytes(.little);
const v2 = self.r.mul(s_inv).toBytes(.little);
const v1g = try Curve.basePoint.mulPublic(v1, .little);
const v2pk = try self.public_key.p.mulPublic(v2, .little);
const vxs = v1g.add(v2pk).affineCoordinates().x.toBytes(.big);
const vr = reduceToScalar(Curve.Fe.encoded_length, vxs);
if (!self.r.equivalent(vr)) {
return error.SignatureVerificationFailed;
@ -295,13 +295,13 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
}
const h = [_]u8{0x00} ** Hash.digest_length;
const k0 = [_]u8{0x01} ** SecretKey.encoded_length;
const secret_key = deterministicScalar(h, k0, seed_).toBytes(.Big);
const secret_key = deterministicScalar(h, k0, seed_).toBytes(.big);
return fromSecretKey(SecretKey{ .bytes = secret_key });
}
/// Return the public key corresponding to the secret key.
pub fn fromSecretKey(secret_key: SecretKey) IdentityElementError!KeyPair {
const public_key = try Curve.basePoint.mul(secret_key.bytes, .Big);
const public_key = try Curve.basePoint.mul(secret_key.bytes, .big);
return KeyPair{ .secret_key = secret_key, .public_key = PublicKey{ .p = public_key } };
}
@ -326,11 +326,11 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
if (unreduced_len >= 48) {
var xs = [_]u8{0} ** 64;
@memcpy(xs[xs.len - s.len ..], s[0..]);
return Curve.scalar.Scalar.fromBytes64(xs, .Big);
return Curve.scalar.Scalar.fromBytes64(xs, .big);
}
var xs = [_]u8{0} ** 48;
@memcpy(xs[xs.len - s.len ..], s[0..]);
return Curve.scalar.Scalar.fromBytes48(xs, .Big);
return Curve.scalar.Scalar.fromBytes48(xs, .big);
}
// Create a deterministic scalar according to a secret key and optional noise.
@ -362,7 +362,7 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
Hmac.create(m_v, m_v, &k);
@memcpy(t[t_off..t_end], m_v[0 .. t_end - t_off]);
}
if (Curve.scalar.Scalar.fromBytes(t, .Big)) |s| return s else |_| {}
if (Curve.scalar.Scalar.fromBytes(t, .big)) |s| return s else |_| {}
m_i.* = 0x00;
Hmac.create(&k, m[0 .. m_v.len + 1], &k);
Hmac.create(m_v, m_v, &k);
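
For context, the end-to-end flow these hunks feed — key generation, deterministic signing, verification — looks roughly like the sketch below, assuming the EcdsaP256Sha256 instantiation exported by std.crypto.sign.ecdsa (illustrative, not part of the diff):

const std = @import("std");
const Ecdsa = std.crypto.sign.ecdsa.EcdsaP256Sha256;

test "ecdsa round trip (sketch)" {
    const kp = try Ecdsa.KeyPair.create(null); // null seed: use the CSPRNG
    const msg = "hello, lower-case endianness";
    const sig = try kp.sign(msg, null); // deterministic nonce, no extra noise
    try sig.verify(msg, kp.public_key);
}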

View file

@ -137,8 +137,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
@memset(bytes, 0);
var shift: usize = 0;
var out_i: usize = switch (endian) {
.Big => bytes.len - 1,
.Little => 0,
.big => bytes.len - 1,
.little => 0,
};
for (0..self.limbs.len) |i| {
var remaining_bits = t_bits;
@ -150,7 +150,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
remaining_bits -= consumed;
shift = 0;
switch (endian) {
.Big => {
.big => {
if (out_i == 0) {
if (i != self.limbs.len - 1 or limb != 0) {
return error.Overflow;
@ -159,7 +159,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
}
out_i -= 1;
},
.Little => {
.little => {
out_i += 1;
if (out_i == bytes.len) {
if (i != self.limbs.len - 1 or limb != 0) {
@ -182,8 +182,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
var out = Self.zero;
var out_i: usize = 0;
var i: usize = switch (endian) {
.Big => bytes.len - 1,
.Little => 0,
.big => bytes.len - 1,
.little => 0,
};
while (true) {
const bi = bytes[i];
@ -203,11 +203,11 @@ pub fn Uint(comptime max_bits: comptime_int) type {
out.limbs.set(out_i, overflow);
}
switch (endian) {
.Big => {
.big => {
if (i == 0) break;
i -= 1;
},
.Little => {
.little => {
i += 1;
if (i == bytes.len) break;
},
@ -227,7 +227,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
Limb,
x.limbs.constSlice(),
y.limbs.constSlice(),
.Little,
.little,
);
}
@ -667,15 +667,15 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var out = self.one();
self.toMontgomery(&out) catch unreachable;
if (public and e.len < 3 or (e.len == 3 and e[if (endian == .Big) 0 else 2] <= 0b1111)) {
if (public and e.len < 3 or (e.len == 3 and e[if (endian == .big) 0 else 2] <= 0b1111)) {
// Do not use a precomputation table for short, public exponents
var x_m = x;
if (x.montgomery == false) {
self.toMontgomery(&x_m) catch unreachable;
}
var s = switch (endian) {
.Big => 0,
.Little => e.len - 1,
.big => 0,
.little => e.len - 1,
};
while (true) {
const b = e[s];
@ -690,11 +690,11 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
if (j == 0) break;
}
switch (endian) {
.Big => {
.big => {
s += 1;
if (s == e.len) break;
},
.Little => {
.little => {
if (s == 0) break;
s -= 1;
},
@ -711,8 +711,8 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
}
var t0 = self.zero;
var s = switch (endian) {
.Big => 0,
.Little => e.len - 1,
.big => 0,
.little => e.len - 1,
};
while (true) {
const b = e[s];
@ -737,11 +737,11 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
}
}
switch (endian) {
.Big => {
.big => {
s += 1;
if (s == e.len) break;
},
.Little => {
.little => {
if (s == 0) break;
s -= 1;
},
@ -791,10 +791,10 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var e_normalized = Fe{ .v = e.v.normalize() };
var buf_: [Fe.encoded_bytes]u8 = undefined;
var buf = buf_[0 .. math.divCeil(usize, e_normalized.v.limbs_count() * t_bits, 8) catch unreachable];
e_normalized.toBytes(buf, .Little) catch unreachable;
e_normalized.toBytes(buf, .little) catch unreachable;
const leading = @clz(e_normalized.v.limbs.get(e_normalized.v.limbs_count() - carry_bits));
buf = buf[0 .. buf.len - leading / 8];
return self.powWithEncodedPublicExponent(x, buf, .Little);
return self.powWithEncodedPublicExponent(x, buf, .little);
}
/// Returns x^e (mod m), with the exponent provided as a byte string.
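
The encode/decode loops above walk the byte string in opposite directions depending on the requested byte order; the difference is just the serialization order, as a standalone check shows (illustrative, not part of the diff):

const std = @import("std");

test "big vs little endian serialization (sketch)" {
    var le: [4]u8 = undefined;
    var be: [4]u8 = undefined;
    std.mem.writeInt(u32, &le, 0x01020304, .little);
    std.mem.writeInt(u32, &be, 0x01020304, .big);
    try std.testing.expectEqualSlices(u8, &.{ 0x04, 0x03, 0x02, 0x01 }, &le);
    try std.testing.expectEqualSlices(u8, &.{ 0x01, 0x02, 0x03, 0x04 }, &be);
}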

View file

@ -13,7 +13,7 @@ const Precomp = u128;
/// It is not a general purpose hash function - The key must be secret, unpredictable and never reused.
///
/// GHASH is typically used to compute the authentication tag in the AES-GCM construction.
pub const Ghash = Hash(.Big, true);
pub const Ghash = Hash(.big, true);
/// POLYVAL is a universal hash function that uses multiplication by a fixed
/// parameter within a Galois field.
@ -21,7 +21,7 @@ pub const Ghash = Hash(.Big, true);
/// It is not a general purpose hash function - The key must be secret, unpredictable and never reused.
///
/// POLYVAL is typically used to compute the authentication tag in the AES-GCM-SIV construction.
pub const Polyval = Hash(.Little, false);
pub const Polyval = Hash(.little, false);
fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
return struct {
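
GHASH and POLYVAL share this implementation and differ only in the field-element byte order (big vs little endian) and the key pre-shift selected here. One-shot use, assuming the Ghash export under std.crypto.onetimeauth and its create helper (illustrative, not part of the diff):

const std = @import("std");
const Ghash = std.crypto.onetimeauth.Ghash;

test "ghash one-shot (sketch)" {
    const key = [_]u8{0x42} ** Ghash.key_length; // must be secret and never reused in practice
    var tag: [Ghash.mac_length]u8 = undefined;
    Ghash.create(&tag, "data to authenticate", &key);
    // tag now holds the 16-byte authenticator
}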

View file

@ -4,7 +4,7 @@ const debug = std.debug;
const mem = std.mem;
const math = std.math;
const testing = std.testing;
const Ascon = crypto.core.Ascon(.Big);
const Ascon = crypto.core.Ascon(.big);
const AuthenticationError = crypto.errors.AuthenticationError;
/// ISAPv2 is an authenticated encryption system hardened against side channels and fault attacks.
@ -55,9 +55,9 @@ pub const IsapA128A = struct {
fn trickle(k: [16]u8, iv: [8]u8, y: []const u8, comptime out_len: usize) [out_len]u8 {
var isap = IsapA128A{
.st = Ascon.initFromWords(.{
mem.readInt(u64, k[0..8], .Big),
mem.readInt(u64, k[8..16], .Big),
mem.readInt(u64, iv[0..8], .Big),
mem.readInt(u64, k[0..8], .big),
mem.readInt(u64, k[8..16], .big),
mem.readInt(u64, iv[0..8], .big),
0,
0,
}),
@ -85,9 +85,9 @@ pub const IsapA128A = struct {
fn mac(c: []const u8, ad: []const u8, npub: [16]u8, key: [16]u8) [16]u8 {
var isap = IsapA128A{
.st = Ascon.initFromWords(.{
mem.readInt(u64, npub[0..8], .Big),
mem.readInt(u64, npub[8..16], .Big),
mem.readInt(u64, iv1[0..], .Big),
mem.readInt(u64, npub[0..8], .big),
mem.readInt(u64, npub[8..16], .big),
mem.readInt(u64, iv1[0..], .big),
0,
0,
}),
@ -116,11 +116,11 @@ pub const IsapA128A = struct {
const nb = trickle(key, iv3, npub[0..], 24);
var isap = IsapA128A{
.st = Ascon.initFromWords(.{
mem.readInt(u64, nb[0..8], .Big),
mem.readInt(u64, nb[8..16], .Big),
mem.readInt(u64, nb[16..24], .Big),
mem.readInt(u64, npub[0..8], .Big),
mem.readInt(u64, npub[8..16], .Big),
mem.readInt(u64, nb[0..8], .big),
mem.readInt(u64, nb[8..16], .big),
mem.readInt(u64, nb[16..24], .big),
mem.readInt(u64, npub[0..8], .big),
mem.readInt(u64, npub[8..16], .big),
}),
};
isap.st.permuteR(6);

View file

@ -45,7 +45,7 @@ pub fn KeccakF(comptime f: u11) type {
pub fn init(bytes: [block_bytes]u8) Self {
var self: Self = undefined;
inline for (&self.st, 0..) |*r, i| {
r.* = mem.readInt(T, bytes[@sizeOf(T) * i ..][0..@sizeOf(T)], .Little);
r.* = mem.readInt(T, bytes[@sizeOf(T) * i ..][0..@sizeOf(T)], .little);
}
return self;
}
@ -66,12 +66,12 @@ pub fn KeccakF(comptime f: u11) type {
pub fn setBytes(self: *Self, bytes: []const u8) void {
var i: usize = 0;
while (i + @sizeOf(T) <= bytes.len) : (i += @sizeOf(T)) {
self.st[i / @sizeOf(T)] = mem.readInt(T, bytes[i..][0..@sizeOf(T)], .Little);
self.st[i / @sizeOf(T)] = mem.readInt(T, bytes[i..][0..@sizeOf(T)], .little);
}
if (i < bytes.len) {
var padded = [_]u8{0} ** @sizeOf(T);
@memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / @sizeOf(T)] = mem.readInt(T, padded[0..], .Little);
self.st[i / @sizeOf(T)] = mem.readInt(T, padded[0..], .little);
}
}
@ -85,12 +85,12 @@ pub fn KeccakF(comptime f: u11) type {
pub fn addBytes(self: *Self, bytes: []const u8) void {
var i: usize = 0;
while (i + @sizeOf(T) <= bytes.len) : (i += @sizeOf(T)) {
self.st[i / @sizeOf(T)] ^= mem.readInt(T, bytes[i..][0..@sizeOf(T)], .Little);
self.st[i / @sizeOf(T)] ^= mem.readInt(T, bytes[i..][0..@sizeOf(T)], .little);
}
if (i < bytes.len) {
var padded = [_]u8{0} ** @sizeOf(T);
@memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / @sizeOf(T)] ^= mem.readInt(T, padded[0..], .Little);
self.st[i / @sizeOf(T)] ^= mem.readInt(T, padded[0..], .little);
}
}
@ -98,11 +98,11 @@ pub fn KeccakF(comptime f: u11) type {
pub fn extractBytes(self: *Self, out: []u8) void {
var i: usize = 0;
while (i + @sizeOf(T) <= out.len) : (i += @sizeOf(T)) {
mem.writeInt(T, out[i..][0..@sizeOf(T)], self.st[i / @sizeOf(T)], .Little);
mem.writeInt(T, out[i..][0..@sizeOf(T)], self.st[i / @sizeOf(T)], .little);
}
if (i < out.len) {
var padded = [_]u8{0} ** @sizeOf(T);
mem.writeInt(T, padded[0..], self.st[i / @sizeOf(T)], .Little);
mem.writeInt(T, padded[0..], self.st[i / @sizeOf(T)], .little);
@memcpy(out[i..], padded[0 .. out.len - i]);
}
}

View file

@ -112,7 +112,7 @@ pub const Md5 = struct {
d.round(d.buf[0..]);
for (d.s, 0..) |s, j| {
mem.writeInt(u32, out[4 * j ..][0..4], s, .Little);
mem.writeInt(u32, out[4 * j ..][0..4], s, .little);
}
}
@ -121,7 +121,7 @@ pub const Md5 = struct {
var i: usize = 0;
while (i < 16) : (i += 1) {
s[i] = mem.readInt(u32, b[i * 4 ..][0..4], .Little);
s[i] = mem.readInt(u32, b[i * 4 ..][0..4], .little);
}
var v: [4]u32 = [_]u32{

View file

@ -51,13 +51,13 @@ pub fn Field(comptime params: FieldParams) type {
/// Reject non-canonical encodings of an element.
pub fn rejectNonCanonical(s_: [encoded_length]u8, endian: std.builtin.Endian) NonCanonicalError!void {
var s = if (endian == .Little) s_ else orderSwap(s_);
var s = if (endian == .little) s_ else orderSwap(s_);
const field_order_s = comptime fos: {
var fos: [encoded_length]u8 = undefined;
mem.writeInt(std.meta.Int(.unsigned, encoded_length * 8), &fos, field_order, .Little);
mem.writeInt(std.meta.Int(.unsigned, encoded_length * 8), &fos, field_order, .little);
break :fos fos;
};
if (crypto.utils.timingSafeCompare(u8, &s, &field_order_s, .Little) != .lt) {
if (crypto.utils.timingSafeCompare(u8, &s, &field_order_s, .little) != .lt) {
return error.NonCanonical;
}
}
@ -71,8 +71,8 @@ pub fn Field(comptime params: FieldParams) type {
/// Unpack a field element.
pub fn fromBytes(s_: [encoded_length]u8, endian: std.builtin.Endian) NonCanonicalError!Fe {
var s = if (endian == .Little) s_ else orderSwap(s_);
try rejectNonCanonical(s, .Little);
var s = if (endian == .little) s_ else orderSwap(s_);
try rejectNonCanonical(s, .little);
var limbs_z: NonMontgomeryDomainFieldElement = undefined;
fiat.fromBytes(&limbs_z, s);
var limbs: MontgomeryDomainFieldElement = undefined;
@ -86,7 +86,7 @@ pub fn Field(comptime params: FieldParams) type {
fiat.fromMontgomery(&limbs_z, fe.limbs);
var s: [encoded_length]u8 = undefined;
fiat.toBytes(&s, limbs_z);
return if (endian == .Little) s else orderSwap(s);
return if (endian == .little) s else orderSwap(s);
}
/// Element as an integer.
@ -95,14 +95,14 @@ pub fn Field(comptime params: FieldParams) type {
/// Create a field element from an integer.
pub fn fromInt(comptime x: IntRepr) NonCanonicalError!Fe {
var s: [encoded_length]u8 = undefined;
mem.writeInt(IntRepr, &s, x, .Little);
return fromBytes(s, .Little);
mem.writeInt(IntRepr, &s, x, .little);
return fromBytes(s, .little);
}
/// Return the field element as an integer.
pub fn toInt(fe: Fe) IntRepr {
const s = fe.toBytes(.Little);
return mem.readInt(IntRepr, &s, .Little);
const s = fe.toBytes(.little);
return mem.readInt(IntRepr, &s, .little);
}
/// Return true if the field element is zero.
@ -119,7 +119,7 @@ pub fn Field(comptime params: FieldParams) type {
/// Return true if the element is odd.
pub fn isOdd(fe: Fe) bool {
const s = fe.toBytes(.Little);
const s = fe.toBytes(.little);
return @as(u1, @truncate(s[0])) != 0;
}

View file

@ -87,15 +87,15 @@ pub const P256 = struct {
},
2, 3 => {
if (encoded.len != 32) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const x = try Fe.fromBytes(encoded[0..32].*, .big);
const y_is_odd = (encoding_type == 3);
const y = try recoverY(x, y_is_odd);
return P256{ .x = x, .y = y };
},
4 => {
if (encoded.len != 64) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const y = try Fe.fromBytes(encoded[32..64].*, .Big);
const x = try Fe.fromBytes(encoded[0..32].*, .big);
const y = try Fe.fromBytes(encoded[32..64].*, .big);
return P256.fromAffineCoordinates(.{ .x = x, .y = y });
},
else => return error.InvalidEncoding,
@ -107,7 +107,7 @@ pub const P256 = struct {
var out: [33]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
out[1..].* = xy.x.toBytes(.Big);
out[1..].* = xy.x.toBytes(.big);
return out;
}
@ -116,15 +116,15 @@ pub const P256 = struct {
var out: [65]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
out[1..33].* = xy.x.toBytes(.Big);
out[33..65].* = xy.y.toBytes(.Big);
out[1..33].* = xy.x.toBytes(.big);
out[33..65].* = xy.y.toBytes(.big);
return out;
}
/// Return a random point.
pub fn random() P256 {
const n = scalar.random(.Little);
return basePoint.mul(n, .Little) catch unreachable;
const n = scalar.random(.little);
return basePoint.mul(n, .little) catch unreachable;
}
/// Flip the sign of the X coordinate.
@ -400,7 +400,7 @@ pub const P256 = struct {
/// Multiply an elliptic curve point by a scalar.
/// Return error.IdentityElement if the result is the identity element.
pub fn mul(p: P256, s_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, false);
}
@ -412,7 +412,7 @@ pub const P256 = struct {
/// Multiply an elliptic curve point by a *PUBLIC* scalar *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulPublic(p: P256, s_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, true);
}
@ -424,8 +424,8 @@ pub const P256 = struct {
/// Double-base multiplication of public parameters - Compute (p1*s1)+(p2*s2) *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulDoubleBasePublic(p1: P256, s1_: [32]u8, p2: P256, s2_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s1 = if (endian == .Little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .Little) s2_ else Fe.orderSwap(s2_);
const s1 = if (endian == .little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .little) s2_ else Fe.orderSwap(s2_);
try p1.rejectIdentity();
var pc1_array: [9]P256 = undefined;
const pc1 = if (p1.is_base) basePointPc[0..9] else pc: {
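
A quick round trip through the SEC1 helpers changed above, assuming the P256 type exported as std.crypto.ecc.P256 (illustrative, not part of the diff):

const std = @import("std");
const P256 = std.crypto.ecc.P256;

test "p256 sec1 round trip (sketch)" {
    const p = P256.random();
    const sec1 = p.toUncompressedSec1(); // 0x04 || x || y, coordinates big-endian
    const q = try P256.fromSec1(&sec1);
    try std.testing.expect(p.equivalent(q));
}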

View file

@ -174,7 +174,7 @@ pub const Scalar = struct {
var s: [48]u8 = undefined;
while (true) {
crypto.random.bytes(&s);
const n = Scalar.fromBytes48(s, .Little);
const n = Scalar.fromBytes48(s, .little);
if (!n.isZero()) {
return n;
}
@ -191,7 +191,7 @@ const ScalarDouble = struct {
debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 3);
var s = s_;
if (endian == .Big) {
if (endian == .big) {
for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
@ -199,19 +199,19 @@ const ScalarDouble = struct {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len, 24);
b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
t.x1 = Fe.fromBytes(b, .little) catch unreachable;
}
if (s_.len >= 24) {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len - 24, 24);
b[0..len].* = s[24..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
t.x2 = Fe.fromBytes(b, .little) catch unreachable;
}
if (s_.len >= 48) {
var b = [_]u8{0} ** encoded_length;
const len = s.len - 48;
b[0..len].* = s[48..][0..len].*;
t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
t.x3 = Fe.fromBytes(b, .little) catch unreachable;
}
return t;
}

View file

@ -87,15 +87,15 @@ pub const P384 = struct {
},
2, 3 => {
if (encoded.len != 48) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..48].*, .Big);
const x = try Fe.fromBytes(encoded[0..48].*, .big);
const y_is_odd = (encoding_type == 3);
const y = try recoverY(x, y_is_odd);
return P384{ .x = x, .y = y };
},
4 => {
if (encoded.len != 96) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..48].*, .Big);
const y = try Fe.fromBytes(encoded[48..96].*, .Big);
const x = try Fe.fromBytes(encoded[0..48].*, .big);
const y = try Fe.fromBytes(encoded[48..96].*, .big);
return P384.fromAffineCoordinates(.{ .x = x, .y = y });
},
else => return error.InvalidEncoding,
@ -107,7 +107,7 @@ pub const P384 = struct {
var out: [49]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
out[1..].* = xy.x.toBytes(.Big);
out[1..].* = xy.x.toBytes(.big);
return out;
}
@ -116,15 +116,15 @@ pub const P384 = struct {
var out: [97]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
out[1..49].* = xy.x.toBytes(.Big);
out[49..97].* = xy.y.toBytes(.Big);
out[1..49].* = xy.x.toBytes(.big);
out[49..97].* = xy.y.toBytes(.big);
return out;
}
/// Return a random point.
pub fn random() P384 {
const n = scalar.random(.Little);
return basePoint.mul(n, .Little) catch unreachable;
const n = scalar.random(.little);
return basePoint.mul(n, .little) catch unreachable;
}
/// Flip the sign of the X coordinate.
@ -400,7 +400,7 @@ pub const P384 = struct {
/// Multiply an elliptic curve point by a scalar.
/// Return error.IdentityElement if the result is the identity element.
pub fn mul(p: P384, s_: [48]u8, endian: std.builtin.Endian) IdentityElementError!P384 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, false);
}
@ -412,7 +412,7 @@ pub const P384 = struct {
/// Multiply an elliptic curve point by a *PUBLIC* scalar *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulPublic(p: P384, s_: [48]u8, endian: std.builtin.Endian) IdentityElementError!P384 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, true);
}
@ -424,8 +424,8 @@ pub const P384 = struct {
/// Double-base multiplication of public parameters - Compute (p1*s1)+(p2*s2) *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulDoubleBasePublic(p1: P384, s1_: [48]u8, p2: P384, s2_: [48]u8, endian: std.builtin.Endian) IdentityElementError!P384 {
const s1 = if (endian == .Little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .Little) s2_ else Fe.orderSwap(s2_);
const s1 = if (endian == .little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .little) s2_ else Fe.orderSwap(s2_);
try p1.rejectIdentity();
var pc1_array: [9]P384 = undefined;
const pc1 = if (p1.is_base) basePointPc[0..9] else pc: {

View file

@ -163,7 +163,7 @@ pub const Scalar = struct {
var s: [64]u8 = undefined;
while (true) {
crypto.random.bytes(&s);
const n = Scalar.fromBytes64(s, .Little);
const n = Scalar.fromBytes64(s, .little);
if (!n.isZero()) {
return n;
}
@ -179,7 +179,7 @@ const ScalarDouble = struct {
debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 2);
var s = s_;
if (endian == .Big) {
if (endian == .big) {
for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero };
@ -187,13 +187,13 @@ const ScalarDouble = struct {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len, 32);
b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
t.x1 = Fe.fromBytes(b, .little) catch unreachable;
}
if (s_.len >= 32) {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len - 32, 32);
b[0..len].* = s[32..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
t.x2 = Fe.fromBytes(b, .little) catch unreachable;
}
return t;
}

View file

@ -41,7 +41,7 @@ pub const Secp256k1 = struct {
const lambda_s = s: {
var buf: [32]u8 = undefined;
mem.writeInt(u256, &buf, Endormorphism.lambda, .Little);
mem.writeInt(u256, &buf, Endormorphism.lambda, .little);
break :s buf;
};
@ -54,12 +54,12 @@ pub const Secp256k1 = struct {
pub fn splitScalar(s: [32]u8, endian: std.builtin.Endian) NonCanonicalError!SplitScalar {
const b1_neg_s = comptime s: {
var buf: [32]u8 = undefined;
mem.writeInt(u256, &buf, 303414439467246543595250775667605759171, .Little);
mem.writeInt(u256, &buf, 303414439467246543595250775667605759171, .little);
break :s buf;
};
const b2_neg_s = comptime s: {
var buf: [32]u8 = undefined;
mem.writeInt(u256, &buf, scalar.field_order - 64502973549206556628585045361533709077, .Little);
mem.writeInt(u256, &buf, scalar.field_order - 64502973549206556628585045361533709077, .little);
break :s buf;
};
const k = mem.readInt(u256, &s, endian);
@ -72,16 +72,16 @@ pub const Secp256k1 = struct {
var buf: [32]u8 = undefined;
mem.writeInt(u256, &buf, c1, .Little);
const c1x = try scalar.mul(buf, b1_neg_s, .Little);
mem.writeInt(u256, &buf, c1, .little);
const c1x = try scalar.mul(buf, b1_neg_s, .little);
mem.writeInt(u256, &buf, c2, .Little);
const c2x = try scalar.mul(buf, b2_neg_s, .Little);
mem.writeInt(u256, &buf, c2, .little);
const c2x = try scalar.mul(buf, b2_neg_s, .little);
const r2 = try scalar.add(c1x, c2x, .Little);
const r2 = try scalar.add(c1x, c2x, .little);
var r1 = try scalar.mul(r2, lambda_s, .Little);
r1 = try scalar.sub(s, r1, .Little);
var r1 = try scalar.mul(r2, lambda_s, .little);
r1 = try scalar.sub(s, r1, .little);
return SplitScalar{ .r1 = r1, .r2 = r2 };
}
@ -140,15 +140,15 @@ pub const Secp256k1 = struct {
},
2, 3 => {
if (encoded.len != 32) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const x = try Fe.fromBytes(encoded[0..32].*, .big);
const y_is_odd = (encoding_type == 3);
const y = try recoverY(x, y_is_odd);
return Secp256k1{ .x = x, .y = y };
},
4 => {
if (encoded.len != 64) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const y = try Fe.fromBytes(encoded[32..64].*, .Big);
const x = try Fe.fromBytes(encoded[0..32].*, .big);
const y = try Fe.fromBytes(encoded[32..64].*, .big);
return Secp256k1.fromAffineCoordinates(.{ .x = x, .y = y });
},
else => return error.InvalidEncoding,
@ -160,7 +160,7 @@ pub const Secp256k1 = struct {
var out: [33]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
out[1..].* = xy.x.toBytes(.Big);
out[1..].* = xy.x.toBytes(.big);
return out;
}
@ -169,15 +169,15 @@ pub const Secp256k1 = struct {
var out: [65]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
out[1..33].* = xy.x.toBytes(.Big);
out[33..65].* = xy.y.toBytes(.Big);
out[1..33].* = xy.x.toBytes(.big);
out[33..65].* = xy.y.toBytes(.big);
return out;
}
/// Return a random point.
pub fn random() Secp256k1 {
const n = scalar.random(.Little);
return basePoint.mul(n, .Little) catch unreachable;
const n = scalar.random(.little);
return basePoint.mul(n, .little) catch unreachable;
}
/// Flip the sign of the X coordinate.
@ -428,7 +428,7 @@ pub const Secp256k1 = struct {
/// Multiply an elliptic curve point by a scalar.
/// Return error.IdentityElement if the result is the identity element.
pub fn mul(p: Secp256k1, s_: [32]u8, endian: std.builtin.Endian) IdentityElementError!Secp256k1 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, false);
}
@ -440,24 +440,24 @@ pub const Secp256k1 = struct {
/// Multiply an elliptic curve point by a *PUBLIC* scalar *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulPublic(p: Secp256k1, s_: [32]u8, endian: std.builtin.Endian) (IdentityElementError || NonCanonicalError)!Secp256k1 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const zero = comptime scalar.Scalar.zero.toBytes(.Little);
const s = if (endian == .little) s_ else Fe.orderSwap(s_);
const zero = comptime scalar.Scalar.zero.toBytes(.little);
if (mem.eql(u8, &zero, &s)) {
return error.IdentityElement;
}
const pc = precompute(p, 8);
var lambda_p = try pcMul(&pc, Endormorphism.lambda_s, true);
var split_scalar = try Endormorphism.splitScalar(s, .Little);
var split_scalar = try Endormorphism.splitScalar(s, .little);
var px = p;
// If a key is negative, flip the sign to keep it half-sized,
// and flip the sign of the Y point coordinate to compensate.
if (split_scalar.r1[split_scalar.r1.len / 2] != 0) {
split_scalar.r1 = scalar.neg(split_scalar.r1, .Little) catch zero;
split_scalar.r1 = scalar.neg(split_scalar.r1, .little) catch zero;
px = px.neg();
}
if (split_scalar.r2[split_scalar.r2.len / 2] != 0) {
split_scalar.r2 = scalar.neg(split_scalar.r2, .Little) catch zero;
split_scalar.r2 = scalar.neg(split_scalar.r2, .little) catch zero;
lambda_p = lambda_p.neg();
}
return mulDoubleBasePublicEndo(px, split_scalar.r1, lambda_p, split_scalar.r2);
@ -502,8 +502,8 @@ pub const Secp256k1 = struct {
/// Double-base multiplication of public parameters - Compute (p1*s1)+(p2*s2) *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulDoubleBasePublic(p1: Secp256k1, s1_: [32]u8, p2: Secp256k1, s2_: [32]u8, endian: std.builtin.Endian) IdentityElementError!Secp256k1 {
const s1 = if (endian == .Little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .Little) s2_ else Fe.orderSwap(s2_);
const s1 = if (endian == .little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .little) s2_ else Fe.orderSwap(s2_);
try p1.rejectIdentity();
var pc1_array: [9]Secp256k1 = undefined;
const pc1 = if (p1.is_base) basePointPc[0..9] else pc: {

View file

@ -174,7 +174,7 @@ pub const Scalar = struct {
var s: [48]u8 = undefined;
while (true) {
crypto.random.bytes(&s);
const n = Scalar.fromBytes48(s, .Little);
const n = Scalar.fromBytes48(s, .little);
if (!n.isZero()) {
return n;
}
@ -191,7 +191,7 @@ const ScalarDouble = struct {
debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 3);
var s = s_;
if (endian == .Big) {
if (endian == .big) {
for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
@ -199,19 +199,19 @@ const ScalarDouble = struct {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len, 24);
b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
t.x1 = Fe.fromBytes(b, .little) catch unreachable;
}
if (s_.len >= 24) {
var b = [_]u8{0} ** encoded_length;
const len = @min(s.len - 24, 24);
b[0..len].* = s[24..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
t.x2 = Fe.fromBytes(b, .little) catch unreachable;
}
if (s_.len >= 48) {
var b = [_]u8{0} ** encoded_length;
const len = s.len - 48;
b[0..len].* = s[48..][0..len].*;
t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
t.x3 = Fe.fromBytes(b, .little) catch unreachable;
}
return t;
}

View file

@ -5,12 +5,12 @@ const testing = std.testing;
const P256 = @import("../p256.zig").P256;
test "p256 ECDH key exchange" {
const dha = P256.scalar.random(.Little);
const dhb = P256.scalar.random(.Little);
const dhA = try P256.basePoint.mul(dha, .Little);
const dhB = try P256.basePoint.mul(dhb, .Little);
const shareda = try dhA.mul(dhb, .Little);
const sharedb = try dhB.mul(dha, .Little);
const dha = P256.scalar.random(.little);
const dhb = P256.scalar.random(.little);
const dhA = try P256.basePoint.mul(dha, .little);
const dhB = try P256.basePoint.mul(dhb, .little);
const shareda = try dhA.mul(dhb, .little);
const sharedb = try dhB.mul(dha, .little);
try testing.expect(shareda.equivalent(sharedb));
}
@ -21,7 +21,7 @@ test "p256 point from affine coordinates" {
_ = try fmt.hexToBytes(&xs, xh);
var ys: [32]u8 = undefined;
_ = try fmt.hexToBytes(&ys, yh);
var p = try P256.fromSerializedAffineCoordinates(xs, ys, .Big);
var p = try P256.fromSerializedAffineCoordinates(xs, ys, .big);
try testing.expect(p.equivalent(P256.basePoint));
}
@ -44,7 +44,7 @@ test "p256 test vectors" {
p = p.add(P256.basePoint);
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -61,7 +61,7 @@ test "p256 test vectors - doubling" {
p = p.dbl();
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -80,20 +80,20 @@ test "p256 uncompressed sec1 encoding/decoding" {
}
test "p256 public key is the neutral element" {
const n = P256.scalar.Scalar.zero.toBytes(.Little);
const n = P256.scalar.Scalar.zero.toBytes(.little);
const p = P256.random();
try testing.expectError(error.IdentityElement, p.mul(n, .Little));
try testing.expectError(error.IdentityElement, p.mul(n, .little));
}
test "p256 public key is the neutral element (public verification)" {
const n = P256.scalar.Scalar.zero.toBytes(.Little);
const n = P256.scalar.Scalar.zero.toBytes(.little);
const p = P256.random();
try testing.expectError(error.IdentityElement, p.mulPublic(n, .Little));
try testing.expectError(error.IdentityElement, p.mulPublic(n, .little));
}
test "p256 field element non-canonical encoding" {
const s = [_]u8{0xff} ** 32;
try testing.expectError(error.NonCanonical, P256.Fe.fromBytes(s, .Little));
try testing.expectError(error.NonCanonical, P256.Fe.fromBytes(s, .little));
}
test "p256 neutral element decoding" {
@ -107,8 +107,8 @@ test "p256 double base multiplication" {
const p2 = P256.basePoint.dbl();
const s1 = [_]u8{0x01} ** 32;
const s2 = [_]u8{0x02} ** 32;
const pr1 = try P256.mulDoubleBasePublic(p1, s1, p2, s2, .Little);
const pr2 = (try p1.mul(s1, .Little)).add(try p2.mul(s2, .Little));
const pr1 = try P256.mulDoubleBasePublic(p1, s1, p2, s2, .little);
const pr2 = (try p1.mul(s1, .little)).add(try p2.mul(s2, .little));
try testing.expect(pr1.equivalent(pr2));
}
@ -117,8 +117,8 @@ test "p256 double base multiplication with large scalars" {
const p2 = P256.basePoint.dbl();
const s1 = [_]u8{0xee} ** 32;
const s2 = [_]u8{0xdd} ** 32;
const pr1 = try P256.mulDoubleBasePublic(p1, s1, p2, s2, .Little);
const pr2 = (try p1.mul(s1, .Little)).add(try p2.mul(s2, .Little));
const pr1 = try P256.mulDoubleBasePublic(p1, s1, p2, s2, .little);
const pr2 = (try p1.mul(s1, .little)).add(try p2.mul(s2, .little));
try testing.expect(pr1.equivalent(pr2));
}
@ -130,9 +130,9 @@ test "p256 scalar inverse" {
const scalar = try P256.scalar.Scalar.fromBytes(.{
0x94, 0xa1, 0xbb, 0xb1, 0x4b, 0x90, 0x6a, 0x61, 0xa2, 0x80, 0xf2, 0x45, 0xf9, 0xe9, 0x3c, 0x7f,
0x3b, 0x4a, 0x62, 0x47, 0x82, 0x4f, 0x5d, 0x33, 0xb9, 0x67, 0x07, 0x87, 0x64, 0x2a, 0x68, 0xde,
}, .Big);
}, .big);
const inverse = scalar.invert();
try std.testing.expectEqualSlices(u8, &out, &inverse.toBytes(.Big));
try std.testing.expectEqualSlices(u8, &out, &inverse.toBytes(.big));
}
test "p256 scalar parity" {

View file

@ -5,12 +5,12 @@ const testing = std.testing;
const P384 = @import("../p384.zig").P384;
test "p384 ECDH key exchange" {
const dha = P384.scalar.random(.Little);
const dhb = P384.scalar.random(.Little);
const dhA = try P384.basePoint.mul(dha, .Little);
const dhB = try P384.basePoint.mul(dhb, .Little);
const shareda = try dhA.mul(dhb, .Little);
const sharedb = try dhB.mul(dha, .Little);
const dha = P384.scalar.random(.little);
const dhb = P384.scalar.random(.little);
const dhA = try P384.basePoint.mul(dha, .little);
const dhB = try P384.basePoint.mul(dhb, .little);
const shareda = try dhA.mul(dhb, .little);
const sharedb = try dhB.mul(dha, .little);
try testing.expect(shareda.equivalent(sharedb));
}
@ -21,7 +21,7 @@ test "p384 point from affine coordinates" {
_ = try fmt.hexToBytes(&xs, xh);
var ys: [48]u8 = undefined;
_ = try fmt.hexToBytes(&ys, yh);
var p = try P384.fromSerializedAffineCoordinates(xs, ys, .Big);
var p = try P384.fromSerializedAffineCoordinates(xs, ys, .big);
try testing.expect(p.equivalent(P384.basePoint));
}
@ -45,7 +45,7 @@ test "p384 test vectors" {
p = p.add(P384.basePoint);
var xs: [48]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -62,7 +62,7 @@ test "p384 test vectors - doubling" {
p = p.dbl();
var xs: [48]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -83,20 +83,20 @@ test "p384 uncompressed sec1 encoding/decoding" {
}
test "p384 public key is the neutral element" {
const n = P384.scalar.Scalar.zero.toBytes(.Little);
const n = P384.scalar.Scalar.zero.toBytes(.little);
const p = P384.random();
try testing.expectError(error.IdentityElement, p.mul(n, .Little));
try testing.expectError(error.IdentityElement, p.mul(n, .little));
}
test "p384 public key is the neutral element (public verification)" {
const n = P384.scalar.Scalar.zero.toBytes(.Little);
const n = P384.scalar.Scalar.zero.toBytes(.little);
const p = P384.random();
try testing.expectError(error.IdentityElement, p.mulPublic(n, .Little));
try testing.expectError(error.IdentityElement, p.mulPublic(n, .little));
}
test "p384 field element non-canonical encoding" {
const s = [_]u8{0xff} ** 48;
try testing.expectError(error.NonCanonical, P384.Fe.fromBytes(s, .Little));
try testing.expectError(error.NonCanonical, P384.Fe.fromBytes(s, .little));
}
test "p384 neutral element decoding" {
@ -110,8 +110,8 @@ test "p384 double base multiplication" {
const p2 = P384.basePoint.dbl();
const s1 = [_]u8{0x01} ** 48;
const s2 = [_]u8{0x02} ** 48;
const pr1 = try P384.mulDoubleBasePublic(p1, s1, p2, s2, .Little);
const pr2 = (try p1.mul(s1, .Little)).add(try p2.mul(s2, .Little));
const pr1 = try P384.mulDoubleBasePublic(p1, s1, p2, s2, .little);
const pr2 = (try p1.mul(s1, .little)).add(try p2.mul(s2, .little));
try testing.expect(pr1.equivalent(pr2));
}
@ -120,8 +120,8 @@ test "p384 double base multiplication with large scalars" {
const p2 = P384.basePoint.dbl();
const s1 = [_]u8{0xee} ** 48;
const s2 = [_]u8{0xdd} ** 48;
const pr1 = try P384.mulDoubleBasePublic(p1, s1, p2, s2, .Little);
const pr2 = (try p1.mul(s1, .Little)).add(try p2.mul(s2, .Little));
const pr1 = try P384.mulDoubleBasePublic(p1, s1, p2, s2, .little);
const pr2 = (try p1.mul(s1, .little)).add(try p2.mul(s2, .little));
try testing.expect(pr1.equivalent(pr2));
}
@ -134,10 +134,10 @@ test "p384 scalar inverse" {
0x94, 0xa1, 0xbb, 0xb1, 0x4b, 0x90, 0x6a, 0x61, 0xa2, 0x80, 0xf2, 0x45, 0xf9, 0xe9, 0x3c, 0x7f,
0x3b, 0x4a, 0x62, 0x47, 0x82, 0x4f, 0x5d, 0x33, 0xb9, 0x67, 0x07, 0x87, 0x64, 0x2a, 0x68, 0xde,
0x38, 0x36, 0xe8, 0x0f, 0xa2, 0x84, 0x6b, 0x4e, 0xf3, 0x9a, 0x02, 0x31, 0x24, 0x41, 0x22, 0xca,
}, .Big);
}, .big);
const inverse = scalar.invert();
const inverse2 = inverse.invert();
try testing.expectEqualSlices(u8, &out, &inverse.toBytes(.Big));
try testing.expectEqualSlices(u8, &out, &inverse.toBytes(.big));
try testing.expect(inverse2.equivalent(scalar));
const sq = scalar.sq();

View file

@ -5,22 +5,22 @@ const testing = std.testing;
const Secp256k1 = @import("../secp256k1.zig").Secp256k1;
test "secp256k1 ECDH key exchange" {
const dha = Secp256k1.scalar.random(.Little);
const dhb = Secp256k1.scalar.random(.Little);
const dhA = try Secp256k1.basePoint.mul(dha, .Little);
const dhB = try Secp256k1.basePoint.mul(dhb, .Little);
const shareda = try dhA.mul(dhb, .Little);
const sharedb = try dhB.mul(dha, .Little);
const dha = Secp256k1.scalar.random(.little);
const dhb = Secp256k1.scalar.random(.little);
const dhA = try Secp256k1.basePoint.mul(dha, .little);
const dhB = try Secp256k1.basePoint.mul(dhb, .little);
const shareda = try dhA.mul(dhb, .little);
const sharedb = try dhB.mul(dha, .little);
try testing.expect(shareda.equivalent(sharedb));
}
test "secp256k1 ECDH key exchange including public multiplication" {
const dha = Secp256k1.scalar.random(.Little);
const dhb = Secp256k1.scalar.random(.Little);
const dhA = try Secp256k1.basePoint.mul(dha, .Little);
const dhB = try Secp256k1.basePoint.mulPublic(dhb, .Little);
const shareda = try dhA.mul(dhb, .Little);
const sharedb = try dhB.mulPublic(dha, .Little);
const dha = Secp256k1.scalar.random(.little);
const dhb = Secp256k1.scalar.random(.little);
const dhA = try Secp256k1.basePoint.mul(dha, .little);
const dhB = try Secp256k1.basePoint.mulPublic(dhb, .little);
const shareda = try dhA.mul(dhb, .little);
const sharedb = try dhB.mulPublic(dha, .little);
try testing.expect(shareda.equivalent(sharedb));
}
@ -31,7 +31,7 @@ test "secp256k1 point from affine coordinates" {
_ = try fmt.hexToBytes(&xs, xh);
var ys: [32]u8 = undefined;
_ = try fmt.hexToBytes(&ys, yh);
var p = try Secp256k1.fromSerializedAffineCoordinates(xs, ys, .Big);
var p = try Secp256k1.fromSerializedAffineCoordinates(xs, ys, .big);
try testing.expect(p.equivalent(Secp256k1.basePoint));
}
@ -54,7 +54,7 @@ test "secp256k1 test vectors" {
p = p.add(Secp256k1.basePoint);
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -72,7 +72,7 @@ test "secp256k1 test vectors - doubling" {
p = p.dbl();
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
try testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
try testing.expectEqualSlices(u8, &x.toBytes(.big), &xs);
}
}
@ -91,20 +91,20 @@ test "secp256k1 uncompressed sec1 encoding/decoding" {
}
test "secp256k1 public key is the neutral element" {
const n = Secp256k1.scalar.Scalar.zero.toBytes(.Little);
const n = Secp256k1.scalar.Scalar.zero.toBytes(.little);
const p = Secp256k1.random();
try testing.expectError(error.IdentityElement, p.mul(n, .Little));
try testing.expectError(error.IdentityElement, p.mul(n, .little));
}
test "secp256k1 public key is the neutral element (public verification)" {
const n = Secp256k1.scalar.Scalar.zero.toBytes(.Little);
const n = Secp256k1.scalar.Scalar.zero.toBytes(.little);
const p = Secp256k1.random();
try testing.expectError(error.IdentityElement, p.mulPublic(n, .Little));
try testing.expectError(error.IdentityElement, p.mulPublic(n, .little));
}
test "secp256k1 field element non-canonical encoding" {
const s = [_]u8{0xff} ** 32;
try testing.expectError(error.NonCanonical, Secp256k1.Fe.fromBytes(s, .Little));
try testing.expectError(error.NonCanonical, Secp256k1.Fe.fromBytes(s, .little));
}
test "secp256k1 neutral element decoding" {
@ -118,8 +118,8 @@ test "secp256k1 double base multiplication" {
const p2 = Secp256k1.basePoint.dbl();
const s1 = [_]u8{0x01} ** 32;
const s2 = [_]u8{0x02} ** 32;
const pr1 = try Secp256k1.mulDoubleBasePublic(p1, s1, p2, s2, .Little);
const pr2 = (try p1.mul(s1, .Little)).add(try p2.mul(s2, .Little));
const pr1 = try Secp256k1.mulDoubleBasePublic(p1, s1, p2, s2, .little);
const pr2 = (try p1.mul(s1, .little)).add(try p2.mul(s2, .little));
try testing.expect(pr1.equivalent(pr2));
}
@ -131,9 +131,9 @@ test "secp256k1 scalar inverse" {
const scalar = try Secp256k1.scalar.Scalar.fromBytes(.{
0x94, 0xa1, 0xbb, 0xb1, 0x4b, 0x90, 0x6a, 0x61, 0xa2, 0x80, 0xf2, 0x45, 0xf9, 0xe9, 0x3c, 0x7f,
0x3b, 0x4a, 0x62, 0x47, 0x82, 0x4f, 0x5d, 0x33, 0xb9, 0x67, 0x07, 0x87, 0x64, 0x2a, 0x68, 0xde,
}, .Big);
}, .big);
const inverse = scalar.invert();
try std.testing.expectEqualSlices(u8, &out, &inverse.toBytes(.Big));
try std.testing.expectEqualSlices(u8, &out, &inverse.toBytes(.big));
}
test "secp256k1 scalar parity" {

View file

@ -22,12 +22,12 @@ pub const Poly1305 = struct {
pub fn init(key: *const [key_length]u8) Poly1305 {
return Poly1305{
.r = [_]u64{
mem.readInt(u64, key[0..8], .Little) & 0x0ffffffc0fffffff,
mem.readInt(u64, key[8..16], .Little) & 0x0ffffffc0ffffffc,
mem.readInt(u64, key[0..8], .little) & 0x0ffffffc0fffffff,
mem.readInt(u64, key[8..16], .little) & 0x0ffffffc0ffffffc,
},
.pad = [_]u64{
mem.readInt(u64, key[16..24], .Little),
mem.readInt(u64, key[24..32], .Little),
mem.readInt(u64, key[16..24], .little),
mem.readInt(u64, key[24..32], .little),
},
};
}
@ -56,8 +56,8 @@ pub const Poly1305 = struct {
var i: usize = 0;
while (i + block_length <= m.len) : (i += block_length) {
const in0 = mem.readInt(u64, m[i..][0..8], .Little);
const in1 = mem.readInt(u64, m[i + 8 ..][0..8], .Little);
const in0 = mem.readInt(u64, m[i..][0..8], .little);
const in1 = mem.readInt(u64, m[i + 8 ..][0..8], .little);
// Add the input message to H
var v = @addWithOverflow(h0, in0);
@ -182,8 +182,8 @@ pub const Poly1305 = struct {
const c = ((h0 & st.pad[0]) | ((h0 | st.pad[0]) & ~st.h[0])) >> 63;
st.h[1] = h1 +% st.pad[1] +% c;
mem.writeInt(u64, out[0..8], st.h[0], .Little);
mem.writeInt(u64, out[8..16], st.h[1], .Little);
mem.writeInt(u64, out[0..8], st.h[0], .little);
mem.writeInt(u64, out[8..16], st.h[1], .little);
utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Poly1305)]);
}
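
One-shot use of this MAC, for reference (illustrative, not part of the diff; a real key must be unique per message):

const std = @import("std");
const Poly1305 = std.crypto.onetimeauth.Poly1305;

test "poly1305 one-shot (sketch)" {
    const key = [_]u8{0x01} ** Poly1305.key_length;
    var tag: [Poly1305.mac_length]u8 = undefined;
    Poly1305.create(&tag, "message to authenticate", &key);
    // tag now holds the 16-byte authenticator
}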

View file

@ -29,10 +29,10 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
fn initContext(key: [8]u32, d: [4]u32) BlockVec {
const c = "expand 32-byte k";
const constant_le = comptime [4]u32{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
return BlockVec{
Lane{ key[0], key[1], key[2], key[3] },
@ -112,10 +112,10 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
var i: usize = 0;
while (i < 4) : (i += 1) {
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i][0], .Little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i][1], .Little);
mem.writeInt(u32, out[16 * i + 8 ..][0..4], x[i][2], .Little);
mem.writeInt(u32, out[16 * i + 12 ..][0..4], x[i][3], .Little);
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i][0], .little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i][1], .little);
mem.writeInt(u32, out[16 * i + 8 ..][0..4], x[i][2], .little);
mem.writeInt(u32, out[16 * i + 12 ..][0..4], x[i][3], .little);
}
}
@ -158,20 +158,20 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
for (c, 0..) |_, i| {
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .Little);
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .little);
}
const ctx = initContext(keyToWords(key), c);
var x: BlockVec = undefined;
salsaCore(x[0..], ctx, false);
var out: [32]u8 = undefined;
mem.writeInt(u32, out[0..4], x[0][0], .Little);
mem.writeInt(u32, out[4..8], x[1][1], .Little);
mem.writeInt(u32, out[8..12], x[2][2], .Little);
mem.writeInt(u32, out[12..16], x[3][3], .Little);
mem.writeInt(u32, out[16..20], x[1][2], .Little);
mem.writeInt(u32, out[20..24], x[1][3], .Little);
mem.writeInt(u32, out[24..28], x[2][0], .Little);
mem.writeInt(u32, out[28..32], x[2][1], .Little);
mem.writeInt(u32, out[0..4], x[0][0], .little);
mem.writeInt(u32, out[4..8], x[1][1], .little);
mem.writeInt(u32, out[8..12], x[2][2], .little);
mem.writeInt(u32, out[12..16], x[3][3], .little);
mem.writeInt(u32, out[16..20], x[1][2], .little);
mem.writeInt(u32, out[20..24], x[1][3], .little);
mem.writeInt(u32, out[24..28], x[2][0], .little);
mem.writeInt(u32, out[28..32], x[2][1], .little);
return out;
}
};
@ -184,10 +184,10 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
fn initContext(key: [8]u32, d: [4]u32) BlockVec {
const c = "expand 32-byte k";
const constant_le = comptime [4]u32{
mem.readInt(u32, c[0..4], .Little),
mem.readInt(u32, c[4..8], .Little),
mem.readInt(u32, c[8..12], .Little),
mem.readInt(u32, c[12..16], .Little),
mem.readInt(u32, c[0..4], .little),
mem.readInt(u32, c[4..8], .little),
mem.readInt(u32, c[8..12], .little),
mem.readInt(u32, c[12..16], .little),
};
return BlockVec{
constant_le[0], key[0], key[1], key[2],
@ -241,7 +241,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
for (x, 0..) |w, i| {
mem.writeInt(u32, out[i * 4 ..][0..4], w, .Little);
mem.writeInt(u32, out[i * 4 ..][0..4], w, .little);
}
}
@ -283,20 +283,20 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
for (c, 0..) |_, i| {
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .Little);
c[i] = mem.readInt(u32, input[4 * i ..][0..4], .little);
}
const ctx = initContext(keyToWords(key), c);
var x: BlockVec = undefined;
salsaCore(x[0..], ctx, false);
var out: [32]u8 = undefined;
mem.writeInt(u32, out[0..4], x[0], .Little);
mem.writeInt(u32, out[4..8], x[5], .Little);
mem.writeInt(u32, out[8..12], x[10], .Little);
mem.writeInt(u32, out[12..16], x[15], .Little);
mem.writeInt(u32, out[16..20], x[6], .Little);
mem.writeInt(u32, out[20..24], x[7], .Little);
mem.writeInt(u32, out[24..28], x[8], .Little);
mem.writeInt(u32, out[28..32], x[9], .Little);
mem.writeInt(u32, out[0..4], x[0], .little);
mem.writeInt(u32, out[4..8], x[5], .little);
mem.writeInt(u32, out[8..12], x[10], .little);
mem.writeInt(u32, out[12..16], x[15], .little);
mem.writeInt(u32, out[16..20], x[6], .little);
mem.writeInt(u32, out[20..24], x[7], .little);
mem.writeInt(u32, out[24..28], x[8], .little);
mem.writeInt(u32, out[28..32], x[9], .little);
return out;
}
};
@ -308,7 +308,7 @@ fn keyToWords(key: [32]u8) [8]u32 {
var k: [8]u32 = undefined;
var i: usize = 0;
while (i < 8) : (i += 1) {
k[i] = mem.readInt(u32, key[i * 4 ..][0..4], .Little);
k[i] = mem.readInt(u32, key[i * 4 ..][0..4], .little);
}
return k;
}
@ -335,8 +335,8 @@ pub fn Salsa(comptime rounds: comptime_int) type {
debug.assert(in.len == out.len);
var d: [4]u32 = undefined;
d[0] = mem.readInt(u32, nonce[0..4], .Little);
d[1] = mem.readInt(u32, nonce[4..8], .Little);
d[0] = mem.readInt(u32, nonce[0..4], .little);
d[1] = mem.readInt(u32, nonce[4..8], .little);
d[2] = @as(u32, @truncate(counter));
d[3] = @as(u32, @truncate(counter >> 32));
SalsaImpl(rounds).salsaXor(out, in, keyToWords(key), d);

View file

@ -91,7 +91,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
var y: []align(16) u32 = @alignCast(xy[32 * r ..]);
for (x, 0..) |*v1, j| {
v1.* = mem.readInt(u32, b[4 * j ..][0..4], .Little);
v1.* = mem.readInt(u32, b[4 * j ..][0..4], .little);
}
var tmp: [16]u32 align(16) = undefined;
@ -116,7 +116,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
}
for (x, 0..) |v1, j| {
mem.writeInt(u32, b[4 * j ..][0..4], v1, .Little);
mem.writeInt(u32, b[4 * j ..][0..4], v1, .little);
}
}
@ -361,7 +361,7 @@ const crypt_format = struct {
std.debug.assert(dst.len == decodedLen(src.len));
var i: usize = 0;
while (i < src.len / 4) : (i += 1) {
mem.writeInt(u24, dst[i * 3 ..][0..3], try intDecode(u24, src[i * 4 ..][0..4]), .Little);
mem.writeInt(u24, dst[i * 3 ..][0..3], try intDecode(u24, src[i * 4 ..][0..4]), .little);
}
const leftover = src[i * 4 ..];
var v: u24 = 0;
@ -377,7 +377,7 @@ const crypt_format = struct {
std.debug.assert(dst.len == encodedLen(src.len));
var i: usize = 0;
while (i < src.len / 3) : (i += 1) {
intEncode(dst[i * 4 ..][0..4], mem.readInt(u24, src[i * 3 ..][0..3], .Little));
intEncode(dst[i * 4 ..][0..4], mem.readInt(u24, src[i * 3 ..][0..3], .little));
}
const leftover = src[i * 3 ..];
var v: u24 = 0;

View file

@ -111,7 +111,7 @@ pub const Sha1 = struct {
d.round(d.buf[0..]);
for (d.s, 0..) |s, j| {
mem.writeInt(u32, out[4 * j ..][0..4], s, .Big);
mem.writeInt(u32, out[4 * j ..][0..4], s, .big);
}
}
@ -151,7 +151,7 @@ pub const Sha1 = struct {
roundParam(0, 1, 2, 3, 4, 15),
};
inline for (round0a) |r| {
s[r.i] = mem.readInt(u32, b[r.i * 4 ..][0..4], .Big);
s[r.i] = mem.readInt(u32, b[r.i * 4 ..][0..4], .big);
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));

View file

@ -171,7 +171,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
const rr = d.s[0 .. params.digest_bits / 32];
for (rr, 0..) |s, j| {
mem.writeInt(u32, out[4 * j ..][0..4], s, .Big);
mem.writeInt(u32, out[4 * j ..][0..4], s, .big);
}
}
@ -195,7 +195,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
fn round(d: *Self, b: *const [64]u8) void {
var s: [64]u32 align(16) = undefined;
for (@as(*align(1) const [16]u32, @ptrCast(b)), 0..) |*elem, i| {
s[i] = mem.readInt(u32, mem.asBytes(elem), .Big);
s[i] = mem.readInt(u32, mem.asBytes(elem), .big);
}
if (!@inComptime()) {
@ -663,7 +663,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
const rr = d.s[0 .. params.digest_bits / 64];
for (rr, 0..) |s, j| {
mem.writeInt(u64, out[8 * j ..][0..8], s, .Big);
mem.writeInt(u64, out[8 * j ..][0..8], s, .big);
}
}
@ -678,7 +678,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
var i: usize = 0;
while (i < 16) : (i += 1) {
s[i] = mem.readInt(u64, b[i * 8 ..][0..8], .Big);
s[i] = mem.readInt(u64, b[i * 8 ..][0..8], .big);
}
while (i < 80) : (i += 1) {
s[i] = s[i - 16] +% s[i - 7] +%
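
Callers are unaffected by the rename; the big-endian word handling stays internal. A standalone check against the classic "abc" vector (illustrative, not part of the diff):

const std = @import("std");
const Sha256 = std.crypto.hash.sha2.Sha256;

test "sha256 one-shot (sketch)" {
    var out: [Sha256.digest_length]u8 = undefined;
    Sha256.hash("abc", &out, .{});
    var expected: [Sha256.digest_length]u8 = undefined;
    _ = try std.fmt.hexToBytes(&expected, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad");
    try std.testing.expectEqualSlices(u8, &expected, &out);
}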

View file

@ -56,8 +56,8 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
msg_len: u8,
fn init(key: *const [key_length]u8) Self {
const k0 = mem.readInt(u64, key[0..8], .Little);
const k1 = mem.readInt(u64, key[8..16], .Little);
const k0 = mem.readInt(u64, key[0..8], .little);
const k1 = mem.readInt(u64, key[8..16], .little);
var d = Self{
.v0 = k0 ^ 0x736f6d6570736575,
@ -124,7 +124,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
}
fn round(self: *Self, b: [8]u8) void {
const m = mem.readInt(u64, &b, .Little);
const m = mem.readInt(u64, &b, .little);
self.v3 ^= m;
comptime var i: usize = 0;
@ -213,7 +213,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
/// Return an authentication tag for the current state
/// Assumes `out` is less than or equal to `mac_length`.
pub fn final(self: *Self, out: *[mac_length]u8) void {
mem.writeInt(T, out, self.state.final(self.buf[0..self.buf_len]), .Little);
mem.writeInt(T, out, self.state.final(self.buf[0..self.buf_len]), .little);
}
pub fn finalResult(self: *Self) [mac_length]u8 {
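
Keyed use with the incremental interface shown above (illustrative, not part of the diff; the key value is made up):

const std = @import("std");
const SipHash64 = std.crypto.auth.siphash.SipHash64;

test "siphash-2-4 (sketch)" {
    const Hasher = SipHash64(2, 4);
    const key = [_]u8{0x0f} ** Hasher.key_length;
    var mac: [Hasher.mac_length]u8 = undefined;
    var h = Hasher.init(&key);
    h.update("some bytes");
    h.final(&mac); // 64-bit tag, serialized little-endian as above
}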

View file

@ -370,7 +370,7 @@ pub fn hkdfExpandLabel(
const max_context_len = 255;
const tls13 = "tls13 ";
var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined;
mem.writeInt(u16, buf[0..2], len, .Big);
mem.writeInt(u16, buf[0..2], len, .big);
buf[2] = @as(u8, @intCast(tls13.len + label.len));
buf[3..][0..tls13.len].* = tls13.*;
var i: usize = 3 + tls13.len;

View file

@ -332,10 +332,10 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
const pk = PublicKey.fromSec1(server_pub_key) catch {
return error.TlsDecryptFailure;
};
const mul = pk.p.mulPublic(secp256r1_kp.secret_key.bytes, .Big) catch {
const mul = pk.p.mulPublic(secp256r1_kp.secret_key.bytes, .big) catch {
return error.TlsDecryptFailure;
};
shared_key = &mul.affineCoordinates().x.toBytes(.Big);
shared_key = &mul.affineCoordinates().x.toBytes(.big);
},
else => {
return error.TlsIllegalParameter;
@ -1049,10 +1049,10 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
}
const ct: tls.ContentType = @enumFromInt(frag[in]);
in += 1;
const legacy_version = mem.readInt(u16, frag[in..][0..2], .Big);
const legacy_version = mem.readInt(u16, frag[in..][0..2], .big);
in += 2;
_ = legacy_version;
const record_len = mem.readInt(u16, frag[in..][0..2], .Big);
const record_len = mem.readInt(u16, frag[in..][0..2], .big);
if (record_len > max_ciphertext_len) return error.TlsRecordOverflow;
in += 2;
const end = in + record_len;
@ -1136,7 +1136,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
while (true) {
const handshake_type: tls.HandshakeType = @enumFromInt(cleartext[ct_i]);
ct_i += 1;
const handshake_len = mem.readInt(u24, cleartext[ct_i..][0..3], .Big);
const handshake_len = mem.readInt(u24, cleartext[ct_i..][0..3], .big);
ct_i += 3;
const next_handshake_i = ct_i + handshake_len;
if (next_handshake_i > cleartext.len - 1)
@ -1284,8 +1284,8 @@ const native_endian = builtin.cpu.arch.endian();
inline fn big(x: anytype) @TypeOf(x) {
return switch (native_endian) {
.Big => x,
.Little => @byteSwap(x),
.big => x,
.little => @byteSwap(x),
};
}
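
The record header fields read here are in network byte order, hence .big. Parsing one in isolation (illustrative, not part of the diff; the header bytes are made up):

const std = @import("std");

test "tls record header fields (sketch)" {
    // content type (1 byte), legacy_version (u16 big-endian), record length (u16 big-endian)
    const header = [5]u8{ 0x17, 0x03, 0x03, 0x01, 0x00 };
    try std.testing.expectEqual(@as(u16, 0x0303), std.mem.readInt(u16, header[1..3], .big));
    try std.testing.expectEqual(@as(u16, 256), std.mem.readInt(u16, header[3..5], .big));
}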

View file

@ -54,7 +54,7 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E
const Cext = std.meta.Int(.unsigned, bits + 1);
var gt: T = 0;
var eq: T = 1;
if (endian == .Little) {
if (endian == .little) {
var i = a.len;
while (i != 0) {
i -= 1;
@ -84,7 +84,7 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T,
const len = a.len;
debug.assert(len == b.len and len == result.len);
var carry: u1 = 0;
if (endian == .Little) {
if (endian == .little) {
var i: usize = 0;
while (i < len) : (i += 1) {
const ov1 = @addWithOverflow(a[i], b[i]);
@ -111,7 +111,7 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
const len = a.len;
debug.assert(len == b.len and len == result.len);
var borrow: u1 = 0;
if (endian == .Little) {
if (endian == .little) {
var i: usize = 0;
while (i < len) : (i += 1) {
const ov1 = @subWithOverflow(a[i], b[i]);
@ -165,14 +165,14 @@ test "crypto.utils.timingSafeEql (vectors)" {
test "crypto.utils.timingSafeCompare" {
var a = [_]u8{10} ** 32;
var b = [_]u8{10} ** 32;
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Big), .eq);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Little), .eq);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .big), .eq);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .little), .eq);
a[31] = 1;
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Big), .lt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Little), .lt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .big), .lt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .little), .lt);
a[0] = 20;
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Big), .gt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .Little), .lt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .big), .gt);
try testing.expectEqual(timingSafeCompare(u8, &a, &b, .little), .lt);
}
test "crypto.utils.timingSafe{Add,Sub}" {
@ -185,7 +185,7 @@ test "crypto.utils.timingSafe{Add,Sub}" {
while (iterations != 0) : (iterations -= 1) {
random.bytes(&a);
random.bytes(&b);
const endian = if (iterations % 2 == 0) Endian.Big else Endian.Little;
const endian = if (iterations % 2 == 0) Endian.big else Endian.little;
_ = timingSafeSub(u8, &a, &b, &c, endian); // a-b
_ = timingSafeAdd(u8, &c, &b, &c, endian); // (a-b)+b
try testing.expectEqualSlices(u8, &c, &a);

View file
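A minimal usage sketch of the renamed tags against std.crypto.utils.timingSafeCompare (not part of this diff; the byte values are arbitrary):

const std = @import("std");

test "timingSafeCompare with lowercase endian tags" {
    const a = [_]u8{ 1, 2, 3 };
    const b = [_]u8{ 1, 2, 4 };
    // With .big, index 0 is the most significant element, so a < b here.
    try std.testing.expectEqual(std.math.Order.lt, std.crypto.utils.timingSafeCompare(u8, &a, &b, .big));
    // With .little, the last element is most significant; the ordering is the same for these values.
    try std.testing.expectEqual(std.math.Order.lt, std.crypto.utils.timingSafeCompare(u8, &a, &b, .little));
}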

@ -1098,8 +1098,8 @@ pub fn readElfDebugInfo(
if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
const endian: std.builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) {
elf.ELFDATA2LSB => .Little,
elf.ELFDATA2MSB => .Big,
elf.ELFDATA2LSB => .little,
elf.ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
assert(endian == native_endian); // this is our own debug info
@ -2040,7 +2040,7 @@ pub const ModuleDebugInfo = switch (native_os) {
if (missing_debug_info) return error.MissingDebugInfo;
var di = DW.DwarfInfo{
.endian = .Little,
.endian = .little,
.sections = sections,
.is_macho = true,
};

View file

@ -497,8 +497,8 @@ pub const Header = struct {
if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
ELFDATA2LSB => .Little,
ELFDATA2MSB => .Big,
ELFDATA2LSB => .little,
ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
const need_bswap = endian != native_endian;

View file

@ -98,7 +98,7 @@ pub fn skipChars2(self: *FloatStream, c1: u8, c2: u8) void {
}
pub fn readU64Unchecked(self: FloatStream) u64 {
return std.mem.readInt(u64, self.slice[self.offset..][0..8], .Little);
return std.mem.readInt(u64, self.slice[self.offset..][0..8], .little);
}
pub fn readU64(self: FloatStream) ?u64 {

View file

@ -260,7 +260,7 @@ pub fn Decimal(comptime T: type) type {
if (!isEightDigits(v)) {
break;
}
std.mem.writeInt(u64, d.digits[d.num_digits..][0..8], v - 0x3030_3030_3030_3030, .Little);
std.mem.writeInt(u64, d.digits[d.num_digits..][0..8], v - 0x3030_3030_3030_3030, .little);
d.num_digits += 8;
stream.advance(8);
}

View file

@ -1828,7 +1828,7 @@ test "ComponentIterator windows" {
test "ComponentIterator windows UTF-16" {
// TODO: Fix on big endian architectures
if (builtin.cpu.arch.endian() != .Little) {
if (builtin.cpu.arch.endian() != .little) {
return error.SkipZigTest;
}

View file

@ -6,11 +6,11 @@ inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 {
}
fn fetch32(ptr: [*]const u8, offset: usize) u32 {
return std.mem.readInt(u32, offsetPtr(ptr, offset)[0..4], .Little);
return std.mem.readInt(u32, offsetPtr(ptr, offset)[0..4], .little);
}
fn fetch64(ptr: [*]const u8, offset: usize) u64 {
return std.mem.readInt(u64, offsetPtr(ptr, offset)[0..8], .Little);
return std.mem.readInt(u64, offsetPtr(ptr, offset)[0..8], .little);
}
pub const CityHash32 = struct {

View file

@ -163,7 +163,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
const p = input[i..][0..8];
// Unrolling this way gives ~50Mb/s increase
self.crc ^= std.mem.readInt(u32, p[0..4], .Little);
self.crc ^= std.mem.readInt(u32, p[0..4], .little);
self.crc =
lookup_tables[0][p[7]] ^

View file

@ -18,7 +18,7 @@ pub const Murmur2_32 = struct {
var h1: u32 = seed ^ len;
for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
if (native_endian == .big)
k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 24;
@ -102,7 +102,7 @@ pub const Murmur2_64 = struct {
var h1: u64 = seed ^ (@as(u64, str.len) *% m);
for (@as([*]align(1) const u64, @ptrCast(str.ptr))[0 .. str.len / 8]) |v| {
var k1: u64 = v;
if (native_endian == .Big)
if (native_endian == .big)
k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 47;
@ -115,7 +115,7 @@ pub const Murmur2_64 = struct {
if (rest > 0) {
var k1: u64 = 0;
@memcpy(@as([*]u8, @ptrCast(&k1))[0..rest], str[offset..]);
if (native_endian == .Big)
if (native_endian == .big)
k1 = @byteSwap(k1);
h1 ^= k1;
h1 *%= m;
@ -182,7 +182,7 @@ pub const Murmur3_32 = struct {
var h1: u32 = seed;
for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
if (native_endian == .big)
k1 = @byteSwap(k1);
k1 *%= c1;
k1 = rotl32(k1, 15);
@ -286,7 +286,7 @@ test "murmur2_32" {
var v1: u64 = 0x1234567812345678;
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
if (native_endian == .big) {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
@ -310,7 +310,7 @@ test "murmur2_64" {
var v1: u64 = 0x1234567812345678;
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
if (native_endian == .big) {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
@ -334,7 +334,7 @@ test "murmur3_32" {
var v1: u64 = 0x1234567812345678;
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
if (native_endian == .big) {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}

View file

@ -37,7 +37,7 @@ pub fn smhasher(comptime hash_fn: anytype) u32 {
for (0..256) |i| {
buf[i] = @intCast(i);
const h = hashMaybeSeed(hash_fn, 256 - i, buf[0..i]);
std.mem.writeInt(HashResult, buf_all[i * hash_size ..][0..hash_size], h, .Little);
std.mem.writeInt(HashResult, buf_all[i * hash_size ..][0..hash_size], h, .little);
}
return @truncate(hashMaybeSeed(hash_fn, 0, buf_all[0..]));

View file

@ -131,7 +131,7 @@ pub const Wyhash = struct {
inline fn read(comptime bytes: usize, data: []const u8) u64 {
std.debug.assert(bytes <= 8);
const T = std.meta.Int(.unsigned, 8 * bytes);
return @as(u64, std.mem.readInt(T, data[0..bytes], .Little));
return @as(u64, std.mem.readInt(T, data[0..bytes], .little));
}
inline fn mum(a: *u64, b: *u64) void {

View file

@ -53,10 +53,10 @@ pub const XxHash64 = struct {
}
fn processStripe(self: *Accumulator, buf: *const [32]u8) void {
self.acc1 = round(self.acc1, mem.readInt(u64, buf[0..8], .Little));
self.acc2 = round(self.acc2, mem.readInt(u64, buf[8..16], .Little));
self.acc3 = round(self.acc3, mem.readInt(u64, buf[16..24], .Little));
self.acc4 = round(self.acc4, mem.readInt(u64, buf[24..32], .Little));
self.acc1 = round(self.acc1, mem.readInt(u64, buf[0..8], .little));
self.acc2 = round(self.acc2, mem.readInt(u64, buf[8..16], .little));
self.acc3 = round(self.acc3, mem.readInt(u64, buf[16..24], .little));
self.acc4 = round(self.acc4, mem.readInt(u64, buf[24..32], .little));
}
fn merge(self: Accumulator) u64 {
@ -139,7 +139,7 @@ pub const XxHash64 = struct {
fn finalize8(v: u64, bytes: *const [8]u8) u64 {
var acc = v;
const lane = mem.readInt(u64, bytes, .Little);
const lane = mem.readInt(u64, bytes, .little);
acc ^= round(0, lane);
acc = rotl(u64, acc, 27) *% prime_1;
acc +%= prime_4;
@ -148,7 +148,7 @@ pub const XxHash64 = struct {
fn finalize4(v: u64, bytes: *const [4]u8) u64 {
var acc = v;
const lane = @as(u64, mem.readInt(u32, bytes, .Little));
const lane = @as(u64, mem.readInt(u32, bytes, .little));
acc ^= lane *% prime_1;
acc = rotl(u64, acc, 23) *% prime_2;
acc +%= prime_3;
@ -291,10 +291,10 @@ pub const XxHash32 = struct {
}
fn processStripe(self: *Accumulator, buf: *const [16]u8) void {
self.acc1 = round(self.acc1, mem.readInt(u32, buf[0..4], .Little));
self.acc2 = round(self.acc2, mem.readInt(u32, buf[4..8], .Little));
self.acc3 = round(self.acc3, mem.readInt(u32, buf[8..12], .Little));
self.acc4 = round(self.acc4, mem.readInt(u32, buf[12..16], .Little));
self.acc1 = round(self.acc1, mem.readInt(u32, buf[0..4], .little));
self.acc2 = round(self.acc2, mem.readInt(u32, buf[4..8], .little));
self.acc3 = round(self.acc3, mem.readInt(u32, buf[8..12], .little));
self.acc4 = round(self.acc4, mem.readInt(u32, buf[12..16], .little));
}
fn merge(self: Accumulator) u32 {
@ -390,7 +390,7 @@ pub const XxHash32 = struct {
fn finalize4(v: u32, bytes: *const [4]u8) u32 {
var acc = v;
const lane = mem.readInt(u32, bytes, .Little);
const lane = mem.readInt(u32, bytes, .little);
acc +%= lane *% prime_3;
acc = rotl(u32, acc, 17) *% prime_4;
return acc;
@ -472,7 +472,7 @@ pub const XxHash3 = struct {
}
inline fn swap(x: anytype) @TypeOf(x) {
return if (builtin.cpu.arch.endian() == .Big) @byteSwap(x) else x;
return if (builtin.cpu.arch.endian() == .big) @byteSwap(x) else x;
}
inline fn disableAutoVectorization(x: anytype) void {

View file

@ -28,7 +28,7 @@ const PageStatus = enum(u1) {
const FreeBlock = struct {
data: []u128,
const Io = std.packed_int_array.PackedIntIo(u1, .Little);
const Io = std.packed_int_array.PackedIntIo(u1, .little);
fn totalPages(self: FreeBlock) usize {
return self.data.len * 128;

View file

@ -1581,7 +1581,7 @@ pub fn fetch(client: *Client, allocator: Allocator, options: FetchOptions) !Fetc
test {
const native_endian = comptime builtin.cpu.arch.endian();
if (builtin.zig_backend == .stage2_llvm and native_endian == .Big) {
if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
// https://github.com/ziglang/zig/issues/13782
return error.SkipZigTest;
}

View file

@ -739,7 +739,7 @@ test "HTTP server handles a chunked transfer coding request" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const native_endian = comptime builtin.cpu.arch.endian();
if (builtin.zig_backend == .stage2_llvm and native_endian == .Big) {
if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
// https://github.com/ziglang/zig/issues/13782
return error.SkipZigTest;
}

View file

@ -617,8 +617,8 @@ inline fn int32(array: *const [4]u8) u32 {
inline fn intShift(comptime T: type, x: anytype) T {
switch (@import("builtin").cpu.arch.endian()) {
.Little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
.Big => return @as(T, @truncate(x)),
.little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
.big => return @as(T, @truncate(x)),
}
}

View file

@ -334,7 +334,7 @@ pub fn readStruct(self: Self, comptime T: type) anyerror!T {
pub fn readStructBig(self: Self, comptime T: type) anyerror!T {
var res = try self.readStruct(T);
if (native_endian != std.builtin.Endian.Big) {
if (native_endian != std.builtin.Endian.big) {
mem.byteSwapAllFields(T, &res);
}
return res;

View file
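A small sketch of reading a big-endian wire header through readStructBig under the new tag spelling (not part of this diff; the Header struct and byte values are made up for illustration):

const std = @import("std");

const Header = extern struct {
    magic: u32,
    len: u16,
};

test "readStructBig with lowercase endian tags" {
    // readStruct consumes @sizeOf(Header) == 8 bytes, so two trailing padding bytes are included.
    const wire = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x10, 0x00, 0x00 };
    var fbs = std.io.fixedBufferStream(&wire);
    const h = try fbs.reader().readStructBig(Header);
    try std.testing.expectEqual(@as(u32, 0xDEADBEEF), h.magic);
    try std.testing.expectEqual(@as(u16, 16), h.len);
}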

@ -63,14 +63,14 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
const n = if (self.bit_count >= bits) @as(u3, @intCast(bits)) else self.bit_count;
const shift = u7_bit_count - n;
switch (endian) {
.Big => {
.big => {
out_buffer = @as(Buf, self.bit_buffer >> shift);
if (n >= u7_bit_count)
self.bit_buffer = 0
else
self.bit_buffer <<= n;
},
.Little => {
.little => {
const value = (self.bit_buffer << shift) >> shift;
out_buffer = @as(Buf, value);
if (n >= u7_bit_count)
@ -93,7 +93,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
};
switch (endian) {
.Big => {
.big => {
if (n >= u8_bit_count) {
out_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
out_buffer <<= 1;
@ -109,7 +109,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
self.bit_buffer = @as(u7, @truncate(next_byte << @as(u3, @intCast(n - 1))));
self.bit_count = shift;
},
.Little => {
.little => {
if (n >= u8_bit_count) {
out_buffer |= @as(Buf, next_byte) << @as(BufShift, @intCast(out_bits.*));
out_bits.* += u8_bit_count;
@ -168,7 +168,7 @@ test "api coverage" {
const mem_le = [_]u8{ 0b00011101, 0b10010101 };
var mem_in_be = io.fixedBufferStream(&mem_be);
var bit_stream_be = bitReader(.Big, mem_in_be.reader());
var bit_stream_be = bitReader(.big, mem_in_be.reader());
var out_bits: usize = undefined;
@ -205,7 +205,7 @@ test "api coverage" {
try expectError(error.EndOfStream, bit_stream_be.readBitsNoEof(u1, 1));
var mem_in_le = io.fixedBufferStream(&mem_le);
var bit_stream_le = bitReader(.Little, mem_in_le.reader());
var bit_stream_le = bitReader(.little, mem_in_le.reader());
try expect(1 == try bit_stream_le.readBits(u2, 1, &out_bits));
try expect(out_bits == 1);

View file

@ -51,8 +51,8 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
const high_byte_shift = @as(BufShift, @intCast(buf_bit_count - u8_bit_count));
var in_buffer = switch (endian) {
.Big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)),
.Little => buf_value,
.big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)),
.little => buf_value,
};
var in_bits = bits;
@ -60,13 +60,13 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
const bits_remaining = u8_bit_count - self.bit_count;
const n = @as(u3, @intCast(if (bits_remaining > bits) bits else bits_remaining));
switch (endian) {
.Big => {
.big => {
const shift = @as(BufShift, @intCast(high_byte_shift + self.bit_count));
const v = @as(u8, @intCast(in_buffer >> shift));
self.bit_buffer |= v;
in_buffer <<= n;
},
.Little => {
.little => {
const v = @as(u8, @truncate(in_buffer)) << @as(u3, @intCast(self.bit_count));
self.bit_buffer |= v;
in_buffer >>= n;
@ -86,13 +86,13 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
//copy bytes until we can't fill one anymore, then leave the rest in bit_buffer
while (in_bits >= u8_bit_count) {
switch (endian) {
.Big => {
.big => {
const v = @as(u8, @intCast(in_buffer >> high_byte_shift));
try self.forward_writer.writeByte(v);
in_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
in_buffer <<= 1;
},
.Little => {
.little => {
const v = @as(u8, @truncate(in_buffer));
try self.forward_writer.writeByte(v);
in_buffer >>= @as(u3, @intCast(u8_bit_count - 1));
@ -105,8 +105,8 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
if (in_bits > 0) {
self.bit_count = @as(u4, @intCast(in_bits));
self.bit_buffer = switch (endian) {
.Big => @as(u8, @truncate(in_buffer >> high_byte_shift)),
.Little => @as(u8, @truncate(in_buffer)),
.big => @as(u8, @truncate(in_buffer >> high_byte_shift)),
.little => @as(u8, @truncate(in_buffer)),
};
}
}
@ -148,7 +148,7 @@ test "api coverage" {
var mem_le = [_]u8{0} ** 2;
var mem_out_be = io.fixedBufferStream(&mem_be);
var bit_stream_be = bitWriter(.Big, mem_out_be.writer());
var bit_stream_be = bitWriter(.big, mem_out_be.writer());
try bit_stream_be.writeBits(@as(u2, 1), 1);
try bit_stream_be.writeBits(@as(u5, 2), 2);
@ -172,7 +172,7 @@ test "api coverage" {
try bit_stream_be.writeBits(@as(u0, 0), 0);
var mem_out_le = io.fixedBufferStream(&mem_le);
var bit_stream_le = bitWriter(.Little, mem_out_le.writer());
var bit_stream_le = bitWriter(.little, mem_out_le.writer());
try bit_stream_le.writeBits(@as(u2, 1), 1);
try bit_stream_le.writeBits(@as(u5, 2), 2);

View file
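A round-trip sketch covering both the bit reader and bit writer with the renamed .big tag (not part of this diff; the 9-bit value is arbitrary):

const std = @import("std");

test "bit writer/reader round trip with .big" {
    var buf = [_]u8{0} ** 2;
    var out = std.io.fixedBufferStream(&buf);
    var bw = std.io.bitWriter(.big, out.writer());
    try bw.writeBits(@as(u9, 0x1ab), 9);
    try bw.flushBits();

    var in = std.io.fixedBufferStream(&buf);
    var br = std.io.bitReader(.big, in.reader());
    try std.testing.expectEqual(@as(u9, 0x1ab), try br.readBitsNoEof(u9, 9));
}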

@ -183,7 +183,7 @@ test "GenericReader methods can return error.EndOfStream" {
var fbs = std.io.fixedBufferStream("");
try std.testing.expectError(
error.EndOfStream,
fbs.reader().readEnum(enum(u8) { a, b }, .Little),
fbs.reader().readEnum(enum(u8) { a, b }, .little),
);
try std.testing.expectError(
error.EndOfStream,

View file

@ -798,7 +798,7 @@ pub const Mutable = struct {
const endian_mask: usize = (@sizeOf(Limb) - 1) << 3;
var bytes = std.mem.sliceAsBytes(r.limbs);
var bits = std.packed_int_array.PackedIntSliceEndian(u1, .Little).init(bytes, limbs_required * @bitSizeOf(Limb));
var bits = std.packed_int_array.PackedIntSliceEndian(u1, .little).init(bytes, limbs_required * @bitSizeOf(Limb));
var k: usize = 0;
while (k < ((bit_count + 1) / 2)) : (k += 1) {
@ -807,7 +807,7 @@ pub const Mutable = struct {
// This "endian mask" remaps a low (LE) byte to the corresponding high
// (BE) byte in the Limb, without changing which limbs we are indexing
if (native_endian == .Big) {
if (native_endian == .big) {
i ^= endian_mask;
rev_i ^= endian_mask;
}
@ -821,8 +821,8 @@ pub const Mutable = struct {
// Calculate signed-magnitude representation for output
if (signedness == .signed) {
const last_bit = switch (native_endian) {
.Little => bits.get(bit_count - 1),
.Big => bits.get((bit_count - 1) ^ endian_mask),
.little => bits.get(bit_count - 1),
.big => bits.get((bit_count - 1) ^ endian_mask),
};
if (last_bit == 1) {
r.bitNotWrap(r.toConst(), .unsigned, bit_count); // Bitwise NOT.
@ -869,7 +869,7 @@ pub const Mutable = struct {
// This "endian mask" remaps a low (LE) byte to the corresponding high
// (BE) byte in the Limb, without changing which limbs we are indexing
if (native_endian == .Big) {
if (native_endian == .big) {
i ^= endian_mask;
rev_i ^= endian_mask;
}
@ -883,8 +883,8 @@ pub const Mutable = struct {
// Calculate signed-magnitude representation for output
if (signedness == .signed) {
const last_byte = switch (native_endian) {
.Little => bytes[byte_count - 1],
.Big => bytes[(byte_count - 1) ^ endian_mask],
.little => bytes[byte_count - 1],
.big => bytes[(byte_count - 1) ^ endian_mask],
};
if (last_byte & (1 << 7) != 0) { // Check sign bit of last byte
@ -1912,8 +1912,8 @@ pub const Mutable = struct {
if (signedness == .signed) {
const total_bits = bit_offset + bit_count;
var last_byte = switch (endian) {
.Little => ((total_bits + 7) / 8) - 1,
.Big => buffer.len - ((total_bits + 7) / 8),
.little => ((total_bits + 7) / 8) - 1,
.big => buffer.len - ((total_bits + 7) / 8),
};
const sign_bit = @as(u8, 1) << @as(u3, @intCast((total_bits - 1) % 8));

View file

@ -2774,7 +2774,7 @@ test "big int conversion read/write twos complement" {
var buffer1 = try testing.allocator.alloc(u8, 64);
defer testing.allocator.free(buffer1);
const endians = [_]std.builtin.Endian{ .Little, .Big };
const endians = [_]std.builtin.Endian{ .little, .big };
const abi_size = 64;
for (endians) |endian| {
@ -2804,26 +2804,26 @@ test "big int conversion read twos complement with padding" {
// (3) should sign-extend any bits from bit_count to 8 * abi_size
var bit_count: usize = 12 * 8 + 1;
a.toConst().writeTwosComplement(buffer1[0..13], .Little);
a.toConst().writeTwosComplement(buffer1[0..13], .little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa }));
a.toConst().writeTwosComplement(buffer1[0..13], .Big);
a.toConst().writeTwosComplement(buffer1[0..13], .big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa }));
a.toConst().writeTwosComplement(buffer1[0..16], .Little);
a.toConst().writeTwosComplement(buffer1[0..16], .little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 }));
a.toConst().writeTwosComplement(buffer1[0..16], .Big);
a.toConst().writeTwosComplement(buffer1[0..16], .big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));
@memset(buffer1, 0xaa);
try a.set(-0x01_02030405_06070809_0a0b0c0d);
bit_count = 12 * 8 + 2;
a.toConst().writeTwosComplement(buffer1[0..13], .Little);
a.toConst().writeTwosComplement(buffer1[0..13], .little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa }));
a.toConst().writeTwosComplement(buffer1[0..13], .Big);
a.toConst().writeTwosComplement(buffer1[0..13], .big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa }));
a.toConst().writeTwosComplement(buffer1[0..16], .Little);
a.toConst().writeTwosComplement(buffer1[0..16], .little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff }));
a.toConst().writeTwosComplement(buffer1[0..16], .Big);
a.toConst().writeTwosComplement(buffer1[0..16], .big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }));
}
@ -2838,13 +2838,13 @@ test "big int write twos complement +/- zero" {
// Test zero
m.toConst().writeTwosComplement(buffer1[0..13], .Little);
m.toConst().writeTwosComplement(buffer1[0..13], .little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
m.toConst().writeTwosComplement(buffer1[0..13], .Big);
m.toConst().writeTwosComplement(buffer1[0..13], .big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
m.toConst().writeTwosComplement(buffer1[0..16], .Little);
m.toConst().writeTwosComplement(buffer1[0..16], .little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
m.toConst().writeTwosComplement(buffer1[0..16], .Big);
m.toConst().writeTwosComplement(buffer1[0..16], .big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
@memset(buffer1, 0xaa);
@ -2852,13 +2852,13 @@ test "big int write twos complement +/- zero" {
// Test negative zero
m.toConst().writeTwosComplement(buffer1[0..13], .Little);
m.toConst().writeTwosComplement(buffer1[0..13], .little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
m.toConst().writeTwosComplement(buffer1[0..13], .Big);
m.toConst().writeTwosComplement(buffer1[0..13], .big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
m.toConst().writeTwosComplement(buffer1[0..16], .Little);
m.toConst().writeTwosComplement(buffer1[0..16], .little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
m.toConst().writeTwosComplement(buffer1[0..16], .Big);
m.toConst().writeTwosComplement(buffer1[0..16], .big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
}
@ -2881,19 +2881,19 @@ test "big int conversion write twos complement with padding" {
// Test 0x01_02030405_06070809_0a0b0c0d
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xb };
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xb, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xab, 0xaa, 0xaa, 0xaa };
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xab, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
bit_count = @sizeOf(Limb) * 8;
@ -2901,19 +2901,19 @@ test "big int conversion write twos complement with padding" {
// Test 0x0a0a0a0a_02030405_06070809_0a0b0c0d
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa };
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa };
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
bit_count = 12 * 8 + 2;
@ -2921,42 +2921,42 @@ test "big int conversion write twos complement with padding" {
// Test -0x01_02030405_06070809_0a0b0c0d
buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02 };
m.readTwosComplement(buffer[0..13], bit_count, .Little, .signed);
m.readTwosComplement(buffer[0..13], bit_count, .little, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
m.readTwosComplement(buffer[0..13], bit_count, .Big, .signed);
m.readTwosComplement(buffer[0..13], bit_count, .big, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02, 0xaa, 0xaa, 0xaa };
m.readTwosComplement(buffer[0..16], bit_count, .Little, .signed);
m.readTwosComplement(buffer[0..16], bit_count, .little, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
m.readTwosComplement(buffer[0..16], bit_count, .Big, .signed);
m.readTwosComplement(buffer[0..16], bit_count, .big, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
// Test 0
buffer = &([_]u8{0} ** 16);
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
bit_count = 0;
buffer = &([_]u8{0xaa} ** 16);
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
}
@ -2975,15 +2975,15 @@ test "big int conversion write twos complement zero" {
var buffer: []const u8 = undefined;
buffer = &([_]u8{0} ** 13);
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..13], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
buffer = &([_]u8{0} ** 16);
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
m.readTwosComplement(buffer[0..16], bit_count, .big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
}

View file
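A compact sketch of the two's-complement read/write helpers with the renamed tags (not part of this diff; 0x0102 is an arbitrary value):

const std = @import("std");

test "big.int writeTwosComplement with lowercase endian tags" {
    var a = try std.math.big.int.Managed.initSet(std.testing.allocator, 0x0102);
    defer a.deinit();
    var buf: [2]u8 = undefined;
    a.toConst().writeTwosComplement(&buf, .little);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x02, 0x01 }, &buf);
    a.toConst().writeTwosComplement(&buf, .big);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x01, 0x02 }, &buf);
}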

@ -1499,12 +1499,12 @@ test "containsAtLeast" {
pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian) ReturnType {
var result: ReturnType = 0;
switch (endian) {
.Big => {
.big => {
for (bytes) |b| {
result = (result << 8) | b;
}
},
.Little => {
.little => {
const ShiftType = math.Log2Int(ReturnType);
for (bytes, 0..) |b, index| {
result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8)));
@ -1539,8 +1539,8 @@ pub fn readVarPackedInt(
const pad = @as(Log2N, @intCast(@bitSizeOf(T) - bit_count));
const lowest_byte = switch (endian) {
.Big => bytes.len - (bit_offset / 8) - read_size,
.Little => bit_offset / 8,
.big => bytes.len - (bit_offset / 8) - read_size,
.little => bit_offset / 8,
};
const read_bytes = bytes[lowest_byte..][0..read_size];
@ -1550,7 +1550,7 @@ pub fn readVarPackedInt(
const value = if (read_size == 1) b: {
break :b @as(uN, @truncate(read_bytes[0] >> bit_shift));
} else b: {
const i: u1 = @intFromBool(endian == .Big);
const i: u1 = @intFromBool(endian == .big);
const head = @as(uN, @truncate(read_bytes[i] >> bit_shift));
const tail_shift = @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
const tail = @as(uN, @truncate(read_bytes[1 - i]));
@ -1565,13 +1565,13 @@ pub fn readVarPackedInt(
// Copy the value out (respecting endianness), accounting for bit_shift
var int: uN = 0;
switch (endian) {
.Big => {
.big => {
for (read_bytes[0 .. read_size - 1]) |elem| {
int = elem | (int << 8);
}
int = (read_bytes[read_size - 1] >> bit_shift) | (int << (@as(u4, 8) - bit_shift));
},
.Little => {
.little => {
int = read_bytes[0] >> bit_shift;
for (read_bytes[1..], 0..) |elem, i| {
int |= (@as(uN, elem) << @as(Log2N, @intCast((8 * (i + 1) - bit_shift))));
@ -1593,23 +1593,23 @@ pub inline fn readInt(comptime T: type, buffer: *const [@divExact(@typeInfo(T).I
}
test readInt {
try testing.expect(readInt(u0, &[_]u8{}, .Big) == 0x0);
try testing.expect(readInt(u0, &[_]u8{}, .Little) == 0x0);
try testing.expect(readInt(u0, &[_]u8{}, .big) == 0x0);
try testing.expect(readInt(u0, &[_]u8{}, .little) == 0x0);
try testing.expect(readInt(u8, &[_]u8{0x32}, .Big) == 0x32);
try testing.expect(readInt(u8, &[_]u8{0x12}, .Little) == 0x12);
try testing.expect(readInt(u8, &[_]u8{0x32}, .big) == 0x32);
try testing.expect(readInt(u8, &[_]u8{0x12}, .little) == 0x12);
try testing.expect(readInt(u16, &[_]u8{ 0x12, 0x34 }, .Big) == 0x1234);
try testing.expect(readInt(u16, &[_]u8{ 0x12, 0x34 }, .Little) == 0x3412);
try testing.expect(readInt(u16, &[_]u8{ 0x12, 0x34 }, .big) == 0x1234);
try testing.expect(readInt(u16, &[_]u8{ 0x12, 0x34 }, .little) == 0x3412);
try testing.expect(readInt(u72, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }, .Big) == 0x123456789abcdef024);
try testing.expect(readInt(u72, &[_]u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }, .Little) == 0xfedcba9876543210ec);
try testing.expect(readInt(u72, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }, .big) == 0x123456789abcdef024);
try testing.expect(readInt(u72, &[_]u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }, .little) == 0xfedcba9876543210ec);
try testing.expect(readInt(i8, &[_]u8{0xff}, .Big) == -1);
try testing.expect(readInt(i8, &[_]u8{0xfe}, .Little) == -2);
try testing.expect(readInt(i8, &[_]u8{0xff}, .big) == -1);
try testing.expect(readInt(i8, &[_]u8{0xfe}, .little) == -2);
try testing.expect(readInt(i16, &[_]u8{ 0xff, 0xfd }, .Big) == -3);
try testing.expect(readInt(i16, &[_]u8{ 0xfc, 0xff }, .Little) == -4);
try testing.expect(readInt(i16, &[_]u8{ 0xff, 0xfd }, .big) == -3);
try testing.expect(readInt(i16, &[_]u8{ 0xfc, 0xff }, .little) == -4);
}
fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T {
@ -1629,7 +1629,7 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T
// Read by loading a LoadInt, and then follow it up with a 1-byte read
// of the tail if bit_offset pushed us over a byte boundary.
const read_bytes = bytes[bit_offset / 8 ..];
const val = @as(uN, @truncate(readInt(LoadInt, read_bytes[0..load_size], .Little) >> bit_shift));
const val = @as(uN, @truncate(readInt(LoadInt, read_bytes[0..load_size], .little) >> bit_shift));
if (bit_shift > load_tail_bits) {
const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
const tail_byte = read_bytes[load_size];
@ -1657,7 +1657,7 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
// of the tail if bit_offset pushed us over a byte boundary.
const end = bytes.len - (bit_offset / 8);
const read_bytes = bytes[(end - byte_count)..end];
const val = @as(uN, @truncate(readInt(LoadInt, bytes[(end - load_size)..end][0..load_size], .Big) >> bit_shift));
const val = @as(uN, @truncate(readInt(LoadInt, bytes[(end - load_size)..end][0..load_size], .big) >> bit_shift));
if (bit_shift > load_tail_bits) {
const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
const tail_byte = if (bit_count < 8) @as(uN, @truncate(read_bytes[0])) else @as(uN, read_bytes[0]);
@ -1666,13 +1666,13 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
}
pub const readPackedIntNative = switch (native_endian) {
.Little => readPackedIntLittle,
.Big => readPackedIntBig,
.little => readPackedIntLittle,
.big => readPackedIntBig,
};
pub const readPackedIntForeign = switch (native_endian) {
.Little => readPackedIntBig,
.Big => readPackedIntLittle,
.little => readPackedIntBig,
.big => readPackedIntLittle,
};
/// Loads an integer from packed memory.
@ -1685,22 +1685,22 @@ pub const readPackedIntForeign = switch (native_endian) {
///
pub fn readPackedInt(comptime T: type, bytes: []const u8, bit_offset: usize, endian: Endian) T {
switch (endian) {
.Little => return readPackedIntLittle(T, bytes, bit_offset),
.Big => return readPackedIntBig(T, bytes, bit_offset),
.little => return readPackedIntLittle(T, bytes, bit_offset),
.big => return readPackedIntBig(T, bytes, bit_offset),
}
}
test "comptime read/write int" {
comptime {
var bytes: [2]u8 = undefined;
writeInt(u16, &bytes, 0x1234, .Little);
const result = readInt(u16, &bytes, .Big);
writeInt(u16, &bytes, 0x1234, .little);
const result = readInt(u16, &bytes, .big);
try testing.expect(result == 0x3412);
}
comptime {
var bytes: [2]u8 = undefined;
writeInt(u16, &bytes, 0x1234, .Big);
const result = readInt(u16, &bytes, .Little);
writeInt(u16, &bytes, 0x1234, .big);
const result = readInt(u16, &bytes, .little);
try testing.expect(result == 0x3412);
}
}
@ -1718,34 +1718,34 @@ test writeInt {
var buf2: [2]u8 = undefined;
var buf9: [9]u8 = undefined;
writeInt(u0, &buf0, 0x0, .Big);
writeInt(u0, &buf0, 0x0, .big);
try testing.expect(eql(u8, buf0[0..], &[_]u8{}));
writeInt(u0, &buf0, 0x0, .Little);
writeInt(u0, &buf0, 0x0, .little);
try testing.expect(eql(u8, buf0[0..], &[_]u8{}));
writeInt(u8, &buf1, 0x12, .Big);
writeInt(u8, &buf1, 0x12, .big);
try testing.expect(eql(u8, buf1[0..], &[_]u8{0x12}));
writeInt(u8, &buf1, 0x34, .Little);
writeInt(u8, &buf1, 0x34, .little);
try testing.expect(eql(u8, buf1[0..], &[_]u8{0x34}));
writeInt(u16, &buf2, 0x1234, .Big);
writeInt(u16, &buf2, 0x1234, .big);
try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0x12, 0x34 }));
writeInt(u16, &buf2, 0x5678, .Little);
writeInt(u16, &buf2, 0x5678, .little);
try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0x78, 0x56 }));
writeInt(u72, &buf9, 0x123456789abcdef024, .Big);
writeInt(u72, &buf9, 0x123456789abcdef024, .big);
try testing.expect(eql(u8, buf9[0..], &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }));
writeInt(u72, &buf9, 0xfedcba9876543210ec, .Little);
writeInt(u72, &buf9, 0xfedcba9876543210ec, .little);
try testing.expect(eql(u8, buf9[0..], &[_]u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }));
writeInt(i8, &buf1, -1, .Big);
writeInt(i8, &buf1, -1, .big);
try testing.expect(eql(u8, buf1[0..], &[_]u8{0xff}));
writeInt(i8, &buf1, -2, .Little);
writeInt(i8, &buf1, -2, .little);
try testing.expect(eql(u8, buf1[0..], &[_]u8{0xfe}));
writeInt(i16, &buf2, -3, .Big);
writeInt(i16, &buf2, -3, .big);
try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0xff, 0xfd }));
writeInt(i16, &buf2, -4, .Little);
writeInt(i16, &buf2, -4, .little);
try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0xfc, 0xff }));
}
@ -1779,7 +1779,7 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value:
write_value |= @as(StoreInt, tail) << (8 * (store_size - 1));
}
writeInt(StoreInt, write_bytes[0..store_size], write_value, .Little);
writeInt(StoreInt, write_bytes[0..store_size], write_value, .little);
}
fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) void {
@ -1814,17 +1814,17 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T)
write_value |= @as(StoreInt, tail) << (8 * (store_size - 1));
}
writeInt(StoreInt, write_bytes[(byte_count - store_size)..][0..store_size], write_value, .Big);
writeInt(StoreInt, write_bytes[(byte_count - store_size)..][0..store_size], write_value, .big);
}
pub const writePackedIntNative = switch (native_endian) {
.Little => writePackedIntLittle,
.Big => writePackedIntBig,
.little => writePackedIntLittle,
.big => writePackedIntBig,
};
pub const writePackedIntForeign = switch (native_endian) {
.Little => writePackedIntBig,
.Big => writePackedIntLittle,
.little => writePackedIntBig,
.big => writePackedIntLittle,
};
/// Stores an integer to packed memory.
@ -1838,8 +1838,8 @@ pub const writePackedIntForeign = switch (native_endian) {
///
pub fn writePackedInt(comptime T: type, bytes: []u8, bit_offset: usize, value: T, endian: Endian) void {
switch (endian) {
.Little => writePackedIntLittle(T, bytes, bit_offset, value),
.Big => writePackedIntBig(T, bytes, bit_offset, value),
.little => writePackedIntLittle(T, bytes, bit_offset, value),
.big => writePackedIntBig(T, bytes, bit_offset, value),
}
}
@ -1860,8 +1860,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
const bit_shift = @as(u3, @intCast(bit_offset % 8));
const write_size = (bit_count + bit_shift + 7) / 8;
const lowest_byte = switch (endian) {
.Big => bytes.len - (bit_offset / 8) - write_size,
.Little => bit_offset / 8,
.big => bytes.len - (bit_offset / 8) - write_size,
.little => bit_offset / 8,
};
const write_bytes = bytes[lowest_byte..][0..write_size];
@ -1877,8 +1877,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
var remaining: T = value;
// Iterate bytes forward for Little-endian, backward for Big-endian
const delta: i2 = if (endian == .Big) -1 else 1;
const start = if (endian == .Big) @as(isize, @intCast(write_bytes.len - 1)) else 0;
const delta: i2 = if (endian == .big) -1 else 1;
const start = if (endian == .big) @as(isize, @intCast(write_bytes.len - 1)) else 0;
var i: isize = start; // isize for signed index arithmetic
@ -3122,12 +3122,12 @@ fn testReadIntImpl() !void {
0x56,
0x78,
};
try testing.expect(readInt(u32, &bytes, .Big) == 0x12345678);
try testing.expect(readInt(u32, &bytes, .Big) == 0x12345678);
try testing.expect(readInt(i32, &bytes, .Big) == 0x12345678);
try testing.expect(readInt(u32, &bytes, .Little) == 0x78563412);
try testing.expect(readInt(u32, &bytes, .Little) == 0x78563412);
try testing.expect(readInt(i32, &bytes, .Little) == 0x78563412);
try testing.expect(readInt(u32, &bytes, .big) == 0x12345678);
try testing.expect(readInt(u32, &bytes, .big) == 0x12345678);
try testing.expect(readInt(i32, &bytes, .big) == 0x12345678);
try testing.expect(readInt(u32, &bytes, .little) == 0x78563412);
try testing.expect(readInt(u32, &bytes, .little) == 0x78563412);
try testing.expect(readInt(i32, &bytes, .little) == 0x78563412);
}
{
const buf = [_]u8{
@ -3136,7 +3136,7 @@ fn testReadIntImpl() !void {
0x12,
0x34,
};
const answer = readInt(u32, &buf, .Big);
const answer = readInt(u32, &buf, .big);
try testing.expect(answer == 0x00001234);
}
{
@ -3146,7 +3146,7 @@ fn testReadIntImpl() !void {
0x00,
0x00,
};
const answer = readInt(u32, &buf, .Little);
const answer = readInt(u32, &buf, .little);
try testing.expect(answer == 0x00003412);
}
{
@ -3154,10 +3154,10 @@ fn testReadIntImpl() !void {
0xff,
0xfe,
};
try testing.expect(readInt(u16, &bytes, .Big) == 0xfffe);
try testing.expect(readInt(i16, &bytes, .Big) == -0x0002);
try testing.expect(readInt(u16, &bytes, .Little) == 0xfeff);
try testing.expect(readInt(i16, &bytes, .Little) == -0x0101);
try testing.expect(readInt(u16, &bytes, .big) == 0xfffe);
try testing.expect(readInt(i16, &bytes, .big) == -0x0002);
try testing.expect(readInt(u16, &bytes, .little) == 0xfeff);
try testing.expect(readInt(i16, &bytes, .little) == -0x0101);
}
}
@ -3603,48 +3603,48 @@ test "replaceOwned" {
/// Converts a little-endian integer to host endianness.
pub fn littleToNative(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
.Big => @byteSwap(x),
.little => x,
.big => @byteSwap(x),
};
}
/// Converts a big-endian integer to host endianness.
pub fn bigToNative(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => @byteSwap(x),
.Big => x,
.little => @byteSwap(x),
.big => x,
};
}
/// Converts an integer from specified endianness to host endianness.
pub fn toNative(comptime T: type, x: T, endianness_of_x: Endian) T {
return switch (endianness_of_x) {
.Little => littleToNative(T, x),
.Big => bigToNative(T, x),
.little => littleToNative(T, x),
.big => bigToNative(T, x),
};
}
/// Converts an integer which has host endianness to the desired endianness.
pub fn nativeTo(comptime T: type, x: T, desired_endianness: Endian) T {
return switch (desired_endianness) {
.Little => nativeToLittle(T, x),
.Big => nativeToBig(T, x),
.little => nativeToLittle(T, x),
.big => nativeToBig(T, x),
};
}
/// Converts an integer which has host endianness to little endian.
pub fn nativeToLittle(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
.Big => @byteSwap(x),
.little => x,
.big => @byteSwap(x),
};
}
/// Converts an integer which has host endianness to big endian.
pub fn nativeToBig(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => @byteSwap(x),
.Big => x,
.little => @byteSwap(x),
.big => x,
};
}
@ -3748,8 +3748,8 @@ pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) {
test "asBytes" {
const deadbeef = @as(u32, 0xDEADBEEF);
const deadbeef_bytes = switch (native_endian) {
.Big => "\xDE\xAD\xBE\xEF",
.Little => "\xEF\xBE\xAD\xDE",
.big => "\xDE\xAD\xBE\xEF",
.little => "\xEF\xBE\xAD\xDE",
};
try testing.expect(eql(u8, asBytes(&deadbeef), deadbeef_bytes));
@ -3773,10 +3773,10 @@ test "asBytes" {
.d = 0xA1,
};
switch (native_endian) {
.Little => {
.little => {
try testing.expect(eql(u8, asBytes(&inst), "\xBE\xEF\xDE\xA1"));
},
.Big => {
.big => {
try testing.expect(eql(u8, asBytes(&inst), "\xA1\xDE\xEF\xBE"));
},
}
@ -3808,14 +3808,14 @@ pub fn toBytes(value: anytype) [@sizeOf(@TypeOf(value))]u8 {
test "toBytes" {
var my_bytes = toBytes(@as(u32, 0x12345678));
switch (native_endian) {
.Big => try testing.expect(eql(u8, &my_bytes, "\x12\x34\x56\x78")),
.Little => try testing.expect(eql(u8, &my_bytes, "\x78\x56\x34\x12")),
.big => try testing.expect(eql(u8, &my_bytes, "\x12\x34\x56\x78")),
.little => try testing.expect(eql(u8, &my_bytes, "\x78\x56\x34\x12")),
}
my_bytes[0] = '\x99';
switch (native_endian) {
.Big => try testing.expect(eql(u8, &my_bytes, "\x99\x34\x56\x78")),
.Little => try testing.expect(eql(u8, &my_bytes, "\x99\x56\x34\x12")),
.big => try testing.expect(eql(u8, &my_bytes, "\x99\x34\x56\x78")),
.little => try testing.expect(eql(u8, &my_bytes, "\x99\x56\x34\x12")),
}
}
@ -3840,15 +3840,15 @@ pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T,
test "bytesAsValue" {
const deadbeef = @as(u32, 0xDEADBEEF);
const deadbeef_bytes = switch (native_endian) {
.Big => "\xDE\xAD\xBE\xEF",
.Little => "\xEF\xBE\xAD\xDE",
.big => "\xDE\xAD\xBE\xEF",
.little => "\xEF\xBE\xAD\xDE",
};
try testing.expect(deadbeef == bytesAsValue(u32, deadbeef_bytes).*);
var codeface_bytes: [4]u8 = switch (native_endian) {
.Big => "\xC0\xDE\xFA\xCE",
.Little => "\xCE\xFA\xDE\xC0",
.big => "\xC0\xDE\xFA\xCE",
.little => "\xCE\xFA\xDE\xC0",
}.*;
var codeface = bytesAsValue(u32, &codeface_bytes);
try testing.expect(codeface.* == 0xC0DEFACE);
@ -3870,8 +3870,8 @@ test "bytesAsValue" {
.d = 0xA1,
};
const inst_bytes = switch (native_endian) {
.Little => "\xBE\xEF\xDE\xA1",
.Big => "\xA1\xDE\xEF\xBE",
.little => "\xBE\xEF\xDE\xA1",
.big => "\xA1\xDE\xEF\xBE",
};
const inst2 = bytesAsValue(S, inst_bytes);
try testing.expect(meta.eql(inst, inst2.*));
@ -3898,8 +3898,8 @@ pub fn bytesToValue(comptime T: type, bytes: anytype) T {
}
test "bytesToValue" {
const deadbeef_bytes = switch (native_endian) {
.Big => "\xDE\xAD\xBE\xEF",
.Little => "\xEF\xBE\xAD\xDE",
.big => "\xDE\xAD\xBE\xEF",
.little => "\xEF\xBE\xAD\xDE",
};
const deadbeef = bytesToValue(u32, deadbeef_bytes);
@ -4028,8 +4028,8 @@ test "sliceAsBytes" {
const slice = sliceAsBytes(bytes[0..]);
try testing.expect(slice.len == 4);
try testing.expect(eql(u8, slice, switch (native_endian) {
.Big => "\xDE\xAD\xBE\xEF",
.Little => "\xAD\xDE\xEF\xBE",
.big => "\xDE\xAD\xBE\xEF",
.little => "\xAD\xDE\xEF\xBE",
}));
}
@ -4204,7 +4204,7 @@ test "doNotOptimizeAway" {
doNotOptimizeAway(@as(f64, 0.0));
doNotOptimizeAway([_]u8{0} ** 4);
doNotOptimizeAway([_]u8{0} ** 100);
doNotOptimizeAway(@as(std.builtin.Endian, .Little));
doNotOptimizeAway(@as(std.builtin.Endian, .little));
}
test "alignForward" {
@ -4356,7 +4356,7 @@ test "read/write(Var)PackedInt" {
return error.SkipZigTest;
}
const foreign_endian: Endian = if (native_endian == .Big) .Little else .Big;
const foreign_endian: Endian = if (native_endian == .big) .little else .big;
const expect = std.testing.expect;
var prng = std.rand.DefaultPrng.init(1234);
const random = prng.random();

View file
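A short sketch of the std.mem integer helpers under the renamed tags (not part of this diff; the values and bit offset are arbitrary):

const std = @import("std");

test "mem integer helpers with lowercase endian tags" {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, 0xDEADBEEF, .big);
    try std.testing.expectEqual(@as(u32, 0xDEADBEEF), std.mem.readInt(u32, &buf, .big));
    // The same bytes read back as little-endian come out byte-swapped.
    try std.testing.expectEqual(@as(u32, 0xEFBEADDE), std.mem.readInt(u32, &buf, .little));

    var packed_bytes = [_]u8{0} ** 2;
    std.mem.writePackedInt(u5, &packed_bytes, 3, 0b10110, .little);
    try std.testing.expectEqual(@as(u5, 0b10110), std.mem.readPackedInt(u5, &packed_bytes, 3, .little));
}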

@ -602,8 +602,8 @@ pub const Ip6Address = extern struct {
}
const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr));
const native_endian_parts = switch (native_endian) {
.Big => big_endian_parts.*,
.Little => blk: {
.big => big_endian_parts.*,
.little => blk: {
var buf: [8]u16 = undefined;
for (big_endian_parts, 0..) |part, i| {
buf[i] = mem.bigToNative(u16, part);

View file

@ -206,11 +206,11 @@ fn splitValueBE64(val: i64) [2]u32 {
fn splitValue64(val: i64) [2]u32 {
const u: u64 = @bitCast(val);
switch (native_endian) {
.Little => return [2]u32{
.little => return [2]u32{
@as(u32, @truncate(u)),
@as(u32, @truncate(u >> 32)),
},
.Big => return [2]u32{
.big => return [2]u32{
@as(u32, @truncate(u >> 32)),
@as(u32, @truncate(u)),
},
@ -6028,7 +6028,7 @@ pub const AUDIT = struct {
fn toAudit(arch: std.Target.Cpu.Arch) u32 {
var res: u32 = @intFromEnum(arch.toElfMachine());
if (arch.endian() == .Little) res |= LE;
if (arch.endian() == .little) res |= LE;
switch (arch) {
.aarch64,
.mips64,

View file

@ -696,8 +696,8 @@ pub const Insn = packed struct {
fn endian_swap(endian: std.builtin.Endian, comptime size: Size, dst: Reg) Insn {
return Insn{
.code = switch (endian) {
.Big => 0xdc,
.Little => 0xd4,
.big => 0xdc,
.little => 0xd4,
},
.dst = @intFromEnum(dst),
.src = 0,
@ -712,11 +712,11 @@ pub const Insn = packed struct {
}
pub fn le(comptime size: Size, dst: Reg) Insn {
return endian_swap(.Little, size, dst);
return endian_swap(.little, size, dst);
}
pub fn be(comptime size: Size, dst: Reg) Insn {
return endian_swap(.Big, size, dst);
return endian_swap(.big, size, dst);
}
pub fn call(helper: Helper) Insn {

View file

@ -54,7 +54,7 @@
//! manner to the OpenSSH example:
//!
//! ```zig
//! const offset = if (native_endian == .Little) struct {
//! const offset = if (native_endian == .little) struct {
//! pub const low = 0;
//! pub const high = @sizeOf(u32);
//! } else struct {

View file
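The doc comment above is cut off by the hunk; the following self-contained sketch shows the same endian-dependent-offset pattern with the new tag names, assuming the big-endian branch simply swaps the two offsets (not part of this diff):

const std = @import("std");
const native_endian = @import("builtin").cpu.arch.endian();

// Assumed big-endian branch: the low/high offsets are swapped relative to little-endian.
const offset = if (native_endian == .little) struct {
    pub const low = 0;
    pub const high = @sizeOf(u32);
} else struct {
    pub const low = @sizeOf(u32);
    pub const high = 0;
};

test "endian-dependent offsets" {
    const x: u64 = 0x1122334455667788;
    const bytes = std.mem.asBytes(&x);
    // On either host endianness, offset.low locates the 32 low-order bits of x.
    const low_half = std.mem.readInt(u32, bytes[offset.low..][0..4], native_endian);
    try std.testing.expectEqual(@as(u32, 0x55667788), low_half);
}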

@ -609,7 +609,7 @@ test "mmap" {
var i: u32 = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try stream.writeInt(u32, i, .Little);
try stream.writeInt(u32, i, .little);
}
}
@ -633,7 +633,7 @@ test "mmap" {
var i: u32 = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try testing.expectEqual(i, try stream.readInt(u32, .Little));
try testing.expectEqual(i, try stream.readInt(u32, .little));
}
}
@ -657,7 +657,7 @@ test "mmap" {
var i: u32 = alloc_size / 2 / @sizeOf(u32);
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try testing.expectEqual(i, try stream.readInt(u32, .Little));
try testing.expectEqual(i, try stream.readInt(u32, .little));
}
}

View file

@ -3360,13 +3360,13 @@ pub const GUID = extern struct {
Data4: [8]u8,
const hex_offsets = switch (builtin.target.cpu.arch.endian()) {
.Big => [16]u6{
.big => [16]u6{
0, 2, 4, 6,
9, 11, 14, 16,
19, 21, 24, 26,
28, 30, 32, 34,
},
.Little => [16]u6{
.little => [16]u6{
6, 4, 2, 0,
11, 9, 16, 14,
19, 21, 24, 26,

View file

@ -79,12 +79,12 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
if (endian != native_endian) value = @byteSwap(value);
switch (endian) {
.Big => {
.big => {
value <<= @as(Shift, @intCast(head_keep_bits));
value >>= @as(Shift, @intCast(head_keep_bits));
value >>= @as(Shift, @intCast(tail_keep_bits));
},
.Little => {
.little => {
value <<= @as(Shift, @intCast(tail_keep_bits));
value >>= @as(Shift, @intCast(tail_keep_bits));
value >>= @as(Shift, @intCast(head_keep_bits));
@ -115,8 +115,8 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const head_keep_bits = bit_index - (start_byte * 8);
const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
const keep_shift = switch (endian) {
.Big => @as(Shift, @intCast(tail_keep_bits)),
.Little => @as(Shift, @intCast(head_keep_bits)),
.big => @as(Shift, @intCast(tail_keep_bits)),
.little => @as(Shift, @intCast(head_keep_bits)),
};
//position the bits where they need to be in the container
@ -388,10 +388,10 @@ test "PackedIntArray" {
test "PackedIntIo" {
const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .Little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .Little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .little).get(&bytes, 0, 3));
try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .little).get(&bytes, 0, 3));
}
test "PackedIntArray init" {
@ -555,16 +555,16 @@ test "PackedInt(Array/Slice) sliceCast" {
var i = @as(usize, 0);
while (i < packed_slice_cast_2.len) : (i += 1) {
const val = switch (native_endian) {
.Big => 0b01,
.Little => 0b10,
.big => 0b01,
.little => 0b10,
};
try testing.expect(packed_slice_cast_2.get(i) == val);
}
i = 0;
while (i < packed_slice_cast_4.len) : (i += 1) {
const val = switch (native_endian) {
.Big => 0b0101,
.Little => 0b1010,
.big => 0b0101,
.little => 0b1010,
};
try testing.expect(packed_slice_cast_4.get(i) == val);
}
@ -577,8 +577,8 @@ test "PackedInt(Array/Slice) sliceCast" {
i = 0;
while (i < packed_slice_cast_3.len) : (i += 1) {
const val = switch (native_endian) {
.Big => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
.Little => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
.big => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
.little => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
};
try testing.expect(packed_slice_cast_3.get(i) == val);
}
@ -586,7 +586,7 @@ test "PackedInt(Array/Slice) sliceCast" {
test "PackedInt(Array/Slice)Endian" {
{
const PackedArrayBe = PackedIntArrayEndian(u4, .Big, 8);
const PackedArrayBe = PackedIntArrayEndian(u4, .big, 8);
var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 });
try testing.expect(packed_array_be.bytes[0] == 0b00000001);
try testing.expect(packed_array_be.bytes[1] == 0b00100011);
@ -596,14 +596,14 @@ test "PackedInt(Array/Slice)Endian" {
try testing.expect(packed_array_be.get(i) == i);
}
var packed_slice_le = packed_array_be.sliceCastEndian(u4, .Little);
var packed_slice_le = packed_array_be.sliceCastEndian(u4, .little);
i = 0;
while (i < packed_slice_le.len) : (i += 1) {
const val = if (i % 2 == 0) i + 1 else i - 1;
try testing.expect(packed_slice_le.get(i) == val);
}
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .Little);
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .little);
i = 0;
while (i < packed_slice_le_shift.len) : (i += 1) {
const val = if (i % 2 == 0) i else i + 2;
@ -612,7 +612,7 @@ test "PackedInt(Array/Slice)Endian" {
}
{
const PackedArrayBe = PackedIntArrayEndian(u11, .Big, 8);
const PackedArrayBe = PackedIntArrayEndian(u11, .big, 8);
var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 });
try testing.expect(packed_array_be.bytes[0] == 0b00000000);
try testing.expect(packed_array_be.bytes[1] == 0b00000000);
@ -625,7 +625,7 @@ test "PackedInt(Array/Slice)Endian" {
try testing.expect(packed_array_be.get(i) == i);
}
var packed_slice_le = packed_array_be.sliceCastEndian(u11, .Little);
var packed_slice_le = packed_array_be.sliceCastEndian(u11, .little);
try testing.expect(packed_slice_le.get(0) == 0b00000000000);
try testing.expect(packed_slice_le.get(1) == 0b00010000000);
try testing.expect(packed_slice_le.get(2) == 0b00000000100);
@ -635,7 +635,7 @@ test "PackedInt(Array/Slice)Endian" {
try testing.expect(packed_slice_le.get(6) == 0b10000010000);
try testing.expect(packed_slice_le.get(7) == 0b00000111001);
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .Little);
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .little);
try testing.expect(packed_slice_le_shift.get(0) == 0b00010000000);
try testing.expect(packed_slice_le_shift.get(1) == 0b00000000100);
try testing.expect(packed_slice_le_shift.get(2) == 0b00000000000);

View file
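A brief sketch of PackedIntArrayEndian with the renamed .big tag (not part of this diff; the element values are arbitrary):

const std = @import("std");

test "PackedIntArrayEndian with .big" {
    const Arr = std.packed_int_array.PackedIntArrayEndian(u4, .big, 4);
    var arr = Arr.init([_]u4{ 0, 1, 2, 3 });
    try std.testing.expectEqual(@as(u4, 2), arr.get(2));
    arr.set(0, 0xF);
    try std.testing.expectEqual(@as(u4, 0xF), arr.get(0));
}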

@ -464,12 +464,12 @@ pub const PDBStringTableHeader = extern struct {
};
fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
const num_words = try stream.readInt(u32, .Little);
const num_words = try stream.readInt(u32, .little);
var list = ArrayList(u32).init(allocator);
errdefer list.deinit();
var word_i: u32 = 0;
while (word_i != num_words) : (word_i += 1) {
const word = try stream.readInt(u32, .Little);
const word = try stream.readInt(u32, .little);
var bit_i: u5 = 0;
while (true) : (bit_i += 1) {
if (word & (@as(u32, 1) << bit_i) != 0) {
@ -599,7 +599,7 @@ pub const Pdb = struct {
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
const version = reader.readEnum(SectionContrSubstreamVersion, .Little) catch |err| switch (err) {
const version = reader.readEnum(SectionContrSubstreamVersion, .little) catch |err| switch (err) {
error.InvalidValue => return error.InvalidDebugInfo,
else => |e| return e,
};
@ -625,10 +625,10 @@ pub const Pdb = struct {
const reader = stream.reader();
// Parse the InfoStreamHeader.
const version = try reader.readInt(u32, .Little);
const signature = try reader.readInt(u32, .Little);
const version = try reader.readInt(u32, .little);
const signature = try reader.readInt(u32, .little);
_ = signature;
const age = try reader.readInt(u32, .Little);
const age = try reader.readInt(u32, .little);
const guid = try reader.readBytesNoEof(16);
if (version != 20000404) // VC70, only value observed by LLVM team
@@ -639,7 +639,7 @@ pub const Pdb = struct {
// Find the string table.
const string_table_index = str_tab_index: {
const name_bytes_len = try reader.readInt(u32, .Little);
const name_bytes_len = try reader.readInt(u32, .little);
const name_bytes = try self.allocator.alloc(u8, name_bytes_len);
defer self.allocator.free(name_bytes);
try reader.readNoEof(name_bytes);
@@ -667,8 +667,8 @@ pub const Pdb = struct {
defer self.allocator.free(deleted);
for (present) |_| {
const name_offset = try reader.readInt(u32, .Little);
const name_index = try reader.readInt(u32, .Little);
const name_offset = try reader.readInt(u32, .little);
const name_index = try reader.readInt(u32, .little);
if (name_offset > name_bytes.len)
return error.InvalidDebugInfo;
const name = mem.sliceTo(name_bytes[name_offset..], 0);
@@ -821,7 +821,7 @@ pub const Pdb = struct {
return error.MissingDebugInfo;
const reader = stream.reader();
const signature = try reader.readInt(u32, .Little);
const signature = try reader.readInt(u32, .little);
if (signature != 4)
return error.InvalidDebugInfo;
@@ -899,7 +899,7 @@ const Msf = struct {
try file.seekTo(superblock.BlockSize * superblock.BlockMapAddr);
var dir_blocks = try allocator.alloc(u32, dir_block_count);
for (dir_blocks) |*b| {
b.* = try in.readInt(u32, .Little);
b.* = try in.readInt(u32, .little);
}
var directory = MsfStream.init(
superblock.BlockSize,
@@ -908,7 +908,7 @@ const Msf = struct {
);
const begin = directory.pos;
const stream_count = try directory.reader().readInt(u32, .Little);
const stream_count = try directory.reader().readInt(u32, .little);
const stream_sizes = try allocator.alloc(u32, stream_count);
defer allocator.free(stream_sizes);
@@ -917,7 +917,7 @@ const Msf = struct {
// and must be taken into account when resolving stream indices.
const Nil = 0xFFFFFFFF;
for (stream_sizes) |*s| {
const size = try directory.reader().readInt(u32, .Little);
const size = try directory.reader().readInt(u32, .little);
s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.BlockSize);
}
@@ -932,7 +932,7 @@ const Msf = struct {
var blocks = try allocator.alloc(u32, size);
var j: u32 = 0;
while (j < size) : (j += 1) {
const block_id = try directory.reader().readInt(u32, .Little);
const block_id = try directory.reader().readInt(u32, .little);
const n = (block_id % superblock.BlockSize);
// 0 is for SuperBlock, 1 and 2 for FPMs.
if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.BlockSize > file_len)
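
A hedged sketch of the updated reader call used throughout the PDB code above, run over a fixed in-memory buffer; the byte values, the test name, and the use of std.io.fixedBufferStream are assumptions for the example:

const std = @import("std");

test "readInt with the lowercase .little tag (illustrative)" {
    // 0x11223344 stored little-endian; the data is purely illustrative.
    const bytes = [_]u8{ 0x44, 0x33, 0x22, 0x11 };
    var stream = std.io.fixedBufferStream(&bytes);
    try std.testing.expect(try stream.reader().readInt(u32, .little) == 0x11223344);
}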


@@ -113,7 +113,7 @@ pub const Random = struct {
// use LE instead of native endian for better portability maybe?
// TODO: endian portability is pointless if the underlying prng isn't endian portable.
// TODO: document the endian portability of this library.
const byte_aligned_result = mem.readInt(ByteAlignedT, &rand_bytes, .Little);
const byte_aligned_result = mem.readInt(ByteAlignedT, &rand_bytes, .little);
const unsigned_result: UnsignedT = @truncate(byte_aligned_result);
return @bitCast(unsigned_result);
}


@@ -13,7 +13,7 @@ const mem = std.mem;
const Random = std.rand.Random;
const Self = @This();
const Ascon = std.crypto.core.Ascon(.Little);
const Ascon = std.crypto.core.Ascon(.little);
state: Ascon,


@@ -228,7 +228,7 @@ test "isaac64 fill" {
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeInt(u64, &buf0, s, .Little);
std.mem.writeInt(u64, &buf0, s, .little);
r.fill(&buf1);
try std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}


@@ -111,8 +111,8 @@ test "pcg fill" {
var i: u32 = 0;
while (i < seq.len) : (i += 2) {
var buf0: [8]u8 = undefined;
std.mem.writeInt(u32, buf0[0..4], seq[i], .Little);
std.mem.writeInt(u32, buf0[4..8], seq[i + 1], .Little);
std.mem.writeInt(u32, buf0[0..4], seq[i], .little);
std.mem.writeInt(u32, buf0[4..8], seq[i + 1], .little);
var buf1: [7]u8 = undefined;
r.fill(&buf1);


@@ -115,7 +115,7 @@ test "RomuTrio fill" {
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeInt(u64, &buf0, s, .Little);
std.mem.writeInt(u64, &buf0, s, .little);
r.fill(&buf1);
try std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}


@@ -125,7 +125,7 @@ test "Sfc64 fill" {
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeInt(u64, &buf0, s, .Little);
std.mem.writeInt(u64, &buf0, s, .little);
r.fill(&buf1);
try std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}
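
Finally, a minimal sketch of the std.mem round trip that the PRNG fill tests above rely on; the test name and the values are hypothetical:

const std = @import("std");

test "mem.writeInt/readInt round trip with lowercase tags (illustrative)" {
    var buf: [8]u8 = undefined;
    std.mem.writeInt(u64, &buf, 0x0123456789abcdef, .little);
    try std.testing.expect(std.mem.readInt(u64, &buf, .little) == 0x0123456789abcdef);
    // Reading the same bytes as big-endian yields the byte-swapped value.
    try std.testing.expect(std.mem.readInt(u64, &buf, .big) == 0xefcdab8967452301);
}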

Some files were not shown because too many files have changed in this diff.