std.crypto.Sha1: make it a Writer
This commit is contained in:
parent eb1a4970da
commit 18bc688471

18 changed files with 201 additions and 150 deletions
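In short: Sha1 no longer keeps an internal streaming cache. The caller supplies the buffer, the hasher exposes a std.Io.Writer, and final() returns the digest by value. A minimal before/after sketch distilled from the hunks below (any buffer of at least Sha1.block_length bytes works):

    const Sha1 = std.crypto.hash.Sha1;

    // Before this commit: options-struct init, out-parameter final.
    var h_old = Sha1.init(.{});
    h_old.update("abc");
    var out: [Sha1.digest_length]u8 = undefined;
    h_old.final(&out);

    // After: a caller-owned buffer backs the Writer; the digest is returned.
    var buffer: [Sha1.block_length]u8 = undefined;
    var h: Sha1 = .init(&buffer);
    h.update("abc"); // or: h.writer.writeAll("abc") catch unreachable
    const digest = h.final();

One-shot hashing changes shape too: Sha1.hash(data) now returns the digest, while the not-yet-converted hashers (Md5, the Sha2 family) keep hash(data, &out), minus the old Options parameter.
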
@@ -191,7 +191,7 @@ pub fn Decoder(comptime ReaderType: type) type {
             },
             .sha256 => {
                 var hash_a: [Sha256.digest_length]u8 = undefined;
-                Sha256.hash(unpacked_bytes, &hash_a, .{});
+                Sha256.hash(unpacked_bytes, &hash_a);

                 var hash_b: [Sha256.digest_length]u8 = undefined;
                 try self.inner_reader.readNoEof(&hash_b);

@@ -347,7 +347,6 @@ test "CSPRNG" {
 test "issue #4532: no index out of bounds" {
     const types = [_]type{
         hash.Md5,
-        hash.Sha1,
         hash.sha2.Sha224,
         hash.sha2.Sha256,
         hash.sha2.Sha384,

@@ -380,6 +379,25 @@ test "issue #4532: no index out of bounds" {

         try std.testing.expectEqual(out1, out2);
     }
+
+    try checkIndexOob(hash.Sha1);
 }
+
+fn checkIndexOob(Hasher: type) !void {
+    var buffer1: [Hasher.block_length]u8 = undefined;
+    var buffer2: [Hasher.block_length]u8 = undefined;
+    var block: [Hasher.block_length]u8 = @splat('#');
+    var out1: [Hasher.digest_length]u8 = undefined;
+    var out2: [Hasher.digest_length]u8 = undefined;
+    var h0: Hasher = .init(&buffer1);
+    var h = h0.copy(&buffer2);
+    h.update(&block);
+    out1 = h.final();
+    h = h0.copy(&buffer2);
+    h.update(block[0..1]);
+    h.update(block[1..]);
+    out2 = h.final();
+    try std.testing.expectEqualSlices(u8, &out1, &out2);
+}
+
 /// Sets a slice to zeroes.

@@ -163,7 +163,7 @@ pub const Ed25519 = struct {
            const expected_r = try Curve.fromBytes(r);
            try expected_r.rejectIdentity();

-           var h = Sha512.init(.{});
+           var h = Sha512.init();
            h.update(&r);
            h.update(&public_key.bytes);

@@ -949,7 +949,7 @@ pub const rsa = struct {
            // 2. Let mHash = Hash(M), an octet string of length hLen.
            var mHash: [Hash.digest_length]u8 = undefined;
            {
-               var hasher: Hash = .init(.{});
+               var hasher: Hash = .init();
                for (msg) |part| hasher.update(part);
                hasher.final(&mHash);
            }

@@ -1038,7 +1038,7 @@ pub const rsa = struct {

            // 13. Let H' = Hash(M'), an octet string of length hLen.
            var h_p: [Hash.digest_length]u8 = undefined;
-           Hash.hash(m_p, &h_p, .{});
+           Hash.hash(m_p, &h_p);

            // 14. If H = H', output "consistent". Otherwise, output
            // "inconsistent".

@@ -1054,7 +1054,7 @@ pub const rsa = struct {

            while (idx < len) {
                std.mem.writeInt(u32, hash[seed.len..][0..4], counter, .big);
-               Hash.hash(&hash, out[idx..][0..Hash.digest_length], .{});
+               Hash.hash(&hash, out[idx..][0..Hash.digest_length]);
                idx += Hash.digest_length;
                counter += 1;
            }

@@ -1081,13 +1081,14 @@ pub const rsa = struct {
            public_key: PublicKey,
            comptime Hash: type,
        ) VerifyError!void {
-           try concatVerify(modulus_len, sig, &.{msg}, public_key, Hash);
+           var msgs: [1][]const u8 = .{msg};
+           try concatVerify(modulus_len, sig, &msgs, public_key, Hash);
        }

        pub fn concatVerify(
            comptime modulus_len: usize,
            sig: [modulus_len]u8,
-           msg: []const []const u8,
+           msg: [][]const u8,
            public_key: PublicKey,
            comptime Hash: type,
        ) VerifyError!void {

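The loosening from msg: []const []const u8 to msg: [][]const u8 here (and at the TLS call sites later in the diff) tracks the new Writer path: std.Io.Writer.writeVecAll takes a mutable vector of slices, presumably so it can advance the entries in place across partial drains. Call sites therefore stage the parts in a mutable array first, as in this hedged sketch (part_a and part_b are stand-ins):

    var msgs: [2][]const u8 = .{ part_a, part_b };
    try concatVerify(modulus_len, sig, &msgs, public_key, Hash);
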
@@ -1096,7 +1097,7 @@ pub const rsa = struct {
            if (!std.mem.eql(u8, &em_dec, &em)) return error.InvalidSignature;
        }

-       fn EMSA_PKCS1_V1_5_ENCODE(msg: []const []const u8, comptime emLen: usize, comptime Hash: type) VerifyError![emLen]u8 {
+       fn EMSA_PKCS1_V1_5_ENCODE(msg: [][]const u8, comptime emLen: usize, comptime Hash: type) VerifyError![emLen]u8 {
            comptime var em_index = emLen;
            var em: [emLen]u8 = undefined;

@@ -1107,10 +1108,21 @@ pub const rsa = struct {
            //
            // If the hash function outputs "message too long," output "message
            // too long" and stop.
-           var hasher: Hash = .init(.{});
-           for (msg) |part| hasher.update(part);
-           em_index -= Hash.digest_length;
-           hasher.final(em[em_index..]);
+           switch (Hash) {
+               crypto.hash.Sha1 => {
+                   var buffer: [64]u8 = undefined;
+                   var hasher: Hash = .init(&buffer);
+                   hasher.writer.writeVecAll(msg) catch unreachable; // writing to hasher cannot fail
+                   em_index -= Hash.digest_length;
+                   em[em_index..][0..Hash.digest_length].* = hasher.final();
+               },
+               else => {
+                   var hasher: Hash = .init();
+                   for (msg) |part| hasher.update(part);
+                   em_index -= Hash.digest_length;
+                   hasher.final(em[em_index..]);
+               },
+           }

            // 2. Encode the algorithm ID for the hash function and the hash value
            // into an ASN.1 value of type DigestInfo (see Appendix A.2.4) with

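Sha1 is the only hasher converted in this commit, hence the type switch above: the Sha1 arm needs scratch space and returns its digest by value, while every other hash keeps the update/final out-parameter shape. A sketch of a generic helper in the same spirit (hypothetical digestParts, not part of the commit):

    fn digestParts(comptime Hash: type, parts: [][]const u8) [Hash.digest_length]u8 {
        switch (Hash) {
            std.crypto.hash.Sha1 => {
                var buffer: [std.crypto.hash.Sha1.block_length]u8 = undefined;
                var hasher: Hash = .init(&buffer);
                // Writing into a hasher cannot fail.
                hasher.writer.writeVecAll(parts) catch unreachable;
                return hasher.final();
            },
            else => {
                var hasher: Hash = .init();
                for (parts) |part| hasher.update(part);
                var out: [Hash.digest_length]u8 = undefined;
                hasher.final(&out);
                return out;
            },
        }
    }
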
@@ -2,115 +2,121 @@
 //! Namely, it is feasible to find multiple inputs producing the same hash.
 //! For a fast-performing, cryptographically secure hash function, see SHA512/256, BLAKE2 or BLAKE3.

+const Sha1 = @This();
 const std = @import("../std.zig");
 const mem = std.mem;
 const math = std.math;
-const Sha1 = @This();
+const assert = std.debug.assert;
+const Writer = std.Io.Writer;

 pub const block_length = 64;
 pub const digest_length = 20;
-pub const Options = struct {};

 s: [5]u32,
-/// Streaming Cache
-buf: [64]u8 = undefined,
-buf_len: u8 = 0,
-total_len: u64 = 0,
+total_len: u64,
+writer: Writer,

-pub fn init(options: Options) Sha1 {
-    _ = options;
+pub fn init(buffer: []u8) Sha1 {
+    assert(buffer.len >= block_length);
     return .{
-        .s = [_]u32{
-            0x67452301,
-            0xEFCDAB89,
-            0x98BADCFE,
-            0x10325476,
-            0xC3D2E1F0,
-        },
+        .s = .{ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
+        .total_len = 0,
+        .writer = .{
+            .buffer = buffer,
+            .vtable = &vtable,
+        },
     };
 }

-pub fn hash(b: []const u8, out: *[digest_length]u8, options: Options) void {
-    var d = Sha1.init(options);
-    d.update(b);
-    d.final(out);
+pub fn copy(sha1: *Sha1, buffer: []u8) Sha1 {
+    assert(buffer.len >= block_length);
+    const mine = sha1.writer.buffered();
+    assert(mine.len <= block_length);
+    @memcpy(buffer[0..mine.len], mine);
+    return .{
+        .s = sha1.s,
+        .total_len = sha1.total_len,
+        .writer = .{
+            .buffer = buffer,
+            .end = mine.len,
+            .vtable = &vtable,
+        },
+    };
 }

+const vtable: Writer.VTable = .{ .drain = drain };
+
+fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
+    const d: *Sha1 = @alignCast(@fieldParentPtr("writer", w));
+    {
+        const buf = w.buffered();
+        var off: usize = 0;
+        while (off + block_length <= buf.len) : (off += block_length) {
+            round(&d.s, buf[off..][0..block_length]);
+        }
+        d.total_len += off;
+        if (off != buf.len) return w.consume(off);
+        w.end = 0;
+    }
+    if (data.len == 1 and splat == 0) return 0;
+    var total_off: usize = 0;
+    for (data) |buf| {
+        var off: usize = 0;
+        while (off + block_length <= buf.len) : (off += block_length) {
+            round(&d.s, buf[off..][0..block_length]);
+        }
+        total_off += off;
+        if (off != buf.len) break;
+    }
+    d.total_len += total_off;
+    return total_off;
+}
+
+pub fn hash(data: []const u8) [digest_length]u8 {
+    var buf: [block_length]u8 = undefined;
+    var s: Sha1 = .init(&buf);
+    s.writer.writeAll(data) catch unreachable;
+    return s.final();
+}
+
 pub fn update(d: *Sha1, b: []const u8) void {
-    var off: usize = 0;
-
-    // Partial buffer exists from previous update. Copy into buffer then hash.
-    if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
-        off += 64 - d.buf_len;
-        @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
-
-        d.round(d.buf[0..]);
-        d.buf_len = 0;
-    }
-
-    // Full middle blocks.
-    while (off + 64 <= b.len) : (off += 64) {
-        d.round(b[off..][0..64]);
-    }
-
-    // Copy any remainder for next pass.
-    @memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]);
-    d.buf_len += @as(u8, @intCast(b[off..].len));
-
-    d.total_len += b.len;
+    d.writer.writeAll(b) catch unreachable;
 }

-pub fn peek(d: Sha1) [digest_length]u8 {
-    var copy = d;
-    return copy.finalResult();
-}
-
-pub fn final(d: *Sha1, out: *[digest_length]u8) void {
-    // The buffer here will never be completely full.
-    @memset(d.buf[d.buf_len..], 0);
-
-    // Append padding bits.
-    d.buf[d.buf_len] = 0x80;
-    d.buf_len += 1;
+pub fn final(d: *Sha1) [digest_length]u8 {
+    _ = drain(&d.writer, &.{""}, 1) catch unreachable;
+    const buf = d.writer.buffer[0..block_length];
+    const pad = d.writer.end;
+    assert(pad < block_length);
+    d.total_len += pad;
+    buf[pad] = 0x80; // Append padding bits.
+    const end = pad + 1;
+    @memset(buf[end..], 0);

     // > 448 mod 512 so need to add an extra round to wrap around.
-    if (64 - d.buf_len < 8) {
-        d.round(d.buf[0..]);
-        @memset(d.buf[0..], 0);
+    if (block_length - end < 8) {
+        round(&d.s, buf);
+        @memset(buf, 0);
     }

     // Append message length.
-    var i: usize = 1;
     var len = d.total_len >> 5;
-    d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
-    while (i < 8) : (i += 1) {
-        d.buf[63 - i] = @as(u8, @intCast(len & 0xff));
+    buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
+    for (1..8) |i| {
+        buf[63 - i] = @as(u8, @intCast(len & 0xff));
         len >>= 8;
     }

-    d.round(d.buf[0..]);
+    round(&d.s, buf);

-    for (d.s, 0..) |s, j| {
-        mem.writeInt(u32, out[4 * j ..][0..4], s, .big);
-    }
+    var out: [digest_length]u8 = undefined;
+    for (&d.s, 0..) |s, j| mem.writeInt(u32, out[4 * j ..][0..4], s, .big);
+    return out;
 }

-pub fn finalResult(d: *Sha1) [digest_length]u8 {
-    var result: [digest_length]u8 = undefined;
-    d.final(&result);
-    return result;
-}
-
-fn round(d: *Sha1, b: *const [64]u8) void {
+pub fn round(d_s: *[5]u32, b: *const [block_length]u8) void {
     var s: [16]u32 = undefined;

-    var v: [5]u32 = [_]u32{
-        d.s[0],
-        d.s[1],
-        d.s[2],
-        d.s[3],
-        d.s[4],
-    };
+    var v = d_s.*;

     const round0a = comptime [_]RoundParam{
         .abcdei(0, 1, 2, 3, 4, 0),

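How the Writer plumbing feeds round() in the hunk above: writes accumulate in the caller-provided buffer, and once it overflows, the Writer calls drain, which hashes whole 64-byte blocks and leaves any tail buffered until final() pads it. A rough illustration (assuming the API above):

    var buffer: [Sha1.block_length]u8 = undefined;
    var h: Sha1 = .init(&buffer);
    h.writer.writeAll("0123456789") catch unreachable; // fits; stays buffered, no round yet
    h.writer.writeAll("x" ** 100) catch unreachable; // overflows; drain hashes full 64-byte blocks
    const digest = h.final(); // pads the <64-byte remainder and returns the digest
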
@@ -241,11 +247,11 @@ fn round(d: *Sha1, b: *const [64]u8) void {
         v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
     }

-    d.s[0] +%= v[0];
-    d.s[1] +%= v[1];
-    d.s[2] +%= v[2];
-    d.s[3] +%= v[3];
-    d.s[4] +%= v[4];
+    d_s[0] +%= v[0];
+    d_s[1] +%= v[1];
+    d_s[2] +%= v[2];
+    d_s[3] +%= v[3];
+    d_s[4] +%= v[4];
 }

 const RoundParam = struct {

@@ -271,36 +277,38 @@ const RoundParam = struct {
 const htest = @import("test.zig");

 test "sha1 single" {
-    try htest.assertEqualHash(Sha1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", "");
-    try htest.assertEqualHash(Sha1, "a9993e364706816aba3e25717850c26c9cd0d89d", "abc");
-    try htest.assertEqualHash(Sha1, "a49b2446a02c645bf419f995b67091253a04a259", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
+    try htest.assertEqualHashInterface(Sha1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", "");
+    try htest.assertEqualHashInterface(Sha1, "a9993e364706816aba3e25717850c26c9cd0d89d", "abc");
+    try htest.assertEqualHashInterface(Sha1, "a49b2446a02c645bf419f995b67091253a04a259", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
 }

 test "sha1 streaming" {
-    var h = Sha1.init(.{});
+    var buffer: [block_length]u8 = undefined;
+    var h: Sha1 = .init(&buffer);
     var out: [20]u8 = undefined;

-    h.final(&out);
+    out = h.final();
     try htest.assertEqual("da39a3ee5e6b4b0d3255bfef95601890afd80709", out[0..]);

-    h = Sha1.init(.{});
+    h = .init(&buffer);
     h.update("abc");
-    h.final(&out);
+    out = h.final();
     try htest.assertEqual("a9993e364706816aba3e25717850c26c9cd0d89d", out[0..]);

-    h = Sha1.init(.{});
+    h = .init(&buffer);
     h.update("a");
     h.update("b");
     h.update("c");
-    h.final(&out);
+    out = h.final();
     try htest.assertEqual("a9993e364706816aba3e25717850c26c9cd0d89d", out[0..]);
 }

 test "sha1 aligned final" {
-    var block = [_]u8{0} ** Sha1.block_length;
+    var block: [block_length]u8 = @splat(0);
     var out: [Sha1.digest_length]u8 = undefined;
+    var buffer: [block_length]u8 = undefined;

-    var h = Sha1.init(.{});
+    var h: Sha1 = .init(&buffer);
     h.update(&block);
-    h.final(out[0..]);
+    out = h.final();
 }

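With part of the state now living in the caller's buffer, the old peek()/finalResult() helpers are gone; snapshotting an in-progress stream takes a second buffer via copy(), as the checkIndexOob test earlier in this diff exercises. A minimal sketch:

    var buf1: [Sha1.block_length]u8 = undefined;
    var buf2: [Sha1.block_length]u8 = undefined;
    var h: Sha1 = .init(&buf1);
    h.update("partial input");
    var snap = h.copy(&buf2); // clone of the state; h can keep streaming
    const intermediate = snap.final(); // equivalent of the old h.peek()
    _ = intermediate;
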
@@ -258,8 +258,8 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
            const s = try Curve.scalar.Scalar.fromBytes(sig.s, .big);
            if (r.isZero() or s.isZero()) return error.IdentityElement;

-           return Verifier{
-               .h = Hash.init(.{}),
+           return .{
+               .h = Hash.init(),
                .r = r,
                .s = s,
                .public_key = public_key,

@@ -37,7 +37,7 @@ pub fn Hmac(comptime Hash: type) type {

            // Normalize key length to block size of hash
            if (key.len > Hash.block_length) {
-               Hash.hash(key, scratch[0..mac_length], .{});
+               Hash.hash(key, scratch[0..mac_length]);
                @memset(scratch[mac_length..Hash.block_length], 0);
            } else if (key.len < Hash.block_length) {
                @memcpy(scratch[0..key.len], key);

@@ -54,7 +54,7 @@ pub fn Hmac(comptime Hash: type) type {
                b.* = scratch[i] ^ 0x36;
            }

-           ctx.hash = Hash.init(.{});
+           ctx.hash = Hash.init();
            ctx.hash.update(&i_key_pad);
            return ctx;
        }

@@ -66,7 +66,7 @@ pub fn Hmac(comptime Hash: type) type {
        pub fn final(ctx: *Self, out: *[mac_length]u8) void {
            var scratch: [mac_length]u8 = undefined;
            ctx.hash.final(&scratch);
-           var ohash = Hash.init(.{});
+           var ohash = Hash.init();
            ohash.update(&ctx.o_key_pad);
            ohash.update(&scratch);
            ohash.final(out);

@@ -31,7 +31,6 @@ pub const Md5 = struct {
     const Self = @This();
     pub const block_length = 64;
     pub const digest_length = 16;
-    pub const Options = struct {};

     s: [4]u32,
     // Streaming Cache

@@ -39,8 +38,7 @@ pub const Md5 = struct {
     buf_len: u8,
     total_len: u64,

-    pub fn init(options: Options) Self {
-        _ = options;
+    pub fn init() Self {
         return Self{
             .s = [_]u32{
                 0x67452301,

@@ -54,8 +52,8 @@ pub const Md5 = struct {
         };
     }

-    pub fn hash(data: []const u8, out: *[digest_length]u8, options: Options) void {
-        var d = Md5.init(options);
+    pub fn hash(data: []const u8, out: *[digest_length]u8) void {
+        var d = Md5.init();
         d.update(data);
         d.final(out);
     }

@@ -3,19 +3,27 @@ const testing = std.testing;
 const fmt = std.fmt;

 // Hash using the specified hasher `H` asserting `expected == H(input)`.
-pub fn assertEqualHash(comptime Hasher: anytype, comptime expected_hex: *const [Hasher.digest_length * 2:0]u8, input: []const u8) !void {
+pub fn assertEqualHash(
+    comptime Hasher: type,
+    expected_hex: *const [Hasher.digest_length * 2:0]u8,
+    input: []const u8,
+) !void {
     var h: [Hasher.digest_length]u8 = undefined;
-    Hasher.hash(input, &h, .{});
-
+    Hasher.hash(input, &h);
     try assertEqual(expected_hex, &h);
 }

-// Assert `expected` == hex(`input`) where `input` is a bytestring
-pub fn assertEqual(comptime expected_hex: [:0]const u8, input: []const u8) !void {
-    var expected_bytes: [expected_hex.len / 2]u8 = undefined;
-    for (&expected_bytes, 0..) |*r, i| {
-        r.* = fmt.parseInt(u8, expected_hex[2 * i .. 2 * i + 2], 16) catch unreachable;
-    }
+pub fn assertEqualHashInterface(
+    comptime Hasher: type,
+    expected_hex: *const [Hasher.digest_length * 2:0]u8,
+    input: []const u8,
+) !void {
+    const digest = Hasher.hash(input);
+    try assertEqual(expected_hex, &digest);
+}

-    try testing.expectEqualSlices(u8, &expected_bytes, input);
+pub fn assertEqual(expected_hex: [:0]const u8, actual_bin_digest: []const u8) !void {
+    var buffer: [200]u8 = undefined;
+    const actual_hex = std.fmt.bufPrint(&buffer, "{x}", .{actual_bin_digest}) catch @panic("buffer too small");
+    try testing.expectEqualStrings(expected_hex, actual_hex);
 }

@@ -578,7 +578,7 @@ pub fn hkdfExpandLabel(

 pub fn emptyHash(comptime Hash: type) [Hash.digest_length]u8 {
     var result: [Hash.digest_length]u8 = undefined;
-    Hash.hash(&.{}, &result, .{});
+    Hash.hash(&.{}, &result);
     return result;
 }

@@ -498,7 +498,7 @@ pub fn init(stream: anytype, options: Options) InitError(@TypeOf(stream))!Client
        .ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
        => |tag| {
            handshake_cipher = @unionInit(tls.HandshakeCipher, @tagName(tag.with()), .{
-               .transcript_hash = .init(.{}),
+               .transcript_hash = .init(),
                .version = undefined,
            });
            const p = &@field(handshake_cipher, @tagName(tag.with()));

@@ -680,7 +680,8 @@ pub fn init(stream: anytype, options: Options) InitError(@TypeOf(stream))!Client
            const key_size = hsd.decode(u8);
            try hsd.ensure(key_size);
            const server_pub_key = hsd.slice(key_size);
-           try main_cert_pub_key.verifySignature(&hsd, &.{ &client_hello_rand, &server_hello_rand, hsd.buf[0..hsd.idx] });
+           var msgs: [3][]const u8 = .{ &client_hello_rand, &server_hello_rand, hsd.buf[0..hsd.idx] };
+           try main_cert_pub_key.verifySignature(&hsd, &msgs);
            try key_share.exchange(named_group, server_pub_key);
            handshake_state = .server_hello_done;
        },

@@ -776,10 +777,11 @@ pub fn init(stream: anytype, options: Options) InitError(@TypeOf(stream))!Client
            }
            switch (handshake_cipher) {
                inline else => |*p| {
-                   try main_cert_pub_key.verifySignature(&hsd, &.{
+                   var msgs: [2][]const u8 = .{
                        " " ** 64 ++ "TLS 1.3, server CertificateVerify\x00",
                        &p.transcript_hash.peek(),
-                   });
+                   };
+                   try main_cert_pub_key.verifySignature(&hsd, &msgs);
                    p.transcript_hash.update(wrapped_handshake);
                },
            }

@@ -1755,7 +1757,7 @@ const CertificatePublicKey = struct {
    fn verifySignature(
        cert_pub_key: *const CertificatePublicKey,
        sigd: *tls.Decoder,
-       msg: []const []const u8,
+       msg: [][]const u8,
    ) VerifyError!void {
        const pub_key = cert_pub_key.buf[0..cert_pub_key.len];

@@ -45,11 +45,11 @@ pub fn init(

    const key = sec_websocket_key orelse return error.WebSocketUpgradeMissingKey;

-   var sha1 = std.crypto.hash.Sha1.init(.{});
+   var sha1_buffer: [64]u8 = undefined;
+   var sha1: std.crypto.hash.Sha1 = .init(&sha1_buffer);
    sha1.update(key);
    sha1.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
-   var digest: [std.crypto.hash.Sha1.digest_length]u8 = undefined;
-   sha1.final(&digest);
+   const digest = sha1.final();
    var base64_digest: [28]u8 = undefined;
    assert(std.base64.standard.Encoder.encode(&base64_digest, &digest).len == base64_digest.len);

@@ -133,7 +133,7 @@ pub const Hash = struct {
            return result;
        }
        var bin_digest: [Algo.digest_length]u8 = undefined;
-       Algo.hash(sub_path, &bin_digest, .{});
+       Algo.hash(sub_path, &bin_digest);
        _ = std.fmt.bufPrint(result.bytes[i..], "{x}", .{&bin_digest}) catch unreachable;
        return result;
    }

@@ -1621,7 +1621,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute

    std.mem.sortUnstable(*HashedFile, all_files.items, {}, HashedFile.lessThan);

-   var hasher = Package.Hash.Algo.init(.{});
+   var hasher = Package.Hash.Algo.init();
    var any_failures = false;
    for (all_files.items) |hashed_file| {
        hashed_file.failure catch |err| {

@@ -1690,7 +1690,7 @@ fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {

 fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
     var buf: [8000]u8 = undefined;
-    var hasher = Package.Hash.Algo.init(.{});
+    var hasher = Package.Hash.Algo.init();
     hasher.update(hashed_file.normalized_path);
     var file_size: u64 = 0;

@@ -45,10 +45,10 @@ pub const Oid = union(Format) {
        sha1: Sha1,
        sha256: Sha256,

-       fn init(oid_format: Format) Hasher {
+       fn init(oid_format: Format, buffer: []u8) Hasher {
            return switch (oid_format) {
-               .sha1 => .{ .sha1 = Sha1.init(.{}) },
-               .sha256 => .{ .sha256 = Sha256.init(.{}) },
+               .sha1 => .{ .sha1 = .init(buffer) },
+               .sha256 => .{ .sha256 = Sha256.init() },
            };
        }

@@ -61,6 +61,7 @@ pub const Oid = union(Format) {

        fn finalResult(hasher: *Hasher) Oid {
            return switch (hasher.*) {
+               .sha1 => |*inner| .{ .sha1 = inner.final() },
                inline else => |*inner, tag| @unionInit(Oid, @tagName(tag), inner.finalResult()),
            };
        }

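Every Hasher.init call site below now declares a 64-byte scratch buffer first, because the sha1 variant borrows it for its Writer; the buffer must outlive the hasher. The recurring pattern (data is a stand-in):

    var hash_buffer: [64]u8 = undefined;
    var hasher: Oid.Hasher = .init(format, &hash_buffer);
    hasher.update(data);
    const oid = hasher.finalResult();
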
@@ -1281,7 +1282,8 @@ pub fn indexPack(allocator: Allocator, format: Oid.Format, pack: std.fs.File, in
    }
    @memset(fan_out_table[fan_out_index..], count);

-   var index_hashed_writer = hashedWriter(index_writer, Oid.Hasher.init(format));
+   var hash_buffer: [64]u8 = undefined;
+   var index_hashed_writer = hashedWriter(index_writer, Oid.Hasher.init(format, &hash_buffer));
    const writer = index_hashed_writer.writer();
    try writer.writeAll(IndexHeader.signature);
    try writer.writeInt(u32, IndexHeader.supported_version, .big);

@@ -1331,7 +1333,8 @@ fn indexPackFirstPass(
 ) !Oid {
    var pack_buffered_reader = std.io.bufferedReader(pack.deprecatedReader());
    var pack_counting_reader = std.io.countingReader(pack_buffered_reader.reader());
-   var pack_hashed_reader = hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format));
+   var hash_buffer: [64]u8 = undefined;
+   var pack_hashed_reader = hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format, &hash_buffer));
    const pack_reader = pack_hashed_reader.reader();

    const pack_header = try PackHeader.read(pack_reader);

@@ -1345,7 +1348,8 @@ fn indexPackFirstPass(
        .commit, .tree, .blob, .tag => |object| {
            var entry_decompress_stream = std.compress.zlib.decompressor(entry_crc32_reader.reader());
            var entry_counting_reader = std.io.countingReader(entry_decompress_stream.reader());
-           var entry_hashed_writer = hashedWriter(std.io.null_writer, Oid.Hasher.init(format));
+           var entry_hash_buffer: [64]u8 = undefined;
+           var entry_hashed_writer = hashedWriter(std.io.null_writer, Oid.Hasher.init(format, &entry_hash_buffer));
            const entry_writer = entry_hashed_writer.writer();
            // The object header is not included in the pack data but is
            // part of the object's ID

@@ -1431,7 +1435,8 @@ fn indexPackHashDelta(

    const base_data = try resolveDeltaChain(allocator, format, pack, base_object, delta_offsets.items, cache);

-   var entry_hasher: Oid.Hasher = .init(format);
+   var hash_buffer: [64]u8 = undefined;
+   var entry_hasher: Oid.Hasher = .init(format, &hash_buffer);
    var entry_hashed_writer = hashedWriter(std.io.null_writer, &entry_hasher);
    try entry_hashed_writer.writer().print("{s} {}\x00", .{ @tagName(base_object.type), base_data.len });
    entry_hasher.update(base_data);

@@ -307,7 +307,7 @@ pub fn writeAdhocSignature(
    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try req.write(buf.writer());
-   Sha256.hash(buf.items, &hash, .{});
+   Sha256.hash(buf.items, &hash);
    self.code_directory.addSpecialHash(req.slotType(), hash);

    try blobs.append(.{ .requirements = req });

@@ -319,7 +319,7 @@ pub fn writeAdhocSignature(
    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try ents.write(buf.writer());
-   Sha256.hash(buf.items, &hash, .{});
+   Sha256.hash(buf.items, &hash);
    self.code_directory.addSpecialHash(ents.slotType(), hash);

    try blobs.append(.{ .entitlements = ents });

@@ -58,7 +58,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
        const tracy = trace(@src());
        defer tracy.end();
        err.* = file.preadAll(buffer, fstart);
-       Hasher.hash(buffer, out, .{});
+       Hasher.hash(buffer, out);
    }

    const Self = @This();

@@ -28,7 +28,7 @@ pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[
        @memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash);
    }

-   Md5.hash(final_buffer, out, .{});
+   Md5.hash(final_buffer, out);
    conform(out);
}
|||
Loading…
Add table
Reference in a new issue