mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 13:54:21 +00:00
fetch: More Git fixes
This commit is contained in: parent 64814dc986 · commit d91744401f
2 changed files with 12 additions and 15 deletions
@@ -2418,7 +2418,7 @@ pub fn Hashing(comptime Hasher: type) type {
                 n += slice.len;
             }
             for (0..splat) |_| hasher.update(data[data.len - 1]);
-            return n + splat;
+            return n + splat * data[data.len - 1].len;
         }
     };
 }
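The change above fixes the consumed-byte count in the hashing writer's drain: the last slice of `data` is written `splat` times, so each repetition contributes its full length rather than a single byte. A minimal sketch of the corrected accounting, as a hypothetical standalone helper rather than the actual `std.Io` interface:

const std = @import("std");

/// Hypothetical helper mirroring the corrected accounting: every slice
/// except the last is written once, and the last slice is written `splat`
/// times in full.
fn countBytes(data: []const []const u8, splat: usize) usize {
    var n: usize = 0;
    for (data[0 .. data.len - 1]) |slice| n += slice.len;
    // The pre-fix code returned `n + splat`, counting each repetition of
    // the last slice as one byte instead of `data[data.len - 1].len` bytes.
    return n + splat * data[data.len - 1].len;
}

test countBytes {
    const data = [_][]const u8{ "ab", "cde" };
    // "ab" written once (2 bytes) plus "cde" written twice (6 bytes).
    try std.testing.expectEqual(@as(usize, 8), countBytes(&data, 2));
}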
@@ -1500,13 +1500,11 @@ fn readObjectRaw(allocator: Allocator, reader: *std.Io.Reader, size: u64) ![]u8
     return aw.toOwnedSlice();
 }
 
-/// Expands delta data from `delta_reader` to `writer`. `base_object` must
-/// support `reader` and `seekTo` (such as a `std.io.FixedBufferStream`).
+/// Expands delta data from `delta_reader` to `writer`.
 ///
 /// The format of the delta data is documented in
 /// [pack-format](https://git-scm.com/docs/pack-format).
 fn expandDelta(base_object: []const u8, delta_reader: *std.Io.Reader, writer: *std.Io.Writer) !void {
-    var base_offset: u32 = 0;
     while (true) {
         const inst: packed struct { value: u7, copy: bool } = @bitCast(delta_reader.takeByte() catch |e| switch (e) {
             error.EndOfStream => return,
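Each delta instruction starts with one byte whose high bit distinguishes a copy instruction from inline data; the `packed struct { value: u7, copy: bool }` bitcast splits it because Zig packed structs place the first field in the least significant bits. A small illustrative test, with an assumed example byte not taken from the commit:

const std = @import("std");

test "delta instruction byte decoding" {
    const Inst = packed struct { value: u7, copy: bool };
    const inst: Inst = @bitCast(@as(u8, 0x91)); // 0b1001_0001, assumed example
    try std.testing.expect(inst.copy); // high bit set: this is a copy instruction
    try std.testing.expectEqual(@as(u7, 0x11), inst.value); // offset1 and size1 bytes follow
}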
@@ -1528,7 +1526,7 @@ fn expandDelta(base_object: []const u8, delta_reader: *std.Io.Reader, writer: *std.Io.Writer) !void {
             .offset3 = if (available.offset3) try delta_reader.takeByte() else 0,
             .offset4 = if (available.offset4) try delta_reader.takeByte() else 0,
         };
-        base_offset = @bitCast(offset_parts);
+        const base_offset: u32 = @bitCast(offset_parts);
         const size_parts: packed struct { size1: u8, size2: u8, size3: u8 } = .{
             .size1 = if (available.size1) try delta_reader.takeByte() else 0,
             .size2 = if (available.size2) try delta_reader.takeByte() else 0,
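Making `base_offset` a `const` local rather than a function-scoped variable reflects that every copy instruction decodes its offset from scratch. The four optional offset bytes assemble least-significant-byte first, which the packed-struct bitcast expresses without shifts; an illustrative test with assumed byte values:

const std = @import("std");

test "copy offset reconstruction" {
    const OffsetParts = packed struct { offset1: u8, offset2: u8, offset3: u8, offset4: u8 };
    // Assumed example bytes; offset1 lands in the least significant bits.
    const parts: OffsetParts = .{ .offset1 = 0x78, .offset2 = 0x56, .offset3 = 0x34, .offset4 = 0x12 };
    const base_offset: u32 = @bitCast(parts);
    try std.testing.expectEqual(@as(u32, 0x12345678), base_offset);
}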
@@ -1537,7 +1535,6 @@ fn expandDelta(base_object: []const u8, delta_reader: *std.Io.Reader, writer: *std.Io.Writer) !void {
         var size: u24 = @bitCast(size_parts);
         if (size == 0) size = 0x10000;
         try writer.writeAll(base_object[base_offset..][0..size]);
-        base_offset += size;
     } else if (inst.value != 0) {
         try delta_reader.streamExact(writer, inst.value);
     } else {
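Dropping `base_offset += size;` removes dead state: the next copy instruction unconditionally reassigns the offset from its own bytes, so nothing meaningful accumulated across iterations. The size field keeps one special case worth pinning down; a small test of it, assuming all size bytes are absent (zero):

const std = @import("std");

test "copy size decoding special case" {
    const SizeParts = packed struct { size1: u8, size2: u8, size3: u8 };
    const parts: SizeParts = .{ .size1 = 0, .size2 = 0, .size3 = 0 };
    var size: u24 = @bitCast(parts);
    // The pack format reserves an encoded size of zero to mean 0x10000 bytes.
    if (size == 0) size = 0x10000;
    try std.testing.expectEqual(@as(u24, 0x10000), size);
}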
@@ -1582,13 +1579,15 @@ fn runRepositoryTest(comptime format: Oid.Format, head_commit: []const u8) !void
     // (all files in the test repo are known to be smaller than this)
     const max_file_size = 8192;
 
-    const index_file_data = try git_dir.dir.readFileAlloc(testing.allocator, "testrepo.idx", max_file_size);
-    defer testing.allocator.free(index_file_data);
-    // testrepo.idx is generated by Git. The index created by this file should
-    // match it exactly. Running `git verify-pack -v testrepo.pack` can verify
-    // this.
-    const testrepo_idx = @embedFile("git/testdata/testrepo-" ++ @tagName(format) ++ ".idx");
-    try testing.expectEqualSlices(u8, testrepo_idx, index_file_data);
+    if (!skip_checksums) {
+        const index_file_data = try git_dir.dir.readFileAlloc(testing.allocator, "testrepo.idx", max_file_size);
+        defer testing.allocator.free(index_file_data);
+        // testrepo.idx is generated by Git. The index created by this file should
+        // match it exactly. Running `git verify-pack -v testrepo.pack` can verify
+        // this.
+        const testrepo_idx = @embedFile("git/testdata/testrepo-" ++ @tagName(format) ++ ".idx");
+        try testing.expectEqualSlices(u8, testrepo_idx, index_file_data);
+    }
 
     var index_file_reader = index_file.reader(&index_file_buffer);
     var repository: Repository = undefined;
@@ -1669,12 +1668,10 @@ fn runRepositoryTest(comptime format: Oid.Format, head_commit: []const u8) !void
 const skip_checksums = true;
 
 test "SHA-1 packfile indexing and checkout" {
-    if (skip_checksums) return error.SkipZigTest;
     try runRepositoryTest(.sha1, "dd582c0720819ab7130b103635bd7271b9fd4feb");
 }
 
 test "SHA-256 packfile indexing and checkout" {
-    if (skip_checksums) return error.SkipZigTest;
     try runRepositoryTest(.sha256, "7f444a92bd4572ee4a28b2c63059924a9ca1829138553ef3e7c41ee159afae7a");
 }
 
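The `skip_checksums` gating is also inverted: rather than skipping these tests outright with `error.SkipZigTest`, only the byte-exact comparison against the Git-generated index (the `if (!skip_checksums)` block in the previous hunk) is disabled, so packfile indexing and checkout still run. A reduced sketch of that pattern, with illustrative names not taken from the repo:

const std = @import("std");

const skip_strict_check = true; // illustrative stand-in for skip_checksums

fn doWork() u32 {
    return 42; // stand-in for the indexing/checkout work under test
}

test "flag gates only the strict comparison" {
    const result = doWork();
    // The main behavior always runs and is still checked loosely...
    try std.testing.expect(result != 0);
    // ...while only the byte-exact comparison is conditionally skipped.
    if (!skip_strict_check) {
        try std.testing.expectEqual(@as(u32, 42), result);
    }
}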