compiler: update to new flate API

parent 2569f4ff85
commit 05ce1f99a6

7 changed files with 122 additions and 107 deletions

@@ -1709,6 +1709,15 @@ fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
     return error.ReadFailed;
 }
 
+pub fn adaptToOldInterface(r: *Reader) std.Io.AnyReader {
+    return .{ .context = r, .readFn = derpRead };
+}
+
+fn derpRead(context: *const anyopaque, buffer: []u8) anyerror!usize {
+    const r: *Reader = @constCast(@alignCast(@ptrCast(context)));
+    return r.readSliceShort(buffer);
+}
+
 test "readAlloc when the backing reader provides one byte at a time" {
     const str = "This is a test";
     var tiny_buffer: [1]u8 = undefined;

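Note: the `adaptToOldInterface` shim above bridges the new `std.Io.Reader` to code that still consumes the deprecated `std.Io.AnyReader`. A minimal sketch of how a caller might use it; the fixed-buffer reader and the test wrapper are illustrative, not part of this commit:

const std = @import("std");

test "bridge a new Reader to the deprecated AnyReader" {
    // New-API reader over a fixed byte slice.
    var reader: std.Io.Reader = .fixed("hello world");

    // Old-style consumers take an AnyReader; the shim forwards each
    // read to Reader.readSliceShort.
    const any: std.Io.AnyReader = reader.adaptToOldInterface();

    var buf: [5]u8 = undefined;
    try any.readNoEof(&buf);
    try std.testing.expectEqualStrings("hello", &buf);
}
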
@@ -23,7 +23,7 @@ dst_dec: DistanceDecoder,
 final_block: bool,
 state: State,
 
-read_err: ?Error,
+err: ?Error,
 
 const BlockType = enum(u2) {
     stored = 0,

@@ -74,7 +74,7 @@ pub fn init(input: *Reader, container: Container, buffer: []u8) Decompress {
         .dst_dec = .{},
         .final_block = false,
         .state = .protocol_header,
-        .read_err = null,
+        .err = null,
     };
 }
 

@@ -153,7 +153,7 @@ pub fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
             if (d.state == .end) {
                 return error.EndOfStream;
             } else {
-                d.read_err = error.EndOfStream;
+                d.err = error.EndOfStream;
                 return error.ReadFailed;
             }
         },

@@ -161,7 +161,7 @@ pub fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
         else => |e| {
             // In the event of an error, state is unmodified so that it can be
             // better used to diagnose the failure.
-            d.read_err = e;
+            d.err = e;
             return error.ReadFailed;
         },
     };

@@ -1179,7 +1179,7 @@ fn testFailure(container: Container, in: []const u8, expected_err: anyerror) !void {
 
     var decompress: Decompress = .init(&reader, container, &.{});
     try testing.expectError(error.ReadFailed, decompress.reader.streamRemaining(&aw.writer));
-    try testing.expectEqual(expected_err, decompress.read_err orelse return error.TestFailed);
+    try testing.expectEqual(expected_err, decompress.err orelse return error.TestFailed);
 }
 
 fn testDecompress(container: Container, compressed: []const u8, expected_plain: []const u8) !void {

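Note: with `read_err` renamed to `err`, decompression failures follow the new convention: the stream call returns the generic `error.ReadFailed` and the detailed cause is stashed in `decompress.err`. A hypothetical caller, for illustration only:

const std = @import("std");

fn inflateZlibAlloc(gpa: std.mem.Allocator, compressed: []const u8) ![]u8 {
    var in: std.Io.Reader = .fixed(compressed);
    var window: [std.compress.flate.max_window_len]u8 = undefined;
    var decompress: std.compress.flate.Decompress = .init(&in, .zlib, &window);

    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();
    _ = decompress.reader.streamRemaining(&aw.writer) catch |e| switch (e) {
        // The generic failure surfaces here; the specific cause is in `err`.
        error.ReadFailed => return decompress.err.?,
        error.WriteFailed => return error.OutOfMemory,
    };
    return aw.toOwnedSlice();
}
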
@@ -405,8 +405,8 @@ pub const RequestTransfer = union(enum) {
 
 /// The decompressor for response messages.
 pub const Compression = union(enum) {
-    deflate: std.compress.flate.Decompress,
-    gzip: std.compress.flate.Decompress,
+    //deflate: std.compress.flate.Decompress,
+    //gzip: std.compress.flate.Decompress,
     // https://github.com/ziglang/zig/issues/18937
     //zstd: ZstdDecompressor,
     none: void,

@@ -1074,12 +1074,10 @@ pub const Request = struct {
             switch (req.response.transfer_compression) {
                 .identity => req.response.compression = .none,
                 .compress, .@"x-compress" => return error.CompressionUnsupported,
-                .deflate => req.response.compression = .{
-                    .deflate = std.compress.zlib.decompressor(req.transferReader()),
-                },
-                .gzip, .@"x-gzip" => req.response.compression = .{
-                    .gzip = std.compress.gzip.decompressor(req.transferReader()),
-                },
+                // I'm about to upstream my http.Client rewrite
+                .deflate => return error.CompressionUnsupported,
+                // I'm about to upstream my http.Client rewrite
+                .gzip, .@"x-gzip" => return error.CompressionUnsupported,
                 // https://github.com/ziglang/zig/issues/18937
                 //.zstd => req.response.compression = .{
                 //    .zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),

@@ -1105,8 +1103,9 @@ pub const Request = struct {
         /// Reads data from the response body. Must be called after `wait`.
         pub fn read(req: *Request, buffer: []u8) ReadError!usize {
             const out_index = switch (req.response.compression) {
-                .deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
-                .gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
+                // I'm about to upstream my http client rewrite
+                //.deflate => |*deflate| deflate.readSlice(buffer) catch return error.DecompressionFailure,
+                //.gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
                 // https://github.com/ziglang/zig/issues/18937
                 //.zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
                 else => try req.transferRead(buffer),

@@ -7,8 +7,9 @@ const builtin = @import("builtin");
 const std = @import("std");
 const File = std.fs.File;
 const is_le = builtin.target.cpu.arch.endian() == .little;
-const Writer = std.io.Writer;
-const Reader = std.io.Reader;
+const Writer = std.Io.Writer;
+const Reader = std.Io.Reader;
+const flate = std.compress.flate;
 
 pub const CompressionMethod = enum(u16) {
     store = 0,

@@ -117,6 +118,7 @@ pub const EndRecord = extern struct {
     pub const FindFileError = File.GetEndPosError || File.SeekError || File.ReadError || error{
         ZipNoEndRecord,
+        EndOfStream,
         ReadFailed,
     };
 
     pub fn findFile(fr: *File.Reader) FindFileError!EndRecord {

@@ -137,8 +139,7 @@ pub const EndRecord = extern struct {
 
         try fr.seekTo(end_pos - @as(u64, new_loaded_len));
         const read_buf: []u8 = buf[buf.len - new_loaded_len ..][0..read_len];
-        var br = fr.interface().unbuffered();
-        br.readSlice(read_buf) catch |err| switch (err) {
+        fr.interface.readSliceAll(read_buf) catch |err| switch (err) {
            error.ReadFailed => return fr.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

@@ -164,7 +165,7 @@ pub const EndRecord = extern struct {
 pub const Decompress = struct {
     interface: Reader,
     state: union {
-        inflate: std.compress.flate.Decompress,
+        inflate: flate.Decompress,
         store: *Reader,
     },
 

@@ -201,7 +202,7 @@ pub const Decompress = struct {
 
     fn streamDeflate(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
         const d: *Decompress = @fieldParentPtr("interface", r);
-        return std.compress.flate.Decompress.read(&d.inflate, w, limit);
+        return flate.Decompress.read(&d.inflate, w, limit);
     }
 };
 

@@ -305,7 +306,7 @@ pub const Iterator = struct {
         if (locator_end_offset > stream_len)
             return error.ZipTruncated;
         try input.seekTo(stream_len - locator_end_offset);
-        const locator = input.interface.takeStructEndian(EndLocator64, .little) catch |err| switch (err) {
+        const locator = input.interface.takeStruct(EndLocator64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

@@ -318,7 +319,7 @@ pub const Iterator = struct {
 
         try input.seekTo(locator.record_file_offset);
 
-        const record64 = input.interface.takeStructEndian(EndRecord64, .little) catch |err| switch (err) {
+        const record64 = input.interface.takeStruct(EndRecord64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

@@ -374,7 +375,7 @@ pub const Iterator = struct {
         const header_zip_offset = self.cd_zip_offset + self.cd_record_offset;
         const input = self.input;
         try input.seekTo(header_zip_offset);
-        const header = input.interface.takeStructEndian(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
+        const header = input.interface.takeStruct(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

@@ -405,7 +406,7 @@ pub const Iterator = struct {
         const extra = extra_buf[0..header.extra_len];
 
         try input.seekTo(header_zip_offset + @sizeOf(CentralDirectoryFileHeader) + header.filename_len);
-        input.interface.readSlice(extra) catch |err| switch (err) {
+        input.interface.readSliceAll(extra) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

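Note: the `takeStructEndian` → `takeStruct` renames above all follow the same pattern: read a packed-on-disk `extern struct` from the stream with an explicit byte order. A generic sketch; the `DiskHeader` type is made up for illustration:

const std = @import("std");

const DiskHeader = extern struct {
    signature: [4]u8,
    size: u32 align(1),
};

fn readDiskHeader(r: *std.Io.Reader) !DiskHeader {
    // Reads @sizeOf(DiskHeader) bytes, byte-swapping fields on
    // big-endian hosts so the struct is usable natively.
    return r.takeStruct(DiskHeader, .little);
}
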
@@ -460,7 +461,7 @@ pub const Iterator = struct {
             options: ExtractOptions,
             filename_buf: []u8,
             dest: std.fs.Dir,
-        ) !u32 {
+        ) !void {
             if (filename_buf.len < self.filename_len)
                 return error.ZipInsufficientBuffer;
             switch (self.compression_method) {

@@ -470,13 +471,13 @@ pub const Iterator = struct {
             const filename = filename_buf[0..self.filename_len];
             {
                 try stream.seekTo(self.header_zip_offset + @sizeOf(CentralDirectoryFileHeader));
-                try stream.interface.readSlice(filename);
+                try stream.interface.readSliceAll(filename);
             }
 
             const local_data_header_offset: u64 = local_data_header_offset: {
                 const local_header = blk: {
                     try stream.seekTo(self.file_offset);
-                    break :blk try stream.interface.takeStructEndian(LocalFileHeader, .little);
+                    break :blk try stream.interface.takeStruct(LocalFileHeader, .little);
                 };
                 if (!std.mem.eql(u8, &local_header.signature, &local_file_header_sig))
                     return error.ZipBadFileOffset;

@@ -502,7 +503,7 @@ pub const Iterator = struct {
 
             {
                 try stream.seekTo(self.file_offset + @sizeOf(LocalFileHeader) + local_header.filename_len);
-                try stream.interface.readSlice(extra);
+                try stream.interface.readSliceAll(extra);
             }
 
             var extra_offset: usize = 0;

@@ -550,7 +551,7 @@ pub const Iterator = struct {
                 if (self.uncompressed_size != 0)
                     return error.ZipBadDirectorySize;
                 try dest.makePath(filename[0 .. filename.len - 1]);
-                return std.hash.Crc32.hash(&.{});
+                return;
             }
 
             const out_file = blk: {

@@ -564,31 +565,36 @@ pub const Iterator = struct {
                 break :blk try dest.createFile(filename, .{ .exclusive = true });
             };
             defer out_file.close();
-            var file_writer = out_file.writer();
-            var file_bw = file_writer.writer(&.{});
+            var out_file_buffer: [1024]u8 = undefined;
+            var file_writer = out_file.writer(&out_file_buffer);
             const local_data_file_offset: u64 =
                 @as(u64, self.file_offset) +
                 @as(u64, @sizeOf(LocalFileHeader)) +
                 local_data_header_offset;
             try stream.seekTo(local_data_file_offset);
-            var limited_file_reader = stream.interface.limited(.limited(self.compressed_size));
-            var file_read_buffer: [1000]u8 = undefined;
-            var decompress_read_buffer: [1000]u8 = undefined;
-            var limited_br = limited_file_reader.reader().buffered(&file_read_buffer);
-            var decompress: Decompress = undefined;
-            var decompress_br = decompress.readable(&limited_br, self.compression_method, &decompress_read_buffer);
-            const start_out = file_bw.count;
-            var hash_writer = file_bw.hashed(std.hash.Crc32.init());
-            var hash_bw = hash_writer.writer(&.{});
-            decompress_br.readAll(&hash_bw, .limited(self.uncompressed_size)) catch |err| switch (err) {
+
+            // TODO limit based on self.compressed_size
+
+            switch (self.compression_method) {
+                .store => {
+                    stream.interface.streamExact(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
                        error.ReadFailed => return stream.err.?,
                        error.WriteFailed => return file_writer.err.?,
                        error.EndOfStream => return error.ZipDecompressTruncated,
                    };
-            if (limited_file_reader.remaining.nonzero()) return error.ZipDecompressTruncated;
-            const written = file_bw.count - start_out;
-            if (written != self.uncompressed_size) return error.ZipUncompressSizeMismatch;
-            return hash_writer.hasher.final();
+                },
+                .deflate => {
+                    var flate_buffer: [flate.max_window_len]u8 = undefined;
+                    var decompress: flate.Decompress = .init(&stream.interface, .raw, &flate_buffer);
+                    decompress.reader.streamExact(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
+                        error.ReadFailed => return stream.err.?,
+                        error.WriteFailed => return file_writer.err orelse decompress.err.?,
+                        error.EndOfStream => return error.ZipDecompressTruncated,
+                    };
+                },
+                else => return error.UnsupportedCompressionMethod,
+            }
+            try file_writer.end();
         }
     };
 };

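Note: the rewritten extract path drops the limited/hashed reader plumbing in favor of `streamExact` straight into a buffered `File.Writer`. A condensed sketch of the deflate branch with illustrative names (`in` stands for the zip's reader positioned at the file data):

const std = @import("std");

fn inflateEntry(in: *std.Io.Reader, out_file: std.fs.File, uncompressed_size: u64) !void {
    var out_buffer: [1024]u8 = undefined;
    var file_writer = out_file.writer(&out_buffer);

    var window: [std.compress.flate.max_window_len]u8 = undefined;
    var decompress: std.compress.flate.Decompress = .init(in, .raw, &window);

    // Writes exactly `uncompressed_size` bytes to the file or errors out.
    try decompress.reader.streamExact(&file_writer.interface, uncompressed_size);
    try file_writer.end();
}
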
@@ -636,19 +642,19 @@ pub const ExtractOptions = struct {
     /// Allow filenames within the zip to use backslashes. Back slashes are normalized
     /// to forward slashes before forwarding them to platform APIs.
     allow_backslashes: bool = false,
 
     diagnostics: ?*Diagnostics = null,
+    verify_checksums: bool = false,
 };
 
 /// Extract the zipped files to the given `dest` directory.
 pub fn extract(dest: std.fs.Dir, fr: *File.Reader, options: ExtractOptions) !void {
+    if (options.verify_checksums) @panic("TODO unimplemented");
     var iter = try Iterator.init(fr);
 
     var filename_buf: [std.fs.max_path_bytes]u8 = undefined;
     while (try iter.next()) |entry| {
-        const crc32 = try entry.extract(fr, options, &filename_buf, dest);
-        if (crc32 != entry.crc32)
-            return error.ZipCrcMismatch;
+        try entry.extract(fr, options, &filename_buf, dest);
         if (options.diagnostics) |d| {
             try d.nextFilename(filename_buf[0..entry.filename_len]);
         }

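Note: `extract` now takes a `*File.Reader` rather than a seekable stream, so callers open the file and wrap it themselves. A possible call site; the path and buffer size are illustrative:

const std = @import("std");

fn extractZip(dest: std.fs.Dir, zip_path: []const u8) !void {
    var file = try std.fs.cwd().openFile(zip_path, .{});
    defer file.close();

    // File.Reader needs a caller-provided buffer.
    var buffer: [1024]u8 = undefined;
    var file_reader = file.reader(&buffer);
    try std.zip.extract(dest, &file_reader, .{});
}
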
@@ -1203,12 +1203,11 @@ fn unpackResource(
             return unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
         },
         .@"tar.gz" => {
-            const reader = resource.reader();
-            var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
-            var dcp = std.compress.gzip.decompressor(br.reader());
-            var adapter_buffer: [1024]u8 = undefined;
-            var adapter = dcp.reader().adaptToNewApi(&adapter_buffer);
-            return try unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
+            var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
+            var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
+            var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined;
+            var decompress: std.compress.flate.Decompress = .init(&adapter.new_interface, .gzip, &flate_buffer);
+            return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
         },
         .@"tar.xz" => {
             const gpa = f.arena.child_allocator;

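Note: the tar.gz path shows the bridge in the other direction: `adaptToNewApi` wraps a deprecated reader so the new `flate.Decompress` can pull from it. The same shape as a free-standing sketch; `old_reader` stands for any deprecated-API reader:

const std = @import("std");

fn gunzipToWriter(old_reader: anytype, out: *std.Io.Writer) !void {
    // The adapter owns its own buffer and exposes `new_interface`,
    // a std.Io.Reader view of the old reader.
    var adapter_buffer: [4096]u8 = undefined;
    var adapter = old_reader.adaptToNewApi(&adapter_buffer);

    var window: [std.compress.flate.max_window_len]u8 = undefined;
    var decompress: std.compress.flate.Decompress = .init(&adapter.new_interface, .gzip, &window);
    _ = try decompress.reader.streamRemaining(out);
}
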
@@ -1352,7 +1351,10 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
     ));
     defer zip_file.close();
 
-    std.zip.extract(out_dir, zip_file.seekableStream(), .{
+    var zip_file_buffer: [1024]u8 = undefined;
+    var zip_file_reader = zip_file.reader(&zip_file_buffer);
+
+    std.zip.extract(out_dir, &zip_file_reader, .{
         .allow_backslashes = true,
         .diagnostics = &diagnostics,
     }) catch |err| return f.fail(f.location_tok, try eb.printString(

@@ -1384,23 +1386,26 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
     defer pack_dir.close();
     var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
     defer pack_file.close();
-    var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
+    var pack_file_buffer: [4096]u8 = undefined;
+    var fifo = std.fifo.LinearFifo(u8, .{ .Slice = {} }).init(&pack_file_buffer);
     try fifo.pump(resource.fetch_stream.reader(), pack_file.deprecatedWriter());
 
+    var pack_file_reader = pack_file.reader(&pack_file_buffer);
+
     var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
     defer index_file.close();
     {
         const index_prog_node = f.prog_node.start("Index pack", 0);
         defer index_prog_node.end();
         var index_buffered_writer = std.io.bufferedWriter(index_file.deprecatedWriter());
-        try git.indexPack(gpa, object_format, pack_file, index_buffered_writer.writer());
+        try git.indexPack(gpa, object_format, &pack_file_reader, index_buffered_writer.writer());
         try index_buffered_writer.flush();
     }
 
     {
         const checkout_prog_node = f.prog_node.start("Checkout", 0);
         defer checkout_prog_node.end();
-        var repository = try git.Repository.init(gpa, object_format, pack_file, index_file);
+        var repository = try git.Repository.init(gpa, object_format, &pack_file_reader, index_file);
         defer repository.deinit();
         var diagnostics: git.Diagnostics = .{ .allocator = arena };
         try repository.checkout(out_dir, resource.want_oid, &diagnostics);

@@ -73,7 +73,7 @@ pub const Oid = union(Format) {
         };
     }
 
-    pub fn readBytes(oid_format: Format, reader: anytype) @TypeOf(reader).NoEofError!Oid {
+    pub fn readBytes(oid_format: Format, reader: anytype) !Oid {
         return switch (oid_format) {
             inline else => |tag| @unionInit(Oid, @tagName(tag), try reader.readBytesNoEof(tag.byteLength())),
         };

@@ -166,7 +166,7 @@ pub const Diagnostics = struct {
 pub const Repository = struct {
     odb: Odb,
 
-    pub fn init(allocator: Allocator, format: Oid.Format, pack_file: std.fs.File, index_file: std.fs.File) !Repository {
+    pub fn init(allocator: Allocator, format: Oid.Format, pack_file: *std.fs.File.Reader, index_file: std.fs.File) !Repository {
         return .{ .odb = try Odb.init(allocator, format, pack_file, index_file) };
     }
 

@@ -335,14 +335,14 @@ pub const Repository = struct {
 /// [pack-format](https://git-scm.com/docs/pack-format).
 const Odb = struct {
     format: Oid.Format,
-    pack_file: std.fs.File,
+    pack_file: *std.fs.File.Reader,
     index_header: IndexHeader,
     index_file: std.fs.File,
     cache: ObjectCache = .{},
     allocator: Allocator,
 
     /// Initializes the database from open pack and index files.
-    fn init(allocator: Allocator, format: Oid.Format, pack_file: std.fs.File, index_file: std.fs.File) !Odb {
+    fn init(allocator: Allocator, format: Oid.Format, pack_file: *std.fs.File.Reader, index_file: std.fs.File) !Odb {
         try pack_file.seekTo(0);
         try index_file.seekTo(0);
         const index_header = try IndexHeader.read(index_file.deprecatedReader());

@@ -362,14 +362,14 @@ const Odb = struct {
 
     /// Reads the object at the current position in the database.
     fn readObject(odb: *Odb) !Object {
-        var base_offset = try odb.pack_file.getPos();
+        var base_offset = odb.pack_file.logicalPos();
         var base_header: EntryHeader = undefined;
         var delta_offsets: std.ArrayListUnmanaged(u64) = .empty;
         defer delta_offsets.deinit(odb.allocator);
         const base_object = while (true) {
             if (odb.cache.get(base_offset)) |base_object| break base_object;
 
-            base_header = try EntryHeader.read(odb.format, odb.pack_file.deprecatedReader());
+            base_header = try EntryHeader.read(odb.format, odb.pack_file.interface.adaptToOldInterface());
             switch (base_header) {
                 .ofs_delta => |ofs_delta| {
                     try delta_offsets.append(odb.allocator, base_offset);

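Note: with `pack_file` now a `*std.fs.File.Reader`, position queries use `logicalPos()` instead of the OS-level `getPos()`, and raw reads go through the embedded `interface`. A small sketch of the pattern (function and names are illustrative):

const std = @import("std");

fn readPackSignature(fr: *std.fs.File.Reader) ![4]u8 {
    try fr.seekTo(0);
    var sig: [4]u8 = undefined;
    try fr.interface.readSliceAll(&sig);
    // logicalPos() accounts for the interface's buffering, so it should
    // report the logical stream position (4) rather than however far the
    // buffered reads advanced the OS file offset.
    std.debug.assert(fr.logicalPos() == 4);
    return sig;
}
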
@@ -379,10 +379,10 @@ const Odb = struct {
             .ref_delta => |ref_delta| {
                 try delta_offsets.append(odb.allocator, base_offset);
                 try odb.seekOid(ref_delta.base_object);
-                base_offset = try odb.pack_file.getPos();
+                base_offset = odb.pack_file.logicalPos();
             },
             else => {
-                const base_data = try readObjectRaw(odb.allocator, odb.pack_file.deprecatedReader(), base_header.uncompressedLength());
+                const base_data = try readObjectRaw(odb.allocator, &odb.pack_file.interface, base_header.uncompressedLength());
                 errdefer odb.allocator.free(base_data);
                 const base_object: Object = .{ .type = base_header.objectType(), .data = base_data };
                 try odb.cache.put(odb.allocator, base_offset, base_object);

@@ -1227,7 +1227,7 @@ const IndexEntry = struct {
 
 /// Writes out a version 2 index for the given packfile, as documented in
 /// [pack-format](https://git-scm.com/docs/pack-format).
-pub fn indexPack(allocator: Allocator, format: Oid.Format, pack: std.fs.File, index_writer: anytype) !void {
+pub fn indexPack(allocator: Allocator, format: Oid.Format, pack: *std.fs.File.Reader, index_writer: anytype) !void {
     try pack.seekTo(0);
 
     var index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry) = .empty;

@@ -1324,12 +1324,11 @@ pub fn indexPack(allocator: Allocator, format: Oid.Format, pack: std.fs.File, index_writer: anytype) !void {
 fn indexPackFirstPass(
     allocator: Allocator,
     format: Oid.Format,
-    pack: std.fs.File,
+    pack: *std.fs.File.Reader,
     index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry),
     pending_deltas: *std.ArrayListUnmanaged(IndexEntry),
 ) !Oid {
-    var pack_buffered_reader = std.io.bufferedReader(pack.deprecatedReader());
-    var pack_counting_reader = std.io.countingReader(pack_buffered_reader.reader());
+    var pack_counting_reader = std.io.countingReader(pack.interface.adaptToOldInterface());
     var pack_hashed_reader = hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format));
     const pack_reader = pack_hashed_reader.reader();
 

@@ -1340,15 +1339,19 @@ fn indexPackFirstPass(
         const entry_offset = pack_counting_reader.bytes_read;
         var entry_crc32_reader = hashedReader(pack_reader, std.hash.Crc32.init());
         const entry_header = try EntryHeader.read(format, entry_crc32_reader.reader());
+        var adapter_buffer: [1024]u8 = undefined;
+        var adapter = entry_crc32_reader.reader().adaptToNewApi(&adapter_buffer);
+        var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined;
+        var entry_decompress_stream: std.compress.flate.Decompress = .init(&adapter.new_interface, .zlib, &flate_buffer);
+        const old = entry_decompress_stream.reader.adaptToOldInterface();
+        var entry_counting_reader = std.io.countingReader(old);
         switch (entry_header) {
             .commit, .tree, .blob, .tag => |object| {
-                var entry_decompress_stream = std.compress.zlib.decompressor(entry_crc32_reader.reader());
-                var entry_counting_reader = std.io.countingReader(entry_decompress_stream.reader());
                 var entry_hashed_writer = hashedWriter(std.io.null_writer, Oid.Hasher.init(format));
                 const entry_writer = entry_hashed_writer.writer();
                 // The object header is not included in the pack data but is
                 // part of the object's ID
-                try entry_writer.print("{s} {}\x00", .{ @tagName(entry_header), object.uncompressed_length });
+                try entry_writer.print("{s} {d}\x00", .{ @tagName(entry_header), object.uncompressed_length });
                 var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
                 try fifo.pump(entry_counting_reader.reader(), entry_writer);
                 if (entry_counting_reader.bytes_read != object.uncompressed_length) {

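Note: this hunk chains the adapters both ways: an old-API hashed reader is adapted to the new interface, decompressed with the new `flate.Decompress`, and the result is adapted back so the old counting reader keeps working. An isolated sketch of that sandwich; `old_reader` stands for any deprecated-API reader of zlib data:

const std = @import("std");

fn countInflatedBytes(old_reader: anytype) !u64 {
    var adapter_buffer: [1024]u8 = undefined;
    var adapter = old_reader.adaptToNewApi(&adapter_buffer);

    var window: [std.compress.flate.max_window_len]u8 = undefined;
    var decompress: std.compress.flate.Decompress = .init(&adapter.new_interface, .zlib, &window);

    // Back to the old interface so legacy helpers still compose.
    var counting = std.io.countingReader(decompress.reader.adaptToOldInterface());
    var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
    try fifo.pump(counting.reader(), std.io.null_writer);
    return counting.bytes_read;
}
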
@@ -1361,8 +1364,6 @@ fn indexPackFirstPass(
                 });
             },
             inline .ofs_delta, .ref_delta => |delta| {
-                var entry_decompress_stream = std.compress.zlib.decompressor(entry_crc32_reader.reader());
-                var entry_counting_reader = std.io.countingReader(entry_decompress_stream.reader());
                 var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
                 try fifo.pump(entry_counting_reader.reader(), std.io.null_writer);
                 if (entry_counting_reader.bytes_read != delta.uncompressed_length) {

@@ -1377,7 +1378,7 @@ fn indexPackFirstPass(
     }
 
     const pack_checksum = pack_hashed_reader.hasher.finalResult();
-    const recorded_checksum = try Oid.readBytes(format, pack_buffered_reader.reader());
+    const recorded_checksum = try Oid.readBytes(format, pack.interface.adaptToOldInterface());
     if (!mem.eql(u8, pack_checksum.slice(), recorded_checksum.slice())) {
         return error.CorruptedPack;
     }

@@ -1394,7 +1395,7 @@ fn indexPackFirstPass(
 fn indexPackHashDelta(
     allocator: Allocator,
     format: Oid.Format,
-    pack: std.fs.File,
+    pack: *std.fs.File.Reader,
     delta: IndexEntry,
     index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry),
     cache: *ObjectCache,

@@ -1408,7 +1409,7 @@ fn indexPackHashDelta(
         if (cache.get(base_offset)) |base_object| break base_object;
 
         try pack.seekTo(base_offset);
-        base_header = try EntryHeader.read(format, pack.deprecatedReader());
+        base_header = try EntryHeader.read(format, pack.interface.adaptToOldInterface());
         switch (base_header) {
             .ofs_delta => |ofs_delta| {
                 try delta_offsets.append(allocator, base_offset);

@@ -1419,7 +1420,7 @@ fn indexPackHashDelta(
                 base_offset = (index_entries.get(ref_delta.base_object) orelse return null).offset;
             },
             else => {
-                const base_data = try readObjectRaw(allocator, pack.deprecatedReader(), base_header.uncompressedLength());
+                const base_data = try readObjectRaw(allocator, &pack.interface, base_header.uncompressedLength());
                 errdefer allocator.free(base_data);
                 const base_object: Object = .{ .type = base_header.objectType(), .data = base_data };
                 try cache.put(allocator, base_offset, base_object);

@@ -1444,7 +1445,7 @@ fn indexPackHashDelta(
 fn resolveDeltaChain(
     allocator: Allocator,
     format: Oid.Format,
-    pack: std.fs.File,
+    pack: *std.fs.File.Reader,
     base_object: Object,
     delta_offsets: []const u64,
     cache: *ObjectCache,

@@ -1456,8 +1457,8 @@ fn resolveDeltaChain(
 
         const delta_offset = delta_offsets[i];
         try pack.seekTo(delta_offset);
-        const delta_header = try EntryHeader.read(format, pack.deprecatedReader());
-        const delta_data = try readObjectRaw(allocator, pack.deprecatedReader(), delta_header.uncompressedLength());
+        const delta_header = try EntryHeader.read(format, pack.interface.adaptToOldInterface());
+        const delta_data = try readObjectRaw(allocator, &pack.interface, delta_header.uncompressedLength());
         defer allocator.free(delta_data);
         var delta_stream = std.io.fixedBufferStream(delta_data);
         const delta_reader = delta_stream.reader();

@@ -1481,18 +1482,14 @@ fn resolveDeltaChain(
 /// Reads the complete contents of an object from `reader`. This function may
 /// read more bytes than required from `reader`, so the reader position after
 /// returning is not reliable.
-fn readObjectRaw(allocator: Allocator, reader: anytype, size: u64) ![]u8 {
+fn readObjectRaw(allocator: Allocator, reader: *std.Io.Reader, size: u64) ![]u8 {
     const alloc_size = std.math.cast(usize, size) orelse return error.ObjectTooLarge;
-    var buffered_reader = std.io.bufferedReader(reader);
-    var decompress_stream = std.compress.zlib.decompressor(buffered_reader.reader());
-    const data = try allocator.alloc(u8, alloc_size);
-    errdefer allocator.free(data);
-    try decompress_stream.reader().readNoEof(data);
-    _ = decompress_stream.reader().readByte() catch |e| switch (e) {
-        error.EndOfStream => return data,
-        else => |other| return other,
-    };
-    return error.InvalidFormat;
+    var aw: std.Io.Writer.Allocating = .init(allocator);
+    try aw.ensureTotalCapacity(alloc_size);
+    defer aw.deinit();
+    var decompress: std.compress.flate.Decompress = .init(reader, .zlib, &.{});
+    try decompress.reader.streamExact(&aw.writer, alloc_size);
+    return aw.toOwnedSlice();
 }
 
 /// Expands delta data from `delta_reader` to `writer`. `base_object` must

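Note: `readObjectRaw` now drives an `Allocating` writer with `streamExact` instead of allocating up front and calling `readNoEof`; the explicit trailing-byte probe is dropped. The same pattern in isolation, as a sketch:

const std = @import("std");

fn zlibAlloc(gpa: std.mem.Allocator, reader: *std.Io.Reader, size: usize) ![]u8 {
    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();
    try aw.ensureTotalCapacity(size);

    // Zero-length window buffer, as in the change above.
    var decompress: std.compress.flate.Decompress = .init(reader, .zlib, &.{});
    try decompress.reader.streamExact(&aw.writer, size);
    return aw.toOwnedSlice();
}
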
@@ -1198,15 +1198,14 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index
     const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
     switch (chdr.ch_type) {
         .ZLIB => {
-            var stream = std.io.fixedBufferStream(data[@sizeOf(elf.Elf64_Chdr)..]);
-            var zlib_stream = std.compress.zlib.decompressor(stream.reader());
+            var stream: std.Io.Reader = .fixed(data[@sizeOf(elf.Elf64_Chdr)..]);
+            var zlib_stream: std.compress.flate.Decompress = .init(&stream, .zlib, &.{});
             const size = std.math.cast(usize, chdr.ch_size) orelse return error.Overflow;
-            const decomp = try gpa.alloc(u8, size);
-            const nread = zlib_stream.reader().readAll(decomp) catch return error.InputOutput;
-            if (nread != decomp.len) {
-                return error.InputOutput;
-            }
-            return decomp;
+            var aw: std.Io.Writer.Allocating = .init(gpa);
+            try aw.ensureUnusedCapacity(size);
+            defer aw.deinit();
+            _ = try zlib_stream.reader.streamRemaining(&aw.writer);
+            return aw.toOwnedSlice();
         },
         else => @panic("TODO unhandled compression scheme"),
     }

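Note: the ELF section decompressor follows the same recipe: a `.fixed` reader over the in-memory section bytes feeding a zlib `Decompress` into an `Allocating` writer. A self-contained sketch, using the well-known zlib encoding of the empty string as input:

const std = @import("std");

test "decompress a zlib byte slice with the new API" {
    // zlib stream for the empty string: 0x78 0x9c header, empty deflate
    // block, Adler-32 of "" (1).
    const compressed = [_]u8{ 0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01 };

    var stream: std.Io.Reader = .fixed(&compressed);
    var decompress: std.compress.flate.Decompress = .init(&stream, .zlib, &.{});

    var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();
    const n = try decompress.reader.streamRemaining(&aw.writer);
    try std.testing.expectEqual(@as(u64, 0), n);
}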