std.Io.Reader: use readVec for fill functions

readVec has two updated responsibilities:

1. it must respect any existing already buffered data.
2. it must write to the buffer if data is empty.
parent e17a050bc6
commit 04fe1bfe3c

2 changed files with 123 additions and 65 deletions
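
As a sketch of what the two responsibilities mean for an implementation (modeled on the Hashed.readVec change below; readFromSource is a hypothetical stand-in for whatever actually produces bytes):

    fn readVec(r: *Reader, data: [][]u8) Error!usize {
        // Gather writable destinations: the caller's slices first, then the
        // Reader's own buffer (or its unread tail when data is still buffered).
        var vecs: [8][]u8 = undefined;
        const dest_n, const data_size = try r.writableVector(&vecs, data);
        const n = readFromSource(vecs[0..dest_n]); // hypothetical underlying read
        // Anything beyond the caller's slices landed in `Reader.buffer`; record
        // it by advancing `end` and report only the caller-visible count.
        if (n > data_size) {
            r.end += n - data_size;
            return data_size;
        }
        return n;
    }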

@@ -70,13 +70,14 @@ pub const VTable = struct {
     /// Returns number of bytes written to `data`.
     ///
-    /// `data` may not have nonzero length.
+    /// `data` must have nonzero length. `data[0]` may have zero length, in
+    /// which case the implementation must write to `Reader.buffer`.
     ///
     /// `data` may not contain an alias to `Reader.buffer`.
     ///
-    /// `data` is mutable because the implementation may to temporarily modify
-    /// the fields in order to handle partial reads. Implementations must
-    /// restore the original value before returning.
+    /// `data` is mutable because the implementation may temporarily modify the
+    /// fields in order to handle partial reads. Implementations must restore
+    /// the original value before returning.
     ///
     /// Implementations may ignore `data`, writing directly to `Reader.buffer`,
     /// modifying `seek` and `end` accordingly, and returning 0 from this
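
A zero-length data[0] is exactly how the reworked fill functions later in this commit drive the vtable; the call shape, taken from the new fillMore below, is:

    var bufs: [1][]u8 = .{""};
    // The empty destination tells the implementation to write into
    // `Reader.buffer` and advance `end`; it may return 0.
    _ = try r.vtable.readVec(r, &bufs);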

@@ -421,23 +422,29 @@ pub fn readVec(r: *Reader, data: [][]u8) Error!usize {
+/// Writes to `Reader.buffer` or `data`, whichever has larger capacity.
 pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
-    assert(r.seek == r.end);
-    r.seek = 0;
-    r.end = 0;
     const first = data[0];
-    const direct = first.len >= r.buffer.len;
+    if (r.seek == r.end and first.len >= r.buffer.len) {
+        var writer: Writer = .{
+            .buffer = first,
+            .end = 0,
+            .vtable = &.{ .drain = Writer.fixedDrain },
+        };
+        const limit: Limit = .limited(writer.buffer.len - writer.end);
+        return r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
+            error.WriteFailed => unreachable,
+            else => |e| return e,
+        };
+    }
     var writer: Writer = .{
-        .buffer = if (direct) first else r.buffer,
-        .end = 0,
+        .buffer = r.buffer,
+        .end = r.end,
         .vtable = &.{ .drain = Writer.fixedDrain },
     };
     const limit: Limit = .limited(writer.buffer.len - writer.end);
-    const n = r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
+    r.end += r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
         error.WriteFailed => unreachable,
         else => |e| return e,
     };
-    if (direct) return n;
-    r.end += n;
     return 0;
 }
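
What the new defaultReadVec means for a caller, sketched with an arbitrary 512-byte destination (r is any *Reader): when nothing is buffered and data[0] is at least as large as r.buffer, bytes are streamed straight into data[0] and the count is returned; otherwise bytes are appended to r.buffer at end and 0 is returned, so the caller consumes them from the buffered window. error.WriteFailed is unreachable in both paths because the stream is limited to the fixed writer's remaining capacity.

    var chunk: [512]u8 = undefined;
    var data: [1][]u8 = .{&chunk};
    const n = try r.vtable.readVec(r, &data);
    if (n == 0) {
        // The bytes went into the Reader's own buffer rather than `chunk`.
        const buffered = r.buffer[r.seek..r.end];
        _ = buffered; // copy out or advance `r.seek` as needed
    } else {
        // `n` bytes were written directly into `chunk`.
        _ = chunk[0..n];
    }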

@@ -1059,17 +1066,8 @@ pub fn fill(r: *Reader, n: usize) Error!void {
 /// increasing by a factor of 5 or more.
 fn fillUnbuffered(r: *Reader, n: usize) Error!void {
     try rebase(r, n);
-    var writer: Writer = .{
-        .buffer = r.buffer,
-        .vtable = &.{ .drain = Writer.fixedDrain },
-    };
-    while (r.end < r.seek + n) {
-        writer.end = r.end;
-        r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
-            error.WriteFailed => unreachable,
-            error.ReadFailed, error.EndOfStream => |e| return e,
-        };
-    }
+    var bufs: [1][]u8 = .{""};
+    while (r.end < r.seek + n) _ = try r.vtable.readVec(r, &bufs);
 }

 /// Without advancing the seek position, does exactly one underlying read, filling the buffer as

@@ -1079,15 +1077,8 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
 /// Asserts buffer capacity is at least 1.
 pub fn fillMore(r: *Reader) Error!void {
     try rebase(r, 1);
-    var writer: Writer = .{
-        .buffer = r.buffer,
-        .end = r.end,
-        .vtable = &.{ .drain = Writer.fixedDrain },
-    };
-    r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
-        error.WriteFailed => unreachable,
-        else => |e| return e,
-    };
+    var bufs: [1][]u8 = .{""};
+    _ = try r.vtable.readVec(r, &bufs);
 }

 /// Returns the next byte from the stream or returns `error.EndOfStream`.
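
Both fill functions now reach the underlying stream only through readVec with an empty destination, so every implementation's buffered-data handling is exercised by the same path. A sketch of consuming data after fill (fields manipulated directly; convenience helpers omitted):

    try r.fill(4);
    // fill(4) guarantees at least 4 readable bytes between `seek` and `end`.
    const word = r.buffer[r.seek..][0..4];
    r.seek += 4; // consume them
    _ = word;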

@@ -1796,18 +1787,26 @@ pub fn Hashed(comptime Hasher: type) type {
         fn readVec(r: *Reader, data: [][]u8) Error!usize {
             const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
-            const n = try this.in.readVec(data);
+            var vecs: [8][]u8 = undefined; // Arbitrarily chosen amount.
+            const dest_n, const data_size = try r.writableVector(&vecs, data);
+            const dest = vecs[0..dest_n];
+            const n = try this.in.readVec(dest);
             var remaining: usize = n;
-            for (data) |slice| {
+            for (dest) |slice| {
                 if (remaining < slice.len) {
                     this.hasher.update(slice[0..remaining]);
-                    return n;
+                    remaining = 0;
+                    break;
                 } else {
                     remaining -= slice.len;
                     this.hasher.update(slice);
                 }
             }
             assert(remaining == 0);
+            if (n > data_size) {
+                r.end += n - data_size;
+                return data_size;
+            }
             return n;
         }
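
The dest_n/data_size pair from writableVector is what lets the hasher account for bytes the inner reader parked in Reader.buffer rather than in the caller's slices. A worked illustration of the tail of this function, assuming the caller's slices total 100 bytes:

    // data_size == 100; the inner readVec reports n == 160 because 60 extra
    // bytes were written into r.buffer (the final entry of dest). Then
    // r.end += 160 - 100 and the function returns 100 to the caller, while
    // the 60 surplus bytes stay available in r.buffer[r.seek..r.end].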

@@ -1824,17 +1823,24 @@ pub fn Hashed(comptime Hasher: type) type {
pub fn writableVectorPosix(r: *Reader, buffer: []std.posix.iovec, data: []const []u8) Error!struct { usize, usize } {
var i: usize = 0;
var n: usize = 0;
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (r.seek == r.end) {
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (buf.len != 0) {
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
n += buf.len;
}
}
const buf = r.buffer;
if (buf.len != 0) {
r.seek = 0;
r.end = 0;
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
n += buf.len;
}
}
assert(r.seek == r.end);
const buf = r.buffer;
if (buf.len != 0) {
} else {
const buf = r.buffer[r.end..];
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
}
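
How a platform reader is expected to consume this helper, sketched from the dest_n/data_size usage visible in the second changed file; the std.posix.readv call, the fd handle, and the error mapping are assumptions rather than lines from this diff:

    var iovecs: [16]std.posix.iovec = undefined;
    const dest_n, const data_size = try r.writableVectorPosix(&iovecs, data);
    // Assumed underlying syscall; a real implementation maps its error set
    // onto Reader.Error.
    const n = try std.posix.readv(fd, iovecs[0..dest_n]);
    if (n > data_size) {
        // Whatever exceeded the caller's slices was read into r.buffer.
        r.end += n - data_size;
        return data_size;
    }
    return n;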

@@ -1848,28 +1854,62 @@ pub fn writableVectorWsa(
 ) Error!struct { usize, usize } {
     var i: usize = 0;
     var n: usize = 0;
-    for (data) |buf| {
-        if (buffer.len - i == 0) return .{ i, n };
-        if (buf.len == 0) continue;
-        if (std.math.cast(u32, buf.len)) |len| {
-            buffer[i] = .{ .buf = buf.ptr, .len = len };
-            i += 1;
-            n += len;
-            continue;
-        }
-        buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
-        i += 1;
-        n += std.math.maxInt(u32);
-        return .{ i, n };
-    }
-    assert(r.seek == r.end);
-    const buf = r.buffer;
-    if (buf.len != 0) {
-        if (std.math.cast(u32, buf.len)) |len| {
-            buffer[i] = .{ .buf = buf.ptr, .len = len };
-        } else {
+    if (r.seek == r.end) {
+        for (data) |buf| {
+            if (buffer.len - i == 0) return .{ i, n };
+            if (buf.len == 0) continue;
+            if (std.math.cast(u32, buf.len)) |len| {
+                buffer[i] = .{ .buf = buf.ptr, .len = len };
+                i += 1;
+                n += len;
+                continue;
+            }
+            buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
+            i += 1;
+            n += std.math.maxInt(u32);
+            return .{ i, n };
+        }
+        const buf = r.buffer;
+        if (buf.len != 0) {
+            r.seek = 0;
+            r.end = 0;
+            if (std.math.cast(u32, buf.len)) |len| {
+                buffer[i] = .{ .buf = buf.ptr, .len = len };
+            } else {
+                buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
+            }
+            i += 1;
+        }
+    } else {
+        buffer[i] = .{
+            .buf = r.buffer.ptr + r.end,
+            .len = @min(std.math.maxInt(u32), r.buffer.len - r.end),
+        };
+        i += 1;
+    }
     return .{ i, n };
 }

+pub fn writableVector(r: *Reader, buffer: [][]u8, data: []const []u8) Error!struct { usize, usize } {
+    var i: usize = 0;
+    var n: usize = 0;
+    if (r.seek == r.end) {
+        for (data) |buf| {
+            if (buffer.len - i == 0) return .{ i, n };
+            if (buf.len != 0) {
+                buffer[i] = buf;
+                i += 1;
+                n += buf.len;
+            }
+        }
+        if (r.buffer.len != 0) {
+            r.seek = 0;
+            r.end = 0;
+            buffer[i] = r.buffer;
+            i += 1;
+        }
+    } else {
+        buffer[i] = r.buffer[r.end..];
+        i += 1;
+    }
+    return .{ i, n };
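
In all three variants the returned tuple is (number of vector entries populated, total capacity contributed by data); the Reader's own buffer is appended as a trailing destination but excluded from the second number, which is how callers separate caller-visible bytes from bytes that should become buffered. A small illustrative check against the generic version above:

    // Assume r.seek == r.end and r.buffer.len != 0.
    var vecs: [4][]u8 = undefined;
    var chunk: [32]u8 = undefined;
    const dest_n, const data_size = try r.writableVector(&vecs, &.{&chunk});
    // dest_n == 2: vecs[0] is the caller's chunk, vecs[1] is r.buffer
    // (seek and end having been reset to 0).
    // data_size == 32: only the caller-supplied capacity is counted.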

@@ -1312,7 +1312,16 @@ pub const Reader = struct {
         if (is_windows) {
             // Unfortunately, `ReadFileScatter` cannot be used since it
             // requires page alignment.
-            return readPositional(r, data[0]);
+            assert(io_reader.seek == io_reader.end);
+            io_reader.seek = 0;
+            io_reader.end = 0;
+            const first = data[0];
+            if (first.len >= io_reader.buffer.len) {
+                return readPositional(r, first);
+            } else {
+                io_reader.end += try readPositional(r, io_reader.buffer);
+                return 0;
+            }
         }
         var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
         const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);

@@ -1352,7 +1361,16 @@ pub const Reader = struct {
         if (is_windows) {
             // Unfortunately, `ReadFileScatter` cannot be used since it
             // requires page alignment.
-            return readStreaming(r, data[0]);
+            assert(io_reader.seek == io_reader.end);
+            io_reader.seek = 0;
+            io_reader.end = 0;
+            const first = data[0];
+            if (first.len >= io_reader.buffer.len) {
+                return readStreaming(r, first);
+            } else {
+                io_reader.end += try readStreaming(r, io_reader.buffer);
+                return 0;
+            }
         }
         var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
         const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
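
Both platform branches now make the same choice as defaultReadVec: read into whichever destination can take more bytes per underlying read. With illustrative numbers:

    // io_reader.buffer.len == 4096
    // data[0].len == 65536 -> read up to 65536 bytes straight into data[0]
    // data[0].len == 512   -> read up to 4096 bytes into io_reader.buffer,
    //                         return 0, and let the caller drain the buffer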