Merge pull request #25512 from ziglang/sendfile-fixes

std.Io: Writer and Reader bug fixes related to sendFile, delimiters, Limited, and seeking
This commit is contained in:
Andrew Kelley 2025-10-09 02:30:31 -07:00 committed by GitHub
commit 529aa9f270
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 517 additions and 321 deletions

View file

@ -81,12 +81,13 @@ pub fn build(b: *std.Build) !void {
docs_step.dependOn(langref_step);
docs_step.dependOn(std_docs_step);
const test_default_only = b.option(bool, "test-default-only", "Limit test matrix to exactly one target configuration") orelse false;
const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse test_default_only;
const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
const skip_release_fast = b.option(bool, "skip-release-fast", "Main test suite skips release-fast builds") orelse skip_release;
const skip_release_safe = b.option(bool, "skip-release-safe", "Main test suite skips release-safe builds") orelse skip_release;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse test_default_only;
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
@ -449,6 +450,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@ -471,6 +473,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@ -492,6 +495,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@ -513,6 +517,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,

View file

@ -481,7 +481,6 @@ pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peek(r: *Reader, n: usize) Error![]u8 {
try r.fill(n);
@ -732,7 +731,7 @@ pub const DelimiterError = error{
};
/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position.
/// `sentinel` is found, advancing the seek position past the sentinel.
///
/// Returned slice has a sentinel.
///
@ -765,7 +764,7 @@ pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice includes the delimiter as the last byte.
///
@ -793,32 +792,42 @@ pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
/// Returns a slice of the buffered bytes from the current seek position up to
/// and including `delimiter`, filling more data from the stream as needed,
/// without advancing the seek position (peek semantics).
///
/// Errors:
/// * `error.StreamTooLong` when the buffer fills up without the delimiter
///   appearing.
/// * `error.EndOfStream` when the stream ends before the delimiter is found.
/// * `error.ReadFailed` when the underlying stream fails.
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    {
        // Fast path: the delimiter is already somewhere in the buffered data.
        const contents = r.buffer[0..r.end];
        const seek = r.seek;
        if (std.mem.findScalarPos(u8, contents, seek, delimiter)) |end| {
            @branchHint(.likely);
            return contents[seek .. end + 1];
        }
    }
    while (true) {
        const content_len = r.end - r.seek;
        // No spare capacity left to fill; fall through to disambiguate
        // end-of-stream from stream-too-long below.
        if (r.buffer.len - content_len == 0) break;
        try fillMore(r);
        const seek = r.seek;
        const contents = r.buffer[0..r.end];
        // Only the newly filled region needs to be searched; the first
        // `content_len` bytes were already checked.
        if (std.mem.findScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
            return contents[seek .. end + 1];
        }
    }
    // It might or might not be end of stream. There is no more buffer space
    // left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
    // this logic could be replaced by removing the exit condition from the
    // above while loop. That error code would represent when `buffer` capacity
    // is too small for an operation, replacing the current use of asserts.
    var failing_writer = Writer.failing;
    while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
        assert(n == 0);
    } else |err| switch (err) {
        error.WriteFailed => return error.StreamTooLong,
        error.ReadFailed => |e| return e,
        error.EndOfStream => |e| return e,
    }
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position up to the delimiter.
/// `delimiter` is found, advancing the seek position up to (but not past)
/// the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
@ -832,20 +841,13 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiter`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position up to (but not past)
/// the delimiter.
///
/// Returned slice excludes the delimiter; the delimiter (if present) remains
/// buffered for a subsequent read.
///
/// See also:
/// * `takeDelimiter`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    // Delegate to the peek variant, then consume exactly the returned bytes,
    // leaving the delimiter itself in the buffer.
    const result = try r.peekDelimiterExclusive(delimiter);
    r.toss(result.len);
    return result;
}
/// Returns a slice of the next bytes of buffered data from the stream until
@ -866,7 +868,7 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `takeDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
const inclusive = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return null;
@ -875,8 +877,8 @@ pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong
},
else => |e| return e,
};
r.toss(result.len + 1);
return result[0 .. result.len - 1];
r.toss(inclusive.len);
return inclusive[0 .. inclusive.len - 1];
}
/// Returns a slice of the next bytes of buffered data from the stream until
@ -1403,6 +1405,9 @@ test peekSentinel {
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekSentinel('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}
test takeDelimiterInclusive {
@ -1417,22 +1422,52 @@ test peekDelimiterInclusive {
try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}
test takeDelimiterExclusive {
var r: Reader = .fixed("ab\nc");
// Takes bytes up to (but not past) the delimiter.
try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
// With the delimiter still buffered, repeated calls yield empty slices.
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
// The delimiter byte itself was never consumed.
try testing.expectEqualStrings("\n", try r.take(1));
// End of stream acts as a delimiter for a nonempty tail.
try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n'));
// A fully drained reader reports end of stream.
try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
}
test peekDelimiterExclusive {
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
r.toss(3);
r.toss(2);
try testing.expectEqualStrings("", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));
try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
r.toss(1);
try testing.expectError(error.EndOfStream, r.peekDelimiterExclusive('\n'));
}
test takeDelimiter {
var r: Reader = .fixed("ab\nc\n\nd");
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
// An empty line between two delimiters is returned as an empty slice.
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
// A trailing segment without a delimiter is still returned.
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
// End of stream is reported as null, repeatedly and without error.
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));
r = .fixed("ab\nc\n\nd\n"); // one trailing newline does not affect behavior
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));
}
test streamDelimiter {

View file

@ -27,6 +27,7 @@ pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @fieldParentPtr("interface", r);
if (l.remaining == .nothing) return error.EndOfStream;
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.stream(w, combined_limit);
l.remaining = l.remaining.subtract(n).?;
@ -51,8 +52,51 @@ test stream {
/// `std.Io.Reader` vtable implementation for `Limited`: discards up to
/// `limit` bytes from the wrapped reader without ever exceeding the
/// remaining byte budget.
fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
const l: *Limited = @fieldParentPtr("interface", r);
// A fully spent budget is reported as end-of-stream, not as a 0-byte discard.
if (l.remaining == .nothing) return error.EndOfStream;
// Discard no more than the smaller of the caller's limit and our budget.
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.discard(combined_limit);
// n <= combined_limit <= l.remaining, so the subtraction cannot underflow.
l.remaining = l.remaining.subtract(n).?;
return n;
}
test "end of stream, read, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;
var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}
test "end of stream, read, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;
var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}
test "end of stream, discard, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;
try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}
test "end of stream, discard, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;
try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}

View file

@ -923,10 +923,12 @@ pub fn sendFileHeader(
return n;
}
/// Asserts nonzero buffer capacity and nonzero `limit`.
pub fn sendFileReading(w: *Writer, file_reader: *File.Reader, limit: Limit) FileReadingError!usize {
    assert(limit != .nothing);
    // Read directly into the writer's buffer through the generic reader
    // interface; `readSliceShort` returns 0 only when the stream has ended.
    const dest = limit.slice(try w.writableSliceGreedy(1));
    const n = try file_reader.interface.readSliceShort(dest);
    if (n == 0) return error.EndOfStream;
    w.advance(n);
    return n;
}
@ -2778,7 +2780,8 @@ pub const Allocating = struct {
if (additional == 0) return error.EndOfStream;
a.ensureUnusedCapacity(limit.minInt64(additional)) catch return error.WriteFailed;
const dest = limit.slice(a.writer.buffer[a.writer.end..]);
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
a.writer.end += n;
return n;
}
@ -2849,18 +2852,40 @@ test "allocating sendFile" {
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
var r_buffer: [256]u8 = undefined;
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeByte('h');
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);
var allocating: Writer.Allocating = .init(testing.allocator);
defer allocating.deinit();
try allocating.ensureUnusedCapacity(1);
try testing.expectEqual(4, allocating.writer.sendFileAll(&file_reader, .unlimited));
try testing.expectEqualStrings("abcd", allocating.writer.buffered());
}
_ = try file_reader.interface.streamRemaining(&allocating.writer);
test sendFileReading {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
// Tiny 2-byte buffer so the reader must make multiple passes over "abcd".
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
// Pre-fill the reader's buffer so the send path must also drain buffered bytes.
try file_reader.interface.fill(2);
var w_buffer: [1]u8 = undefined;
var discarding: Writer.Discarding = .init(&w_buffer);
// All 4 bytes should be transferred despite the 1-byte writer buffer.
try testing.expectEqual(4, discarding.writer.sendFileReadingAll(&file_reader, .unlimited));
}
test writeStruct {

View file

@ -1189,7 +1189,7 @@ pub const Reader = struct {
pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
},
.streaming, .streaming_reading => {
if (posix.SEEK == void) {
@ -1198,7 +1198,7 @@ pub const Reader = struct {
}
const seek_err = r.seek_err orelse e: {
if (posix.lseek_CUR(r.file.handle, offset)) |_| {
setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
return;
} else |err| {
r.seek_err = err;
@ -1222,16 +1222,16 @@ pub const Reader = struct {
pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
setPosAdjustingBuffer(r, offset);
setLogicalPos(r, offset);
},
.streaming, .streaming_reading => {
if (offset >= r.pos) return Reader.seekBy(r, @intCast(offset - r.pos));
if (offset >= r.pos) return Reader.seekBy(r, @intCast(offset - logicalPos(r)));
if (r.seek_err) |err| return err;
posix.lseek_SET(r.file.handle, offset) catch |err| {
r.seek_err = err;
return err;
};
setPosAdjustingBuffer(r, offset);
setLogicalPos(r, offset);
},
.failure => return r.seek_err.?,
}
@ -1241,7 +1241,7 @@ pub const Reader = struct {
return r.pos - r.interface.bufferedLen();
}
fn setPosAdjustingBuffer(r: *Reader, offset: u64) void {
fn setLogicalPos(r: *Reader, offset: u64) void {
const logical_pos = logicalPos(r);
if (offset < logical_pos or offset >= r.pos) {
r.interface.seek = 0;
@ -1269,13 +1269,15 @@ pub const Reader = struct {
},
.positional_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try readPositional(r, dest);
var data: [1][]u8 = .{dest};
const n = try readVecPositional(r, &data);
w.advance(n);
return n;
},
.streaming_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try readStreaming(r, dest);
var data: [1][]u8 = .{dest};
const n = try readVecStreaming(r, &data);
w.advance(n);
return n;
},
@ -1286,94 +1288,100 @@ pub const Reader = struct {
/// `std.Io.Reader` vtable implementation: delegates to the positional or
/// streaming read path depending on the file reader's current mode. A reader
/// whose mode has been poisoned to `.failure` always reports
/// `error.ReadFailed`.
fn readVec(io_reader: *std.Io.Reader, data: [][]u8) std.Io.Reader.Error!usize {
    const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
    switch (r.mode) {
        .positional, .positional_reading => return readVecPositional(r, data),
        .streaming, .streaming_reading => return readVecStreaming(r, data),
        .failure => return error.ReadFailed,
    }
}
/// Positional (pread-style) vectored read at `r.pos` into `data`, spilling
/// any excess into the interface buffer. On `error.Unseekable` the reader is
/// demoted to streaming mode and 0 is returned so the caller retries via the
/// streaming path.
fn readVecPositional(r: *Reader, data: [][]u8) std.Io.Reader.Error!usize {
const io_reader = &r.interface;
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readPositional(r, first);
} else {
// Destination is smaller than remaining buffer capacity: read into the
// interface buffer instead and report 0 bytes delivered to `data`.
io_reader.end += try readPositional(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
// Build an iovec array covering `data` plus spare interface-buffer capacity.
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.preadv(r.file.handle, dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
// Fall back to streaming mode, replaying the logical position with a
// relative seek; a failed replay poisons the reader.
r.mode = r.mode.toStreaming();
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
r.err = e;
return error.ReadFailed;
},
};
if (n == 0) {
// Zero-byte pread at `r.pos` means end of file; record the size.
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
// Bytes beyond `data` landed in the interface buffer; only report
// what was delivered into `data`.
io_reader.end += n - data_size;
return data_size;
}
return n;
}
/// Streaming vectored read into `data`, spilling any excess into the
/// interface buffer. Advances `r.pos` by the number of bytes read; a 0-byte
/// read records the file size and reports end of stream.
fn readVecStreaming(r: *Reader, data: [][]u8) std.Io.Reader.Error!usize {
const io_reader = &r.interface;
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readStreaming(r, first);
} else {
// Destination is smaller than remaining buffer capacity: read into the
// interface buffer instead and report 0 bytes delivered to `data`.
io_reader.end += try readStreaming(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
// Build an iovec array covering `data` plus spare interface-buffer capacity.
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.readv(r.file.handle, dest) catch |err| {
r.err = err;
return error.ReadFailed;
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
// Bytes beyond `data` landed in the interface buffer; only report
// what was delivered into `data`.
io_reader.end += n - data_size;
return data_size;
}
return n;
}
fn discard(io_reader: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
const file = r.file;
@ -1440,7 +1448,7 @@ pub const Reader = struct {
}
}
pub fn readPositional(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
fn readPositional(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
const n = r.file.pread(dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
@ -1467,7 +1475,7 @@ pub const Reader = struct {
return n;
}
pub fn readStreaming(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
fn readStreaming(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
const n = r.file.read(dest) catch |err| {
r.err = err;
return error.ReadFailed;
@ -1480,14 +1488,6 @@ pub const Reader = struct {
return n;
}
pub fn read(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
switch (r.mode) {
.positional, .positional_reading => return readPositional(r, dest),
.streaming, .streaming_reading => return readStreaming(r, dest),
.failure => return error.ReadFailed,
}
}
pub fn atEnd(r: *Reader) bool {
// Even if stat fails, size is set when end is encountered.
const size = r.size orelse return false;
@ -1732,7 +1732,7 @@ pub const Writer = struct {
) std.Io.Writer.FileError!usize {
const reader_buffered = file_reader.interface.buffered();
if (reader_buffered.len >= @intFromEnum(limit))
return sendFileBuffered(io_w, file_reader, reader_buffered);
return sendFileBuffered(io_w, file_reader, limit.slice(reader_buffered));
const writer_buffered = io_w.buffered();
const file_limit = @intFromEnum(limit) - reader_buffered.len;
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
@ -1804,7 +1804,7 @@ pub const Writer = struct {
return error.EndOfStream;
}
const consumed = io_w.consume(@intCast(sbytes));
file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
return consumed;
}
@ -1865,7 +1865,7 @@ pub const Writer = struct {
return error.EndOfStream;
}
const consumed = io_w.consume(@bitCast(len));
file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
return consumed;
}
@ -1998,7 +1998,7 @@ pub const Writer = struct {
reader_buffered: []const u8,
) std.Io.Writer.FileError!usize {
const n = try drain(io_w, &.{reader_buffered}, 1);
file_reader.seekTo(file_reader.pos + n) catch return error.ReadFailed;
file_reader.seekBy(@intCast(n)) catch return error.ReadFailed;
return n;
}

View file

@ -1525,6 +1525,41 @@ test "sendfile" {
try testing.expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
}
test "sendfile with buffered data" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.makePath("os_test_tmp");
var dir = try tmp.dir.openDir("os_test_tmp", .{});
defer dir.close();
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
defer src_file.close();
try src_file.writeAll("AAAABBBB");
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
defer dest_file.close();
var src_buffer: [32]u8 = undefined;
var file_reader = src_file.reader(&src_buffer);
try file_reader.seekTo(0);
try file_reader.interface.fill(8);
var fallback_buffer: [32]u8 = undefined;
var file_writer = dest_file.writer(&fallback_buffer);
try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));
var written_buf: [8]u8 = undefined;
const amt = try dest_file.preadAll(&written_buf, 0);
try std.testing.expectEqual(4, amt);
try std.testing.expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
}
test "copyRangeAll" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -2250,3 +2285,34 @@ test "seekTo flushes buffered data" {
try file_reader.interface.readSliceAll(&buf);
try std.testing.expectEqualStrings(contents, &buf);
}
test "File.Writer sendfile with buffered contents" {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
{
try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" });
const in = try tmp_dir.dir.openFile("a", .{});
defer in.close();
const out = try tmp_dir.dir.createFile("b", .{});
defer out.close();
var in_buf: [2]u8 = undefined;
var in_r = in.reader(&in_buf);
_ = try in_r.getSize(); // Catch seeks past end by populating size
try in_r.interface.fill(2);
var out_buf: [1]u8 = undefined;
var out_w = out.writerStreaming(&out_buf);
try out_w.interface.writeByte('a');
try testing.expectEqual(3, try out_w.interface.sendFileAll(&in_r, .unlimited));
try out_w.interface.flush();
}
var check = try tmp_dir.dir.openFile("b", .{});
defer check.close();
var check_buf: [4]u8 = undefined;
var check_r = check.reader(&check_buf);
try testing.expectEqualStrings("abcd", try check_r.interface.take(4));
try testing.expectError(error.EndOfStream, check_r.interface.takeByte());
}

View file

@ -1396,7 +1396,7 @@ fn parseHosts(
br: *Io.Reader,
) error{ OutOfMemory, ReadFailed }!void {
while (true) {
const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
const line = br.takeDelimiter('\n') catch |err| switch (err) {
error.StreamTooLong => {
// Skip lines that are too long.
_ = br.discardDelimiterInclusive('\n') catch |e| switch (e) {
@ -1406,7 +1406,8 @@ fn parseHosts(
continue;
},
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => break,
} orelse {
break; // end of stream
};
var split_it = mem.splitScalar(u8, line, '#');
const no_comment_line = split_it.first();

View file

@ -359,14 +359,11 @@ fn CpuinfoParser(comptime impl: anytype) type {
return struct {
fn parse(arch: Target.Cpu.Arch, reader: *std.Io.Reader) !?Target.Cpu {
var obj: impl = .{};
while (reader.takeDelimiterExclusive('\n')) |line| {
while (try reader.takeDelimiter('\n')) |line| {
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
const key = mem.trimEnd(u8, line[0..colon_pos], " \t");
const value = mem.trimStart(u8, line[colon_pos + 1 ..], " \t");
if (!try obj.line_hook(key, value)) break;
} else |err| switch (err) {
error.EndOfStream => {},
else => |e| return e,
}
return obj.finalize(arch);
}

View file

@ -411,7 +411,8 @@ pub const Node = extern struct {
.failure,
=> {
const dest = limit.slice(interface.unusedCapacitySlice());
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
interface.end += n;
return n;
},

View file

@ -44,7 +44,7 @@ const test_targets = blk: {
break :blk [_]TestTarget{
// Native Targets
.{},
.{}, // 0 index must be all defaults
.{
.link_libc = true,
},
@ -2224,6 +2224,7 @@ const ModuleTestOptions = struct {
desc: []const u8,
optimize_modes: []const OptimizeMode,
include_paths: []const []const u8,
test_default_only: bool,
skip_single_threaded: bool,
skip_non_native: bool,
skip_freebsd: bool,
@ -2235,13 +2236,21 @@ const ModuleTestOptions = struct {
skip_libc: bool,
max_rss: usize = 0,
no_builtin: bool = false,
build_options: ?*std.Build.Step.Options = null,
build_options: ?*Step.Options = null,
};
pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
const step = b.step(b.fmt("test-{s}", .{options.name}), options.desc);
for_targets: for (test_targets) |test_target| {
if (options.test_default_only) {
const test_target = &test_targets[0];
const resolved_target = b.resolveTargetQuery(test_target.target);
const triple_txt = resolved_target.query.zigTriple(b.allocator) catch @panic("OOM");
addOneModuleTest(b, step, test_target, &resolved_target, triple_txt, options);
return step;
}
for_targets: for (&test_targets) |*test_target| {
if (test_target.skip_modules.len > 0) {
for (test_target.skip_modules) |skip_mod| {
if (std.mem.eql(u8, options.name, skip_mod)) continue :for_targets;
@ -2306,169 +2315,182 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
} else false;
if (!want_this_mode) continue;
const libc_suffix = if (test_target.link_libc == true) "-libc" else "";
// --- tail of the per-target test-matrix loop (enclosing function opens above this chunk) ---
const model_txt = target.cpu.model.name;
// wasm32-wasi builds need more RAM, idk why
const max_rss = if (target.os.tag == .wasi)
    options.max_rss * 2
else
    options.max_rss;
// One test compilation per matrix entry; the created module carries the
// per-entry knobs (optimize mode, libc, PIC, strip, threading).
const these_tests = b.addTest(.{
    .root_module = b.createModule(.{
        .root_source_file = b.path(options.root_src),
        .optimize = test_target.optimize_mode,
        .target = resolved_target,
        .link_libc = test_target.link_libc,
        .pic = test_target.pic,
        .strip = test_target.strip,
        .single_threaded = test_target.single_threaded,
    }),
    .max_rss = max_rss,
    .filters = options.test_filters,
    .use_llvm = test_target.use_llvm,
    .use_lld = test_target.use_lld,
    .zig_lib_dir = b.path("lib"),
});
these_tests.linkage = test_target.linkage;
// NOTE(review): the option is named `no_builtin`, yet this assigns `false`,
// which explicitly *enables* builtin recognition. This looks inverted —
// `true` appears to be the intent; verify against the option's consumers.
if (options.no_builtin) these_tests.root_module.no_builtin = false;
if (options.build_options) |build_options| {
    these_tests.root_module.addOptions("build_options", build_options);
}
// Name suffixes, one per matrix axis, so every step name is unique and
// describes its exact configuration.
const single_threaded_suffix = if (test_target.single_threaded == true) "-single" else "";
const backend_suffix = if (test_target.use_llvm == true)
    "-llvm"
else if (target.ofmt == std.Target.ObjectFormat.c)
    "-cbe"
else if (test_target.use_llvm == false)
    "-selfhosted"
else
    "";
const use_lld = if (test_target.use_lld == false) "-no-lld" else "";
const linkage_name = if (test_target.linkage) |linkage| switch (linkage) {
    inline else => |t| "-" ++ @tagName(t),
} else "";
const use_pic = if (test_target.pic == true) "-pic" else "";
for (options.include_paths) |include_path| these_tests.root_module.addIncludePath(b.path(include_path));
const qualified_name = b.fmt("{s}-{s}-{s}-{s}{s}{s}{s}{s}{s}{s}", .{
    options.name,
    triple_txt,
    model_txt,
    @tagName(test_target.optimize_mode),
    libc_suffix,
    single_threaded_suffix,
    backend_suffix,
    use_lld,
    linkage_name,
    use_pic,
});
// C-backend entries: the emitted artifact is C source, so compile it back to
// a native executable (default object format) and run that instead.
if (target.ofmt == std.Target.ObjectFormat.c) {
    var altered_query = test_target.target;
    // Drop the C ofmt so the round-trip compile targets the default format.
    altered_query.ofmt = null;
    const compile_c = b.createModule(.{
        .root_source_file = null,
        .link_libc = test_target.link_libc,
        .target = b.resolveTargetQuery(altered_query),
    });
    const compile_c_exe = b.addExecutable(.{
        .name = qualified_name,
        .root_module = compile_c,
        .zig_lib_dir = b.path("lib"),
    });
    compile_c.addCSourceFile(.{
        .file = these_tests.getEmittedBin(),
        .flags = &.{
            // Tracking issue for making the C backend generate C89 compatible code:
            // https://github.com/ziglang/zig/issues/19468
            "-std=c99",
            "-Werror",
            "-Wall",
            "-Wembedded-directive",
            "-Wempty-translation-unit",
            "-Wextra",
            "-Wgnu",
            "-Winvalid-utf8",
            "-Wkeyword-macro",
            "-Woverlength-strings",
            // Tracking issue for making the C backend generate code
            // that does not trigger warnings:
            // https://github.com/ziglang/zig/issues/19467
            // spotted everywhere
            "-Wno-builtin-requires-header",
            // spotted on linux
            "-Wno-braced-scalar-init",
            "-Wno-excess-initializers",
            "-Wno-incompatible-pointer-types-discards-qualifiers",
            "-Wno-unused",
            "-Wno-unused-parameter",
            // spotted on darwin
            "-Wno-incompatible-pointer-types",
            // https://github.com/llvm/llvm-project/issues/153314
            "-Wno-unterminated-string-initialization",
            // In both Zig and C it is legal to return a pointer to a
            // local. The C backend lowers such thing directly, so the
            // corresponding warning in C must be disabled.
            "-Wno-return-stack-address",
        },
    });
    compile_c.addIncludePath(b.path("lib")); // for zig.h
    if (target.os.tag == .windows) {
        if (true) {
            // Unfortunately this requires about 8G of RAM for clang to compile
            // and our Windows CI runners do not have this much.
            step.dependOn(&these_tests.step);
            continue;
        }
        // Dead while the unconditional `continue` above is in place.
        if (test_target.link_libc == false) {
            compile_c_exe.subsystem = .Console;
            compile_c.linkSystemLibrary("kernel32", .{});
            compile_c.linkSystemLibrary("ntdll", .{});
        }
        if (mem.eql(u8, options.name, "std")) {
            if (test_target.link_libc == false) {
                compile_c.linkSystemLibrary("shell32", .{});
                compile_c.linkSystemLibrary("advapi32", .{});
            }
            compile_c.linkSystemLibrary("crypt32", .{});
            compile_c.linkSystemLibrary("ws2_32", .{});
            compile_c.linkSystemLibrary("ole32", .{});
        }
    }
    const run = b.addRunArtifact(compile_c_exe);
    run.skip_foreign_checks = true;
    run.enableTestRunnerMode();
    run.setName(b.fmt("run test {s}", .{qualified_name}));
    step.dependOn(&run.step);
} else if (target.cpu.arch.isSpirV()) {
    // Don't run spirv binaries
    _ = these_tests.getEmittedBin();
    step.dependOn(&these_tests.step);
} else {
    const run = b.addRunArtifact(these_tests);
    run.skip_foreign_checks = true;
    run.setName(b.fmt("run test {s}", .{qualified_name}));
    step.dependOn(&run.step);
}
// Also register the same matrix entry via the single-invocation helper.
addOneModuleTest(b, step, test_target, &resolved_target, triple_txt, options);
}
return step;
}
/// Registers one test compilation for a single (target, optimize-mode,
/// backend) matrix entry onto `step`. Mirrors one iteration of the main
/// test-matrix loop above:
/// - C object format: the emitted C source is recompiled by clang into a
///   native executable, which is then run in test-runner mode;
/// - SPIR-V: the binary is built but never executed;
/// - everything else: the test binary is run directly, with foreign-target
///   checks skipped so cross-compiled entries don't fail the host run.
fn addOneModuleTest(
    b: *std.Build,
    step: *Step,
    test_target: *const TestTarget,
    resolved_target: *const std.Build.ResolvedTarget,
    triple_txt: []const u8,
    options: ModuleTestOptions,
) void {
    const target = &resolved_target.result;
    const libc_suffix = if (test_target.link_libc == true) "-libc" else "";
    const model_txt = target.cpu.model.name;
    // wasm32-wasi builds need more RAM, idk why
    const max_rss = if (target.os.tag == .wasi)
        options.max_rss * 2
    else
        options.max_rss;
    // One test compilation; the created module carries the per-entry knobs.
    const these_tests = b.addTest(.{
        .root_module = b.createModule(.{
            .root_source_file = b.path(options.root_src),
            .optimize = test_target.optimize_mode,
            .target = resolved_target.*,
            .link_libc = test_target.link_libc,
            .pic = test_target.pic,
            .strip = test_target.strip,
            .single_threaded = test_target.single_threaded,
        }),
        .max_rss = max_rss,
        .filters = options.test_filters,
        .use_llvm = test_target.use_llvm,
        .use_lld = test_target.use_lld,
        .zig_lib_dir = b.path("lib"),
    });
    these_tests.linkage = test_target.linkage;
    // `no_builtin` asks for the suite to be built without builtin recognition;
    // propagate the request to the module. (Previously this assigned `false`,
    // which explicitly re-enabled builtins — an inverted boolean.)
    if (options.no_builtin) these_tests.root_module.no_builtin = true;
    if (options.build_options) |build_options| {
        these_tests.root_module.addOptions("build_options", build_options);
    }
    // Name suffixes, one per matrix axis, so every step name is unique and
    // describes its exact configuration.
    const single_threaded_suffix = if (test_target.single_threaded == true) "-single" else "";
    const backend_suffix = if (test_target.use_llvm == true)
        "-llvm"
    else if (target.ofmt == std.Target.ObjectFormat.c)
        "-cbe"
    else if (test_target.use_llvm == false)
        "-selfhosted"
    else
        "";
    const use_lld = if (test_target.use_lld == false) "-no-lld" else "";
    const linkage_name = if (test_target.linkage) |linkage| switch (linkage) {
        inline else => |t| "-" ++ @tagName(t),
    } else "";
    const use_pic = if (test_target.pic == true) "-pic" else "";
    for (options.include_paths) |include_path| these_tests.root_module.addIncludePath(b.path(include_path));
    const qualified_name = b.fmt("{s}-{s}-{s}-{t}{s}{s}{s}{s}{s}{s}", .{
        options.name,
        triple_txt,
        model_txt,
        test_target.optimize_mode,
        libc_suffix,
        single_threaded_suffix,
        backend_suffix,
        use_lld,
        linkage_name,
        use_pic,
    });
    // C-backend entries: the emitted artifact is C source, so compile it back
    // to a native executable (default object format) and run that instead.
    if (target.ofmt == std.Target.ObjectFormat.c) {
        var altered_query = test_target.target;
        // Drop the C ofmt so the round-trip compile targets the default format.
        altered_query.ofmt = null;
        const compile_c = b.createModule(.{
            .root_source_file = null,
            .link_libc = test_target.link_libc,
            .target = b.resolveTargetQuery(altered_query),
        });
        const compile_c_exe = b.addExecutable(.{
            .name = qualified_name,
            .root_module = compile_c,
            .zig_lib_dir = b.path("lib"),
        });
        compile_c.addCSourceFile(.{
            .file = these_tests.getEmittedBin(),
            .flags = &.{
                // Tracking issue for making the C backend generate C89 compatible code:
                // https://github.com/ziglang/zig/issues/19468
                "-std=c99",
                "-Werror",
                "-Wall",
                "-Wembedded-directive",
                "-Wempty-translation-unit",
                "-Wextra",
                "-Wgnu",
                "-Winvalid-utf8",
                "-Wkeyword-macro",
                "-Woverlength-strings",
                // Tracking issue for making the C backend generate code
                // that does not trigger warnings:
                // https://github.com/ziglang/zig/issues/19467
                // spotted everywhere
                "-Wno-builtin-requires-header",
                // spotted on linux
                "-Wno-braced-scalar-init",
                "-Wno-excess-initializers",
                "-Wno-incompatible-pointer-types-discards-qualifiers",
                "-Wno-unused",
                "-Wno-unused-parameter",
                // spotted on darwin
                "-Wno-incompatible-pointer-types",
                // https://github.com/llvm/llvm-project/issues/153314
                "-Wno-unterminated-string-initialization",
                // In both Zig and C it is legal to return a pointer to a
                // local. The C backend lowers such thing directly, so the
                // corresponding warning in C must be disabled.
                "-Wno-return-stack-address",
            },
        });
        compile_c.addIncludePath(b.path("lib")); // for zig.h
        if (target.os.tag == .windows) {
            if (true) {
                // Unfortunately this requires about 8G of RAM for clang to compile
                // and our Windows CI runners do not have this much.
                // TODO This is not an appropriate way to work around this problem.
                step.dependOn(&these_tests.step);
                return;
            }
            // Dead while the unconditional `return` above is in place.
            if (test_target.link_libc == false) {
                compile_c_exe.subsystem = .Console;
                compile_c.linkSystemLibrary("kernel32", .{});
                compile_c.linkSystemLibrary("ntdll", .{});
            }
            if (mem.eql(u8, options.name, "std")) {
                if (test_target.link_libc == false) {
                    compile_c.linkSystemLibrary("shell32", .{});
                    compile_c.linkSystemLibrary("advapi32", .{});
                }
                compile_c.linkSystemLibrary("crypt32", .{});
                compile_c.linkSystemLibrary("ws2_32", .{});
                compile_c.linkSystemLibrary("ole32", .{});
            }
        }
        const run = b.addRunArtifact(compile_c_exe);
        run.skip_foreign_checks = true;
        run.enableTestRunnerMode();
        run.setName(b.fmt("run test {s}", .{qualified_name}));
        step.dependOn(&run.step);
    } else if (target.cpu.arch.isSpirV()) {
        // Don't run spirv binaries
        _ = these_tests.getEmittedBin();
        step.dependOn(&these_tests.step);
    } else {
        const run = b.addRunArtifact(these_tests);
        run.skip_foreign_checks = true;
        run.setName(b.fmt("run test {s}", .{qualified_name}));
        step.dependOn(&run.step);
    }
}
pub fn wouldUseLlvm(use_llvm: ?bool, query: std.Target.Query, optimize_mode: OptimizeMode) bool {
if (use_llvm) |x| return x;
if (query.ofmt == .c) return false;