std.io: deprecated Reader/Writer; introduce new API

Andrew Kelley 2025-06-27 19:33:03 -07:00
parent fc2c1883b3
commit 9f27d770a1
56 changed files with 4272 additions and 474 deletions


@ -2949,7 +2949,7 @@ pub fn HeaderSlurpingReader(comptime size: usize, comptime ReaderType: anytype)
slurped_header: [size]u8 = [_]u8{0x00} ** size,
pub const Error = ReaderType.Error;
pub const Reader = std.io.Reader(*@This(), Error, read);
pub const Reader = std.io.GenericReader(*@This(), Error, read);
pub fn read(self: *@This(), buf: []u8) Error!usize {
const amt = try self.child_reader.read(buf);
@ -2983,7 +2983,7 @@ pub fn LimitedWriter(comptime WriterType: type) type {
bytes_left: u64,
pub const Error = error{NoSpaceLeft} || WriterType.Error;
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
const Self = @This();


@ -471,7 +471,7 @@ const IoStream = struct {
allocator: std.mem.Allocator,
};
pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
pub const Writer = std.io.Writer(WriterContext, WriteError, write);
pub const Writer = std.io.GenericWriter(WriterContext, WriteError, write);
pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
switch (ctx.self.*) {


@ -338,11 +338,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
@memcpy(self.items[old_len..][0..items.len], items);
}
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(*Self, Allocator.Error, appendWrite);
pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
const gpa = self.allocator;
var unmanaged = self.moveToUnmanaged();
defer self.* = unmanaged.toManaged(gpa);
try unmanaged.print(gpa, fmt, args);
}
pub const Writer = if (T != u8) void else std.io.GenericWriter(*Self, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self) Writer {
@ -350,14 +353,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
/// Invalidates element pointers if additional memory is needed.
fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
try self.appendSlice(m);
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
/// Initializes a Writer which will append to the list but will return
/// `error.OutOfMemory` rather than increasing capacity.
@ -365,7 +368,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
return .{ .context = self };
}
/// The purpose of this function existing is to match `std.io.Writer` API.
/// The purpose of this function existing is to match `std.io.GenericWriter` API.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
const available_capacity = self.capacity - self.items.len;
if (m.len > available_capacity)
@ -933,40 +936,56 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
@memcpy(self.items[old_len..][0..items.len], items);
}
pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
comptime assert(T == u8);
try self.ensureUnusedCapacity(gpa, fmt.len);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, self);
defer self.* = aw.toArrayList();
return aw.interface.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}
pub fn printAssumeCapacity(self: *Self, comptime fmt: []const u8, args: anytype) void {
comptime assert(T == u8);
var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
w.print(fmt, args) catch unreachable;
self.items.len += w.end;
}
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
pub const WriterContext = struct {
self: *Self,
allocator: Allocator,
};
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(WriterContext, Allocator.Error, appendWrite);
std.io.GenericWriter(WriterContext, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
pub fn writer(self: *Self, gpa: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = gpa } };
}
/// Same as `append` except it returns the number of bytes written,
/// which is always the same as `m.len`. The purpose of this function
/// existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
try context.self.appendSlice(context.allocator, m);
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
/// Initializes a Writer which will append to the list but will return
/// `error.OutOfMemory` rather than increasing capacity.
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
pub fn fixedWriter(self: *Self) FixedWriter {
return .{ .context = self };
}
/// The purpose of this function existing is to match `std.io.Writer` API.
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
const available_capacity = self.capacity - self.items.len;
if (m.len > available_capacity)
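To ground the new entry points above, a minimal usage sketch (hedged; assumes `const std = @import("std");` and an allocator `gpa` are in scope, and the printed values are illustrative):

var list: std.ArrayListUnmanaged(u8) = .empty;
defer list.deinit(gpa);
// Grows the list as needed; the new Writer's error.WriteFailed is
// mapped back to error.OutOfMemory, per the catch in `print` above.
try list.print(gpa, "{d} files changed", .{56});
// Writes only into capacity that was already reserved.
try list.ensureUnusedCapacity(gpa, 32);
list.printAssumeCapacity(" (+{d}, -{d})", .{ 4272, 474 });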


@ -108,7 +108,7 @@ pub const Base64Encoder = struct {
}
}
// dest must be compatible with std.io.Writer's writeAll interface
// dest must be compatible with std.io.GenericWriter's writeAll interface
pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
var chunker = window(u8, source, 3, 3);
while (chunker.next()) |chunk| {
@ -118,8 +118,8 @@ pub const Base64Encoder = struct {
}
}
// destWriter must be compatible with std.io.Writer's writeAll interface
// sourceReader must be compatible with std.io.Reader's read interface
// destWriter must be compatible with std.io.GenericWriter's writeAll interface
// sourceReader must be compatible with std.io.GenericReader's read interface
pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
while (true) {
var tempSource: [3]u8 = undefined;
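A hedged usage sketch of `encodeWriter` with a writer that provides `writeAll` (here an `ArrayList(u8)` writer; `gpa` is an assumed allocator):

var out = std.ArrayList(u8).init(gpa);
defer out.deinit();
// Any type exposing a writeAll method works as `dest`.
try std.base64.standard.Encoder.encodeWriter(out.writer(), "hello, base64");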


@ -277,7 +277,7 @@ pub fn BoundedArrayAligned(
@compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
"but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
else
std.io.Writer(*Self, error{Overflow}, appendWrite);
std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
/// Initializes a writer which will write into the array.
pub fn writer(self: *Self) Writer {
@ -285,7 +285,7 @@ pub fn BoundedArrayAligned(
}
/// Same as `appendSlice` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
try self.appendSlice(m);
return m.len;


@ -16,7 +16,7 @@ pub fn HashedReader(ReaderType: type, HasherType: type) type {
hasher: HasherType,
pub const Error = ReaderType.Error;
pub const Reader = std.io.Reader(*@This(), Error, read);
pub const Reader = std.io.GenericReader(*@This(), Error, read);
pub fn read(self: *@This(), buf: []u8) Error!usize {
const amt = try self.child_reader.read(buf);
@ -43,7 +43,7 @@ pub fn HashedWriter(WriterType: type, HasherType: type) type {
hasher: HasherType,
pub const Error = WriterType.Error;
pub const Writer = std.io.Writer(*@This(), Error, write);
pub const Writer = std.io.GenericWriter(*@This(), Error, write);
pub fn write(self: *@This(), buf: []const u8) Error!usize {
const amt = try self.child_writer.write(buf);


@ -355,7 +355,7 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
// Writer interface
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
pub const Error = BlockWriterType.Error;
/// Write `input` of uncompressed data.
@ -512,7 +512,7 @@ fn SimpleCompressor(
// Writer interface
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
pub const Error = BlockWriterType.Error;
// Write `input` of uncompressed data.


@ -341,7 +341,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
// Reader interface
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.


@ -30,7 +30,7 @@ pub fn Decompress(comptime ReaderType: type) type {
Allocator.Error ||
error{ CorruptInput, EndOfStream, Overflow };
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
allocator: Allocator,
in_reader: ReaderType,


@ -34,7 +34,7 @@ pub fn Decompress(comptime ReaderType: type) type {
const Self = @This();
pub const Error = ReaderType.Error || block.Decoder(ReaderType).Error;
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
allocator: Allocator,
block_decoder: block.Decoder(ReaderType),


@ -27,7 +27,7 @@ pub fn Decoder(comptime ReaderType: type) type {
ReaderType.Error ||
DecodeError ||
Allocator.Error;
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
allocator: Allocator,
inner_reader: ReaderType,


@ -50,7 +50,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
OutOfMemory,
};
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
pub fn init(source: ReaderType, options: DecompressorOptions) Self {
return .{


@ -4,7 +4,7 @@ pub const ReversedByteReader = struct {
remaining_bytes: usize,
bytes: []const u8,
const Reader = std.io.Reader(*ReversedByteReader, error{}, readFn);
const Reader = std.io.GenericReader(*ReversedByteReader, error{}, readFn);
pub fn init(bytes: []const u8) ReversedByteReader {
return .{


@ -803,7 +803,7 @@ fn AegisMac(comptime T: type) type {
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Mac, Error, write);
pub const Writer = std.io.GenericWriter(*Mac, Error, write);
fn write(self: *Mac, bytes: []const u8) Error!usize {
self.update(bytes);


@ -187,7 +187,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);


@ -476,7 +476,7 @@ pub const Blake3 = struct {
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Blake3, Error, write);
pub const Writer = std.io.GenericWriter(*Blake3, Error, write);
fn write(self: *Blake3, bytes: []const u8) Error!usize {
self.update(bytes);


@ -45,7 +45,7 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
self.data.ptr = begin;
}
pub const Writer = std.io.Writer(*ArrayListReverse, Error, prependSliceSize);
pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
pub fn writer(self: *ArrayListReverse) Writer {
return .{ .context = self };


@ -269,7 +269,7 @@ pub const Sha1 = struct {
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);


@ -376,7 +376,7 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);


@ -82,7 +82,7 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime default_delim
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
@ -193,7 +193,7 @@ fn ShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
@ -286,7 +286,7 @@ fn CShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
@ -392,7 +392,7 @@ fn KMacLike(comptime security_level: u11, comptime default_delim: u8, comptime r
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
@ -484,7 +484,7 @@ fn TupleHashLike(comptime security_level: u11, comptime default_delim: u8, compt
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);


@ -240,7 +240,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);


@ -562,7 +562,7 @@ const MsfStream = struct {
return block * self.block_size + offset;
}
pub fn reader(self: *MsfStream) std.io.Reader(*MsfStream, Error, read) {
pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
return .{ .context = self };
}
};


@ -38,8 +38,8 @@ pub fn LinearFifo(
count: usize,
const Self = @This();
pub const Reader = std.io.Reader(*Self, error{}, readFn);
pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);
pub const Reader = std.io.GenericReader(*Self, error{}, readFn);
pub const Writer = std.io.GenericWriter(*Self, error{OutOfMemory}, appendWrite);
// Type of Self argument for slice operations.
// If buffer is inline (Static) then we need to ensure we haven't
@ -231,7 +231,7 @@ pub fn LinearFifo(
}
/// Same as `read` except it returns an error union
/// The purpose of this function existing is to match `std.io.Reader` API.
/// The purpose of this function existing is to match `std.io.GenericReader` API.
fn readFn(self: *Self, dest: []u8) error{}!usize {
return self.read(dest);
}
@ -320,7 +320,7 @@ pub fn LinearFifo(
}
/// Same as `write` except it returns the number of bytes written, which is always the same
/// as `bytes.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// as `bytes.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
fn appendWrite(self: *Self, bytes: []const u8) error{OutOfMemory}!usize {
try self.write(bytes);
return bytes.len;
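A short sketch of the fifo serving as both adapters (assuming an allocator `gpa`):

var fifo = std.fifo.LinearFifo(u8, .Dynamic).init(gpa);
defer fifo.deinit();
try fifo.writer().writeAll("queued"); // appendWrite under the hood
var buf: [6]u8 = undefined;
const n = try fifo.reader().read(&buf); // readFn under the hood
_ = n;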


@ -1581,13 +1581,13 @@ fn writeFileAllSendfile(self: File, in_file: File, args: WriteFileOptions) posix
}
}
pub const Reader = io.Reader(File, ReadError, read);
pub const Reader = io.GenericReader(File, ReadError, read);
pub fn reader(file: File) Reader {
return .{ .context = file };
}
pub const Writer = io.Writer(File, WriteError, write);
pub const Writer = io.GenericWriter(File, WriteError, write);
pub fn writer(file: File) Writer {
return .{ .context = file };
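The adapter pattern itself is unchanged here; only the factory name moves. A sketch:

var file = try std.fs.cwd().createFile("example.txt", .{});
defer file.close();
// file.writer() returns the GenericWriter adapter over File.write.
try file.writer().print("{s}\n", .{"hello"});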


@ -311,7 +311,7 @@ pub const Connection = struct {
EndOfStream,
};
pub const Reader = std.io.Reader(*Connection, ReadError, read);
pub const Reader = std.io.GenericReader(*Connection, ReadError, read);
pub fn reader(conn: *Connection) Reader {
return Reader{ .context = conn };
@ -374,7 +374,7 @@ pub const Connection = struct {
UnexpectedWriteFailure,
};
pub const Writer = std.io.Writer(*Connection, WriteError, write);
pub const Writer = std.io.GenericWriter(*Connection, WriteError, write);
pub fn writer(conn: *Connection) Writer {
return Writer{ .context = conn };
@ -934,7 +934,7 @@ pub const Request = struct {
const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);
const TransferReader = std.io.GenericReader(*Request, TransferReadError, transferRead);
fn transferReader(req: *Request) TransferReader {
return .{ .context = req };
@ -1094,7 +1094,7 @@ pub const Request = struct {
pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError ||
error{ DecompressionFailure, InvalidTrailers };
pub const Reader = std.io.Reader(*Request, ReadError, read);
pub const Reader = std.io.GenericReader(*Request, ReadError, read);
pub fn reader(req: *Request) Reader {
return .{ .context = req };
@ -1134,7 +1134,7 @@ pub const Request = struct {
pub const WriteError = Connection.WriteError || error{ NotWriteable, MessageTooLong };
pub const Writer = std.io.Writer(*Request, WriteError, write);
pub const Writer = std.io.GenericWriter(*Request, WriteError, write);
pub fn writer(req: *Request) Writer {
return .{ .context = req };


@ -344,7 +344,7 @@ const MockBufferedConnection = struct {
}
pub const ReadError = std.io.FixedBufferStream([]const u8).ReadError || error{EndOfStream};
pub const Reader = std.io.Reader(*MockBufferedConnection, ReadError, read);
pub const Reader = std.io.GenericReader(*MockBufferedConnection, ReadError, read);
pub fn reader(conn: *MockBufferedConnection) Reader {
return Reader{ .context = conn };
@ -359,7 +359,7 @@ const MockBufferedConnection = struct {
}
pub const WriteError = std.io.FixedBufferStream([]const u8).WriteError;
pub const Writer = std.io.Writer(*MockBufferedConnection, WriteError, write);
pub const Writer = std.io.GenericWriter(*MockBufferedConnection, WriteError, write);
pub fn writer(conn: *MockBufferedConnection) Writer {
return Writer{ .context = conn };


@ -14,6 +14,69 @@ const File = std.fs.File;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
pub const Limit = enum(usize) {
nothing = 0,
unlimited = std.math.maxInt(usize),
_,
/// `std.math.maxInt(usize)` is interpreted to mean `.unlimited`.
pub fn limited(n: usize) Limit {
return @enumFromInt(n);
}
pub fn countVec(data: []const []const u8) Limit {
var total: usize = 0;
for (data) |d| total += d.len;
return .limited(total);
}
pub fn min(a: Limit, b: Limit) Limit {
return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
}
pub fn minInt(l: Limit, n: usize) usize {
return @min(n, @intFromEnum(l));
}
pub fn slice(l: Limit, s: []u8) []u8 {
return s[0..l.minInt(s.len)];
}
pub fn sliceConst(l: Limit, s: []const u8) []const u8 {
return s[0..l.minInt(s.len)];
}
pub fn toInt(l: Limit) ?usize {
return switch (l) {
else => @intFromEnum(l),
.unlimited => null,
};
}
/// Reduces a slice to account for the limit, leaving room for one extra
/// byte above the limit, allowing for the use case of differentiating
/// between end-of-stream and reaching the limit.
pub fn slice1(l: Limit, non_empty_buffer: []u8) []u8 {
assert(non_empty_buffer.len >= 1);
return non_empty_buffer[0..@min(@intFromEnum(l) +| 1, non_empty_buffer.len)];
}
pub fn nonzero(l: Limit) bool {
return @intFromEnum(l) > 0;
}
/// Return a new limit reduced by `amount`, or `null` if the limit
/// would be exceeded.
pub fn subtract(l: Limit, amount: usize) ?Limit {
if (l == .unlimited) return .unlimited;
if (amount > @intFromEnum(l)) return null;
return @enumFromInt(@intFromEnum(l) - amount);
}
};
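// Illustrative sketch (not part of this commit): composing Limit values,
// using only the declarations above.
test "Limit composition sketch" {
    const cap: Limit = .limited(8);
    var buf: [16]u8 = undefined;
    const dest = cap.slice(&buf); // at most 8 bytes of buf
    const combined = cap.min(.limited(3)); // .limited(3), the smaller budget
    const rest = cap.subtract(5); // .limited(3); null once the budget is exceeded
    _ = .{ dest, combined, rest };
}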
pub const Reader = @import("io/Reader.zig");
pub const Writer = @import("io/Writer.zig");
fn getStdOutHandle() posix.fd_t {
if (is_windows) {
return windows.peb().ProcessParameters.hStdOutput;
@ -62,6 +125,7 @@ pub fn getStdIn() File {
return .{ .handle = getStdInHandle() };
}
/// Deprecated in favor of `Reader`.
pub fn GenericReader(
comptime Context: type,
comptime ReadError: type,
@ -289,6 +353,7 @@ pub fn GenericReader(
};
}
/// Deprecated in favor of `Writer`.
pub fn GenericWriter(
comptime Context: type,
comptime WriteError: type,
@ -350,15 +415,10 @@ pub fn GenericWriter(
};
}
/// Deprecated; consider switching to `AnyReader` or use `GenericReader`
/// to use previous API.
pub const Reader = GenericReader;
/// Deprecated; consider switching to `AnyWriter` or use `GenericWriter`
/// to use previous API.
pub const Writer = GenericWriter;
pub const AnyReader = @import("io/Reader.zig");
pub const AnyWriter = @import("io/Writer.zig");
/// Deprecated in favor of `Reader`.
pub const AnyReader = @import("io/DeprecatedReader.zig");
/// Deprecated in favor of `Writer`.
pub const AnyWriter = @import("io/DeprecatedWriter.zig");
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
@ -819,8 +879,8 @@ pub fn PollFiles(comptime StreamEnum: type) type {
}
test {
_ = AnyReader;
_ = AnyWriter;
_ = Reader;
_ = Writer;
_ = @import("io/bit_reader.zig");
_ = @import("io/bit_writer.zig");
_ = @import("io/buffered_atomic_file.zig");
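Everything else in this commit follows from these aliases: the comptime factories keep their old behavior under the Generic* names, while `std.io.Reader` and `std.io.Writer` now name the new concrete, buffered, vtable-based interfaces imported above. A before/after sketch of a typical call site affected by the rename:

// Before this commit, std.io.Reader was the comptime factory:
pub const Reader = std.io.Reader(*Self, Error, read);
// After, the factory keeps its behavior under a new name, and
// std.io.Reader refers to the new concrete interface:
pub const Reader = std.io.GenericReader(*Self, Error, read);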


@ -0,0 +1,386 @@
context: *const anyopaque,
readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
pub const Error = anyerror;
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub fn read(self: Self, buffer: []u8) anyerror!usize {
return self.readFn(self.context, buffer);
}
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
return readAtLeast(self, buffer, buffer.len);
}
/// Returns the number of bytes read, calling the underlying read
/// function the minimal number of times until the buffer has at least
/// `len` bytes filled. If the number read is less than `len` it means
/// the stream reached the end. Reaching the end of the stream is not
/// an error condition.
pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
assert(len <= buffer.len);
var index: usize = 0;
while (index < len) {
const amt = try self.read(buffer[index..]);
if (amt == 0) break;
index += amt;
}
return index;
}
/// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
const amt_read = try self.readAll(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Appends to the `std.ArrayList` contents by reading from the stream
/// until end of stream is found.
/// If the number of bytes appended would exceed `max_append_size`,
/// `error.StreamTooLong` is returned
/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
pub fn readAllArrayList(
self: Self,
array_list: *std.ArrayList(u8),
max_append_size: usize,
) anyerror!void {
return self.readAllArrayListAligned(null, array_list, max_append_size);
}
pub fn readAllArrayListAligned(
self: Self,
comptime alignment: ?Alignment,
array_list: *std.ArrayListAligned(u8, alignment),
max_append_size: usize,
) anyerror!void {
try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
array_list.expandToCapacity();
const dest_slice = array_list.items[start_index..];
const bytes_read = try self.readAll(dest_slice);
start_index += bytes_read;
if (start_index - original_len > max_append_size) {
array_list.shrinkAndFree(original_len + max_append_size);
return error.StreamTooLong;
}
if (bytes_read != dest_slice.len) {
array_list.shrinkAndFree(start_index);
return;
}
// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
try array_list.ensureTotalCapacity(start_index + 1);
}
}
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
/// `std.ArrayList` is populated with `max_size` bytes from the stream.
pub fn readUntilDelimiterArrayList(
self: Self,
array_list: *std.ArrayList(u8),
delimiter: u8,
max_size: usize,
) anyerror!void {
array_list.shrinkRetainingCapacity(0);
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Allocates enough memory to read until `delimiter`. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
self: Self,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) anyerror![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, `error.EndOfStream` is returned.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
var fbs = std.io.fixedBufferStream(buf);
try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
const output = fbs.getWritten();
buf[output.len] = delimiter; // emulating old behaviour
return output;
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
/// Allocates enough memory to read until `delimiter` or end-of-stream.
/// If the allocated memory would be greater than `max_size`, returns
/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
/// of the stream. If this function is called again after that, returns
/// null.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
self: Self,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) anyerror!?[]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
error.EndOfStream => if (array_list.items.len == 0) {
return null;
},
else => |e| return e,
};
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, returns the rest of the stream. If this
/// function is called again after that, returns null.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
var fbs = std.io.fixedBufferStream(buf);
self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
error.EndOfStream => if (fbs.getWritten().len == 0) {
return null;
},
else => |e| return e,
};
const output = fbs.getWritten();
buf[output.len] = delimiter; // emulating old behaviour
return output;
}
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
/// returns `error.StreamTooLong` and finishes appending.
/// If `optional_max_size` is null, appending is unbounded.
pub fn streamUntilDelimiter(
self: Self,
writer: anytype,
delimiter: u8,
optional_max_size: ?usize,
) anyerror!void {
if (optional_max_size) |max_size| {
for (0..max_size) |_| {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
}
return error.StreamTooLong;
} else {
while (true) {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
}
// Cannot return `error.StreamTooLong` since there is no boundary.
}
}
/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
/// If end-of-stream is found, this function succeeds.
pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
while (true) {
const byte = self.readByte() catch |err| switch (err) {
error.EndOfStream => return,
else => |e| return e,
};
if (byte == delimiter) return;
}
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: Self) anyerror!u8 {
var result: [1]u8 = undefined;
const amt_read = try self.read(result[0..]);
if (amt_read < 1) return error.EndOfStream;
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: Self) anyerror!i8 {
return @as(i8, @bitCast(try self.readByte()));
}
/// Reads exactly `num_bytes` bytes and returns as an array.
/// `num_bytes` must be comptime-known
pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
var bytes: [num_bytes]u8 = undefined;
try self.readNoEof(&bytes);
return bytes;
}
/// Reads bytes until `bounded.len` is equal to `num_bytes`,
/// or the stream ends.
///
/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
pub fn readIntoBoundedBytes(
self: Self,
comptime num_bytes: usize,
bounded: *std.BoundedArray(u8, num_bytes),
) anyerror!void {
while (bounded.len < num_bytes) {
// get at most the number of bytes free in the bounded array
const bytes_read = try self.read(bounded.unusedCapacitySlice());
if (bytes_read == 0) return;
// bytes_read will never be larger than @TypeOf(bounded.len)
// due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
}
}
/// Reads at most `num_bytes` and returns as a bounded array.
pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
var result = std.BoundedArray(u8, num_bytes){};
try self.readIntoBoundedBytes(num_bytes, &result);
return result;
}
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
return mem.readInt(T, &bytes, endian);
}
pub fn readVarInt(
self: Self,
comptime ReturnType: type,
endian: std.builtin.Endian,
size: usize,
) anyerror!ReturnType {
assert(size <= @sizeOf(ReturnType));
var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
const bytes = bytes_buf[0..size];
try self.readNoEof(bytes);
return mem.readVarInt(ReturnType, bytes, endian);
}
/// Optional parameters for `skipBytes`
pub const SkipBytesOptions = struct {
buf_size: usize = 512,
};
// `num_bytes` is a `u64` to match `off_t`
/// Reads `num_bytes` bytes from the stream and discards them
pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
var buf: [options.buf_size]u8 = undefined;
var remaining = num_bytes;
while (remaining > 0) {
const amt = @min(remaining, options.buf_size);
try self.readNoEof(buf[0..amt]);
remaining -= amt;
}
}
/// Reads `slice.len` bytes from the stream and returns if they are the same as the passed slice
pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
var i: usize = 0;
var matches = true;
while (i < slice.len) : (i += 1) {
if (slice[i] != try self.readByte()) {
matches = false;
}
}
return matches;
}
pub fn readStruct(self: Self, comptime T: type) anyerror!T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
var res: [1]T = undefined;
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
}
pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
var res = try self.readStruct(T);
if (native_endian != endian) {
mem.byteSwapAllFields(T, &res);
}
return res;
}
/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns `error.InvalidValue`.
/// TODO optimization taking advantage of most fields being in order
pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
const E = error{
/// An integer was read, but it did not match any of the tags in the supplied enum.
InvalidValue,
};
const type_info = @typeInfo(Enum).@"enum";
const tag = try self.readInt(type_info.tag_type, endian);
inline for (std.meta.fields(Enum)) |field| {
if (tag == field.value) {
return @field(Enum, field.name);
}
}
return E.InvalidValue;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discard(self: Self) anyerror!u64 {
var trash: [4096]u8 = undefined;
var index: u64 = 0;
while (true) {
const n = try self.read(&trash);
if (n == 0) return index;
index += n;
}
}
const std = @import("../std.zig");
const Self = @This();
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;
test {
_ = @import("Reader/test.zig");
}
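This new file is the old type-erased reader, now reachable as `std.io.AnyReader` via `io/DeprecatedReader.zig`. A hedged sketch of its delimiter API, assuming `any_reader: std.io.AnyReader` was obtained from an existing stream:

var line_buf: [256]u8 = undefined;
var fbs = std.io.fixedBufferStream(&line_buf);
// Copies bytes until '\n'; error.StreamTooLong if line_buf would overflow.
try any_reader.streamUntilDelimiter(fbs.writer(), '\n', line_buf.len);
const line = fbs.getWritten();
_ = line;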


@ -0,0 +1,83 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const native_endian = @import("builtin").target.cpu.arch.endian();
context: *const anyopaque,
writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
const Self = @This();
pub const Error = anyerror;
pub fn write(self: Self, bytes: []const u8) anyerror!usize {
return self.writeFn(self.context, bytes);
}
pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
var index: usize = 0;
while (index != bytes.len) {
index += try self.write(bytes[index..]);
}
}
pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
return std.fmt.format(self, format, args);
}
pub fn writeByte(self: Self, byte: u8) anyerror!void {
const array = [1]u8{byte};
return self.writeAll(&array);
}
pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
var bytes: [256]u8 = undefined;
@memset(bytes[0..], byte);
var remaining: usize = n;
while (remaining > 0) {
const to_write = @min(remaining, bytes.len);
try self.writeAll(bytes[0..to_write]);
remaining -= to_write;
}
}
pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeAll(bytes);
}
}
pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
return self.writeAll(&bytes);
}
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
return self.writeAll(mem.asBytes(&value));
}
pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
// TODO: make sure this value is not a reference type
if (native_endian == endian) {
return self.writeStruct(value);
} else {
var copy = value;
mem.byteSwapAllFields(@TypeOf(value), &copy);
return self.writeStruct(copy);
}
}
pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
// TODO: figure out how to adjust std lib abstractions so that this ends up
// doing sendfile or maybe even copy_file_range under the right conditions.
var buf: [4000]u8 = undefined;
while (true) {
const n = try file.readAll(&buf);
try self.writeAll(buf[0..n]);
if (n < buf.len) return;
}
}
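And the matching type-erased writer (`std.io.AnyWriter`, per the `io/DeprecatedWriter.zig` import above). A sketch using only methods defined in this file, assuming `any_writer: std.io.AnyWriter`:

// extern struct: defined in-memory layout, so writeStruct's assert passes.
const Header = extern struct { magic: u32, len: u32 };
try any_writer.writeInt(u32, 0xfeed_face, .little);
try any_writer.writeStruct(Header{ .magic = 0xfeed_face, .len = 56 });
try any_writer.writeByteNTimes(0, 4);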

File diff suppressed because it is too large.

@ -0,0 +1,42 @@
const Limited = @This();
const std = @import("../../std.zig");
const Reader = std.io.Reader;
const Writer = std.io.Writer;
const Limit = std.io.Limit;
unlimited: *Reader,
remaining: Limit,
interface: Reader,
pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
return .{
.unlimited = reader,
.remaining = limit,
.interface = .{
.vtable = &.{
.stream = stream,
.discard = discard,
},
.buffer = buffer,
.seek = 0,
.end = 0,
},
};
}
fn stream(context: ?*anyopaque, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @alignCast(@ptrCast(context));
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.stream(w, combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}
fn discard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
const l: *Limited = @alignCast(@ptrCast(context));
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.discard(combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}
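A construction sketch for the new Limited adapter (hedged; `src: *std.io.Reader` and the sizes are illustrative):

var buf: [64]u8 = undefined;
// Reads through `src` stop after 100 bytes, as if at end-of-stream.
var limited: Limited = .init(src, .limited(100), &buf);
const reader: *std.io.Reader = &limited.interface;
_ = reader;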

File diff suppressed because it is too large.

@ -11,7 +11,7 @@ pub const BufferedAtomicFile = struct {
pub const buffer_size = 4096;
pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
pub const Writer = std.io.Writer(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write);
pub const Writer = std.io.GenericWriter(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write);
/// TODO when https://github.com/ziglang/zig/issues/2761 is solved
/// this API will not need an allocator


@ -12,7 +12,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty
end: usize = 0,
pub const Error = ReaderType.Error;
pub const Reader = io.Reader(*Self, Error, read);
pub const Reader = io.GenericReader(*Self, Error, read);
const Self = @This();
@ -61,7 +61,7 @@ test "OneByte" {
const Error = error{NoError};
const Self = @This();
const Reader = io.Reader(*Self, Error, read);
const Reader = io.GenericReader(*Self, Error, read);
fn init(str: []const u8) Self {
return Self{
@ -105,7 +105,7 @@ test "Block" {
const Error = error{NoError};
const Self = @This();
const Reader = io.Reader(*Self, Error, read);
const Reader = io.GenericReader(*Self, Error, read);
fn init(block: []const u8, reads_allowed: usize) Self {
return Self{


@ -10,7 +10,7 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty
end: usize = 0,
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
const Self = @This();


@ -3,7 +3,7 @@ const builtin = @import("builtin");
const io = std.io;
const testing = std.testing;
pub const CWriter = io.Writer(*std.c.FILE, std.fs.File.WriteError, cWriterWrite);
pub const CWriter = io.GenericWriter(*std.c.FILE, std.fs.File.WriteError, cWriterWrite);
pub fn cWriter(c_file: *std.c.FILE) CWriter {
return .{ .context = c_file };


@ -8,7 +8,7 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type {
return struct {
const Self = @This();
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
anything_changed: bool,
underlying_writer: WriterType,


@ -9,7 +9,7 @@ pub fn CountingReader(comptime ReaderType: anytype) type {
bytes_read: u64 = 0,
pub const Error = ReaderType.Error;
pub const Reader = io.Reader(*@This(), Error, read);
pub const Reader = io.GenericReader(*@This(), Error, read);
pub fn read(self: *@This(), buf: []u8) Error!usize {
const amt = try self.child_reader.read(buf);


@ -9,7 +9,7 @@ pub fn CountingWriter(comptime WriterType: type) type {
child_stream: WriterType,
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
const Self = @This();


@ -8,7 +8,7 @@ pub fn FindByteWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
underlying_writer: UnderlyingWriter,
byte_found: bool,


@ -4,8 +4,8 @@ const testing = std.testing;
const mem = std.mem;
const assert = std.debug.assert;
/// This turns a byte buffer into an `io.Writer`, `io.Reader`, or `io.SeekableStream`.
/// If the supplied byte buffer is const, then `io.Writer` is not available.
/// This turns a byte buffer into an `io.GenericWriter`, `io.GenericReader`, or `io.SeekableStream`.
/// If the supplied byte buffer is const, then `io.GenericWriter` is not available.
pub fn FixedBufferStream(comptime Buffer: type) type {
return struct {
/// `Buffer` is either a `[]u8` or `[]const u8`.
@ -17,8 +17,8 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
pub const SeekError = error{};
pub const GetSeekPosError = error{};
pub const Reader = io.Reader(*Self, ReadError, read);
pub const Writer = io.Writer(*Self, WriteError, write);
pub const Reader = io.GenericReader(*Self, ReadError, read);
pub const Writer = io.GenericWriter(*Self, WriteError, write);
pub const SeekableStream = io.SeekableStream(
*Self,
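Usage is unchanged by the rename; a sketch:

var buf: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
try fbs.writer().writeAll("abc"); // GenericWriter over the buffer
try fbs.seekTo(0);
const first = try fbs.reader().readByte(); // 'a'
_ = first;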


@ -9,7 +9,7 @@ pub fn LimitedReader(comptime ReaderType: type) type {
bytes_left: u64,
pub const Error = ReaderType.Error;
pub const Reader = io.Reader(*Self, Error, read);
pub const Reader = io.GenericReader(*Self, Error, read);
const Self = @This();


@ -15,7 +15,7 @@ pub fn MultiWriter(comptime Writers: type) type {
streams: Writers,
pub const Error = ErrSet;
pub const Writer = io.Writer(*Self, Error, write);
pub const Writer = io.GenericWriter(*Self, Error, write);
pub fn writer(self: *Self) Writer {
return .{ .context = self };


@ -2,9 +2,9 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
const io = std.io;
/// Provides `io.Reader`, `io.Writer`, and `io.SeekableStream` for in-memory buffers as
/// Provides `io.GenericReader`, `io.GenericWriter`, and `io.SeekableStream` for in-memory buffers as
/// well as files.
/// For memory sources, if the supplied byte buffer is const, then `io.Writer` is not available.
/// For memory sources, if the supplied byte buffer is const, then `io.GenericWriter` is not available.
/// The error set of the stream functions is the error set of the corresponding file functions.
pub const StreamSource = union(enum) {
// TODO: expose UEFI files to std.os in a way that allows this to be true
@ -26,8 +26,8 @@ pub const StreamSource = union(enum) {
pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{});
pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{});
pub const Reader = io.Reader(*StreamSource, ReadError, read);
pub const Writer = io.Writer(*StreamSource, WriteError, write);
pub const Reader = io.GenericReader(*StreamSource, ReadError, read);
pub const Writer = io.GenericWriter(*StreamSource, WriteError, write);
pub const SeekableStream = io.SeekableStream(
*StreamSource,
SeekError,


@ -1,12 +1,12 @@
//! JSON parsing and stringification conforming to RFC 8259. https://datatracker.ietf.org/doc/html/rfc8259
//!
//! The low-level `Scanner` API produces `Token`s from an input slice or successive slices of inputs,
//! The `Reader` API connects a `std.io.Reader` to a `Scanner`.
//! The `Reader` API connects a `std.io.GenericReader` to a `Scanner`.
//!
//! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
//! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
//!
//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.Writer`.
//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.GenericWriter`.
//! The high-level `stringify` serializes a Zig or `Value` type into JSON.
const builtin = @import("builtin");
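As a grounding sketch of the layering this doc comment describes (the high-level API is untouched by the rename; `gpa` is an assumed allocator):

const Point = struct { x: f64, y: f64 };
const parsed = try std.json.parseFromSlice(Point, gpa, "{\"x\":1,\"y\":2}", .{});
defer parsed.deinit();
// parsed.value.x == 1.0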


@ -219,7 +219,7 @@ pub const AllocWhen = enum { alloc_if_needed, alloc_always };
/// This limit can be specified by calling `nextAllocMax()` instead of `nextAlloc()`.
pub const default_max_value_len = 4 * 1024 * 1024;
/// Connects a `std.io.Reader` to a `std.json.Scanner`.
/// Connects a `std.io.GenericReader` to a `std.json.Scanner`.
/// All `next*()` methods here handle `error.BufferUnderrun` from `std.json.Scanner`, and then read from the reader.
pub fn Reader(comptime buffer_size: usize, comptime ReaderType: type) type {
return struct {


@ -38,7 +38,7 @@ pub const StringifyOptions = struct {
emit_nonportable_numbers_as_strings: bool = false,
};
/// Writes the given value to the `std.io.Writer` stream.
/// Writes the given value to the `std.io.GenericWriter` stream.
/// See `WriteStream` for how the given value is serialized into JSON.
/// The maximum nesting depth of the output JSON document is 256.
/// See also `stringifyMaxDepth` and `stringifyArbitraryDepth`.
@ -81,7 +81,7 @@ pub fn stringifyArbitraryDepth(
}
/// Calls `stringifyArbitraryDepth` and stores the result in dynamically allocated memory
/// instead of taking a `std.io.Writer`.
/// instead of taking a `std.io.GenericWriter`.
///
/// Caller owns returned memory.
pub fn stringifyAlloc(
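A hedged usage sketch of the allocating variant (assuming an allocator `gpa`; the value and options are illustrative):

const text = try std.json.stringifyAlloc(gpa, .{ .ok = true }, .{ .whitespace = .indent_2 });
defer gpa.free(text); // caller owns the returned memory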


@ -307,7 +307,7 @@ test "stringify tuple" {
fn testStringify(expected: []const u8, value: anytype, options: StringifyOptions) !void {
const ValidationWriter = struct {
const Self = @This();
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
pub const Error = error{
TooMuchData,
DifferentData,


@ -1845,8 +1845,8 @@ pub const Stream = struct {
pub const ReadError = posix.ReadError;
pub const WriteError = posix.WriteError;
pub const Reader = io.Reader(Stream, ReadError, read);
pub const Writer = io.Writer(Stream, WriteError, write);
pub const Reader = io.GenericReader(Stream, ReadError, read);
pub const Writer = io.GenericWriter(Stream, WriteError, write);
pub fn reader(self: Stream) Reader {
return .{ .context = self };


@ -88,8 +88,8 @@ pub const File = extern struct {
getPosition,
getEndPos,
);
pub const Reader = io.Reader(*File, ReadError, read);
pub const Writer = io.Writer(*File, WriteError, write);
pub const Reader = io.GenericReader(*File, ReadError, read);
pub const Writer = io.GenericWriter(*File, WriteError, write);
pub fn seekableStream(self: *File) SeekableStream {
return .{ .context = self };


@ -348,7 +348,7 @@ pub fn Iterator(comptime ReaderType: type) type {
unread_bytes: *u64,
parent_reader: ReaderType,
pub const Reader = std.io.Reader(File, ReaderType.Error, File.read);
pub const Reader = std.io.GenericReader(File, ReaderType.Error, File.read);
pub fn reader(self: File) Reader {
return .{ .context = self };


@ -9520,7 +9520,7 @@ fn WriterWithErrors(comptime BackingWriter: type, comptime ExtraErrors: type) ty
backing_writer: BackingWriter,
pub const Error = BackingWriter.Error || ExtraErrors;
pub const Writer = std.io.Writer(*const Self, Error, write);
pub const Writer = std.io.GenericWriter(*const Self, Error, write);
const Self = @This();


@ -3245,7 +3245,7 @@ fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const WriteError = UnderlyingWriter.Error;
pub const Writer = std.io.Writer(*Self, WriteError, write);
pub const Writer = std.io.GenericWriter(*Self, WriteError, write);
pub const IndentType = enum {
normal,


@ -322,7 +322,7 @@ test parseCharLiteral {
);
}
/// Parses `bytes` as a Zig string literal and writes the result to the std.io.Writer type.
/// Parses `bytes` as a Zig string literal and writes the result to the `std.io.GenericWriter` type.
/// Asserts `bytes` has '"' at beginning and end.
pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result {
assert(bytes.len >= 2 and bytes[0] == '"' and bytes[bytes.len - 1] == '"');


@ -106,7 +106,7 @@ pub const EndRecord = extern struct {
/// Find and return the end record for the given seekable zip stream.
/// Note that `seekable_stream` must be an instance of `std.io.SeekableStream` and
/// its context must also have a `.reader()` method that returns an instance of
/// `std.io.Reader`.
/// `std.io.GenericReader`.
pub fn findEndRecord(seekable_stream: anytype, stream_len: u64) !EndRecord {
var buf: [@sizeOf(EndRecord) + std.math.maxInt(u16)]u8 = undefined;
const record_len_max = @min(stream_len, buf.len);
@ -617,7 +617,7 @@ pub const ExtractOptions = struct {
/// Extract the zipped files inside `seekable_stream` to the given `dest` directory.
/// Note that `seekable_stream` must be an instance of `std.io.SeekableStream` and
/// its context must also have a `.reader()` method that returns an instance of
/// `std.io.Reader`.
/// `std.io.GenericReader`.
pub fn extract(dest: std.fs.Dir, seekable_stream: anytype, options: ExtractOptions) !void {
const SeekableStream = @TypeOf(seekable_stream);
var iter = try Iterator(SeekableStream).init(seekable_stream);
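A call sketch satisfying the documented constraint: `std.fs.File.seekableStream()` carries a `File` context, which also provides `.reader()` (the path and `dest_dir: std.fs.Dir` are illustrative):

var zip_file = try std.fs.cwd().openFile("archive.zip", .{});
defer zip_file.close();
try std.zip.extract(dest_dir, zip_file.seekableStream(), .{});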


@ -1026,7 +1026,7 @@ pub const Session = struct {
ProtocolError,
UnexpectedPacket,
};
pub const Reader = std.io.Reader(*FetchStream, ReadError, read);
pub const Reader = std.io.GenericReader(*FetchStream, ReadError, read);
const StreamCode = enum(u8) {
pack_data = 1,