fix 32-bit builds

Andrew Kelley 2025-08-04 20:35:07 -07:00
parent 18c4a500b6
commit fe0ff7f718
3 changed files with 33 additions and 33 deletions
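
The changes below are mechanical and fall into two groups. First, every `@fieldParentPtr` result gains an `@alignCast` wrapper, likely because on 32-bit targets the embedded interface fields (`writer`, `reader`, `interface`, `pool_node`, `connection`) only guarantee 4-byte alignment while their parent structs still contain 8-byte-aligned fields, so the compiler can no longer prove the parent pointer's alignment on its own. Second, `std.Io.Limit` values built from `u64` lengths switch from `.limited` to `.limited64` (presumably because `.limited` takes a `usize`), and `max_chunk_len` is computed with `std.math.pow(u64, ...)` since 16^8 only fits in 32 bits after the final `- 1`. A minimal sketch of the `@alignCast` pattern, using a hypothetical `Client`/`Interface` pair rather than code from this commit:

    const std = @import("std");

    // Hypothetical parent/interface pair; the u64 field gives Client 8-byte
    // alignment even on targets where pointers are only 4 bytes.
    const Interface = struct { dummy: u8 };
    const Client = struct {
        remaining: u64,
        interface: Interface,
    };

    fn fromInterface(i: *Interface) *Client {
        // @fieldParentPtr can only infer the alignment implied by the field
        // pointer; @alignCast re-asserts Client's stricter natural alignment,
        // which is the one-line change applied at each call site below.
        return @alignCast(@fieldParentPtr("interface", i));
    }

    test fromInterface {
        var c: Client = .{ .remaining = 0, .interface = .{ .dummy = 0 } };
        try std.testing.expectEqual(&c, fromInterface(&c.interface));
    }

Under those assumptions, the same wrapping is repeated verbatim at every `@fieldParentPtr` call site in the diff.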

File 1 of 3:

@@ -910,7 +910,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
 }

 fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
-const c: *Client = @fieldParentPtr("writer", w);
+const c: *Client = @alignCast(@fieldParentPtr("writer", w));
 if (true) @panic("update to use the buffer and flush");
 const sliced_data = if (splat == 0) data[0..data.len -| 1] else data;
 const output = c.output;
@@ -1046,7 +1046,7 @@ pub fn eof(c: Client) bool {
 }

 fn stream(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
-const c: *Client = @fieldParentPtr("reader", r);
+const c: *Client = @alignCast(@fieldParentPtr("reader", r));
 if (c.eof()) return error.EndOfStream;
 const input = c.input;
 // If at least one full encrypted record is not buffered, read once.

File 2 of 3:

@@ -519,33 +519,33 @@ pub const Reader = struct {
 w: *Writer,
 limit: std.Io.Limit,
 ) std.Io.Reader.StreamError!usize {
-const reader: *Reader = @fieldParentPtr("interface", io_r);
+const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
 const remaining_content_length = &reader.state.body_remaining_content_length;
 const remaining = remaining_content_length.*;
 if (remaining == 0) {
 reader.state = .ready;
 return error.EndOfStream;
 }
-const n = try reader.in.stream(w, limit.min(.limited(remaining)));
+const n = try reader.in.stream(w, limit.min(.limited64(remaining)));
 remaining_content_length.* = remaining - n;
 return n;
 }

 fn contentLengthDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
-const reader: *Reader = @fieldParentPtr("interface", io_r);
+const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
 const remaining_content_length = &reader.state.body_remaining_content_length;
 const remaining = remaining_content_length.*;
 if (remaining == 0) {
 reader.state = .ready;
 return error.EndOfStream;
 }
-const n = try reader.in.discard(limit.min(.limited(remaining)));
+const n = try reader.in.discard(limit.min(.limited64(remaining)));
 remaining_content_length.* = remaining - n;
 return n;
 }

 fn chunkedStream(io_r: *std.Io.Reader, w: *Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
-const reader: *Reader = @fieldParentPtr("interface", io_r);
+const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
 const chunk_len_ptr = switch (reader.state) {
 .ready => return error.EndOfStream,
 .body_remaining_chunk_len => |*x| x,
@@ -591,7 +591,7 @@ pub const Reader = struct {
 }
 }
 if (cp.chunk_len == 0) return parseTrailers(reader, 0);
-const n = try in.stream(w, limit.min(.limited(cp.chunk_len)));
+const n = try in.stream(w, limit.min(.limited64(cp.chunk_len)));
 chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
 return n;
 },
@@ -607,7 +607,7 @@ pub const Reader = struct {
 continue :len .head;
 },
 else => |remaining_chunk_len| {
-const n = try in.stream(w, limit.min(.limited(@intFromEnum(remaining_chunk_len) - 2)));
+const n = try in.stream(w, limit.min(.limited64(@intFromEnum(remaining_chunk_len) - 2)));
 chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
 return n;
 },
@@ -615,7 +615,7 @@ pub const Reader = struct {
 }

 fn chunkedDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
-const reader: *Reader = @fieldParentPtr("interface", io_r);
+const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
 const chunk_len_ptr = switch (reader.state) {
 .ready => return error.EndOfStream,
 .body_remaining_chunk_len => |*x| x,
@@ -659,7 +659,7 @@ pub const Reader = struct {
 }
 }
 if (cp.chunk_len == 0) return parseTrailers(reader, 0);
-const n = try in.discard(limit.min(.limited(cp.chunk_len)));
+const n = try in.discard(limit.min(.limited64(cp.chunk_len)));
 chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
 return n;
 },
@@ -675,7 +675,7 @@ pub const Reader = struct {
 continue :len .head;
 },
 else => |remaining_chunk_len| {
-const n = try in.discard(limit.min(.limited(remaining_chunk_len.int() - 2)));
+const n = try in.discard(limit.min(.limited64(remaining_chunk_len.int() - 2)));
 chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
 return n;
 },
@@ -758,7 +758,7 @@ pub const BodyWriter = struct {
 /// How many zeroes to reserve for hex-encoded chunk length.
 const chunk_len_digits = 8;
-const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
+const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
 const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";
 comptime {
@@ -918,7 +918,7 @@ pub const BodyWriter = struct {
 }

 pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const out = bw.http_protocol_output;
 const n = try out.writeSplatHeader(w.buffered(), data, splat);
@@ -927,7 +927,7 @@ pub const BodyWriter = struct {
 }

 pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const out = bw.http_protocol_output;
 const n = try out.writeSplatHeader(w.buffered(), data, splat);
@@ -935,7 +935,7 @@ pub const BodyWriter = struct {
 }

 pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 const slice = data[0 .. data.len - 1];
 const pattern = data[slice.len];
 var written: usize = pattern.len * splat;
@@ -949,7 +949,7 @@ pub const BodyWriter = struct {
 }

 pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 if (File.Handle == void) return error.Unimplemented;
 if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
 switch (bw.state) {
@@ -976,7 +976,7 @@ pub const BodyWriter = struct {
 /// Returns `null` if size cannot be computed without making any syscalls.
 pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const out = bw.http_protocol_output;
 const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
@@ -984,7 +984,7 @@ pub const BodyWriter = struct {
 }

 pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const out = bw.http_protocol_output;
 const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
@@ -993,7 +993,7 @@ pub const BodyWriter = struct {
 }

 pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
 // If the file size is unknown, we cannot lower to a `sendFile` since we would
@@ -1041,7 +1041,7 @@ pub const BodyWriter = struct {
 }

 pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-const bw: *BodyWriter = @fieldParentPtr("writer", w);
+const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
 assert(!bw.isEliding());
 const out = bw.http_protocol_output;
 const data_len = w.end + Writer.countSplat(data, splat);

File 3 of 3:

@@ -82,7 +82,7 @@ pub const ConnectionPool = struct {
 var next = pool.free.last;
 while (next) |node| : (next = node.prev) {
-const connection: *Connection = @fieldParentPtr("pool_node", node);
+const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
 if (connection.protocol != criteria.protocol) continue;
 if (connection.port != criteria.port) continue;
@@ -127,7 +127,7 @@ pub const ConnectionPool = struct {
 if (connection.closing or pool.free_size == 0) return connection.destroy();
 if (pool.free_len >= pool.free_size) {
-const popped: *Connection = @fieldParentPtr("pool_node", pool.free.popFirst().?);
+const popped: *Connection = @alignCast(@fieldParentPtr("pool_node", pool.free.popFirst().?));
 pool.free_len -= 1;
 popped.destroy();
@@ -183,14 +183,14 @@ pub const ConnectionPool = struct {
 var next = pool.free.first;
 while (next) |node| {
-const connection: *Connection = @fieldParentPtr("pool_node", node);
+const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
 next = node.next;
 connection.destroy();
 }

 next = pool.used.first;
 while (next) |node| {
-const connection: *Connection = @fieldParentPtr("pool_node", node);
+const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
 next = node.next;
 connection.destroy();
 }
@@ -366,11 +366,11 @@ pub const Connection = struct {
 return switch (c.protocol) {
 .tls => {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 return tls.host();
 },
 .plain => {
-const plain: *Plain = @fieldParentPtr("connection", c);
+const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
 return plain.host();
 },
 };
@@ -383,11 +383,11 @@ pub const Connection = struct {
 switch (c.protocol) {
 .tls => {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 tls.destroy();
 },
 .plain => {
-const plain: *Plain = @fieldParentPtr("connection", c);
+const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
 plain.destroy();
 },
 }
@@ -399,7 +399,7 @@ pub const Connection = struct {
 return switch (c.protocol) {
 .tls => {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 return &tls.client.writer;
 },
 .plain => &c.stream_writer.interface,
@@ -412,7 +412,7 @@ pub const Connection = struct {
 return switch (c.protocol) {
 .tls => {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 return &tls.client.reader;
 },
 .plain => c.stream_reader.interface(),
@@ -422,7 +422,7 @@ pub const Connection = struct {
 pub fn flush(c: *Connection) Writer.Error!void {
 if (c.protocol == .tls) {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 try tls.client.writer.flush();
 }
 try c.stream_writer.interface.flush();
@@ -434,7 +434,7 @@ pub const Connection = struct {
 pub fn end(c: *Connection) Writer.Error!void {
 if (c.protocol == .tls) {
 if (disable_tls) unreachable;
-const tls: *Tls = @fieldParentPtr("connection", c);
+const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
 try tls.client.end();
 try tls.client.writer.flush();
 }