Merge pull request #24698 from ziglang/http
std: rework HTTP and TLS for new I/O API
commit 5998a8cebe
31 changed files with 3774 additions and 5326 deletions
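
Not part of the diff: the hunks below all follow one migration pattern, replacing the deprecated GenericReader/fixedBufferStream helpers with the new std.Io.Reader and std.Io.Writer interfaces. A minimal, hedged sketch using only APIs that appear in the hunks (the byte values are made up for illustration):

    const std = @import("std");

    test "std.Io.Reader.fixed replaces std.io.fixedBufferStream (illustrative)" {
        const bytes = "\x2a\x00\x00\x00";
        // Old pattern: var fbs = std.io.fixedBufferStream(bytes);
        //              _ = try fbs.reader().readInt(u32, .little);
        var reader: std.Io.Reader = .fixed(bytes);
        const value = try reader.takeInt(u32, .little);
        try std.testing.expectEqual(@as(u32, 42), value);
    }

For files, the same interface is reached through a buffered std.fs.File.Reader: var file_reader = file.reader(&buffer); and then &file_reader.interface is passed wherever a *std.Io.Reader is expected, as the Compiler hunks below show.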
@@ -1141,6 +1141,8 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
output_format = .res;
}
} else {
output_format_source = .output_format_arg;
}
options.output_source = .{ .filename = try filepathWithExtension(allocator, options.input_source.filename, output_format.?.extension()) };
} else {
@@ -1529,21 +1531,21 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
var diagnostics = Diagnostics.init(std.testing.allocator);
defer diagnostics.deinit();

var output = std.ArrayList(u8).init(std.testing.allocator);
var output: std.io.Writer.Allocating = .init(std.testing.allocator);
defer output.deinit();

var options = parse(std.testing.allocator, args, &diagnostics) catch |err| switch (err) {
    error.ParseError => {
        try diagnostics.renderToWriter(args, output.writer(), .no_color);
        try std.testing.expectEqualStrings(expected_output, output.items);
        try diagnostics.renderToWriter(args, &output.writer, .no_color);
        try std.testing.expectEqualStrings(expected_output, output.getWritten());
        return null;
    },
    else => |e| return e,
};
errdefer options.deinit();

try diagnostics.renderToWriter(args, output.writer(), .no_color);
try std.testing.expectEqualStrings(expected_output, output.items);
try diagnostics.renderToWriter(args, &output.writer, .no_color);
try std.testing.expectEqualStrings(expected_output, output.getWritten());
return options;
}

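Side note on the test hunk above (illustrative sketch, not part of the diff): std.io.Writer.Allocating takes over the role of the ArrayList(u8)-backed writer; bytes go through its writer field and come back out via getWritten():

    const std = @import("std");

    test "Writer.Allocating in place of ArrayList-backed output (illustrative)" {
        var output: std.io.Writer.Allocating = .init(std.testing.allocator);
        defer output.deinit();
        try output.writer.writeAll("hello");
        try std.testing.expectEqualStrings("hello", output.getWritten());
    }
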
@@ -550,7 +550,7 @@ pub const Compiler = struct {
// so get it here to simplify future usage.
const filename_token = node.filename.getFirstToken();

const file = self.searchForFile(filename_utf8) catch |err| switch (err) {
const file_handle = self.searchForFile(filename_utf8) catch |err| switch (err) {
    error.OutOfMemory => |e| return e,
    else => |e| {
        const filename_string_index = try self.diagnostics.putString(filename_utf8);

@@ -564,13 +564,15 @@ pub const Compiler = struct {
});
},
};
defer file.close();
defer file_handle.close();
var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(&file_buffer);

if (maybe_predefined_type) |predefined_type| {
    switch (predefined_type) {
        .GROUP_ICON, .GROUP_CURSOR => {
            // Check for animated icon first
            if (ani.isAnimatedIcon(file.deprecatedReader())) {
            if (ani.isAnimatedIcon(file_reader.interface.adaptToOldInterface())) {
                // Animated icons are just put into the resource unmodified,
                // and the resource type changes to ANIICON/ANICURSOR

@ -582,18 +584,18 @@ pub const Compiler = struct {
|
|||
header.type_value.ordinal = @intFromEnum(new_predefined_type);
|
||||
header.memory_flags = MemoryFlags.defaults(new_predefined_type);
|
||||
header.applyMemoryFlags(node.common_resource_attributes, self.source);
|
||||
header.data_size = @intCast(try file.getEndPos());
|
||||
header.data_size = @intCast(try file_reader.getSize());
|
||||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
try file.seekTo(0);
|
||||
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
|
||||
try file_reader.seekTo(0);
|
||||
try writeResourceData(writer, &file_reader.interface, header.data_size);
|
||||
return;
|
||||
}
|
||||
|
||||
// isAnimatedIcon moved the file cursor so reset to the start
|
||||
try file.seekTo(0);
|
||||
try file_reader.seekTo(0);
|
||||
|
||||
const icon_dir = ico.read(self.allocator, file.deprecatedReader(), try file.getEndPos()) catch |err| switch (err) {
|
||||
const icon_dir = ico.read(self.allocator, file_reader.interface.adaptToOldInterface(), try file_reader.getSize()) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => |e| {
|
||||
return self.iconReadError(
|
||||
|
|
@ -671,15 +673,15 @@ pub const Compiler = struct {
|
|||
try writer.writeInt(u16, entry.type_specific_data.cursor.hotspot_y, .little);
|
||||
}
|
||||
|
||||
try file.seekTo(entry.data_offset_from_start_of_file);
|
||||
var header_bytes = file.deprecatedReader().readBytesNoEof(16) catch {
|
||||
try file_reader.seekTo(entry.data_offset_from_start_of_file);
|
||||
var header_bytes = (file_reader.interface.takeArray(16) catch {
|
||||
return self.iconReadError(
|
||||
error.UnexpectedEOF,
|
||||
filename_utf8,
|
||||
filename_token,
|
||||
predefined_type,
|
||||
);
|
||||
};
|
||||
}).*;
|
||||
|
||||
const image_format = ico.ImageFormat.detect(&header_bytes);
|
||||
if (!image_format.validate(&header_bytes)) {
|
||||
|
|
@ -802,8 +804,8 @@ pub const Compiler = struct {
|
|||
},
|
||||
}
|
||||
|
||||
try file.seekTo(entry.data_offset_from_start_of_file);
|
||||
try writeResourceDataNoPadding(writer, file.deprecatedReader(), entry.data_size_in_bytes);
|
||||
try file_reader.seekTo(entry.data_offset_from_start_of_file);
|
||||
try writeResourceDataNoPadding(writer, &file_reader.interface, entry.data_size_in_bytes);
|
||||
try writeDataPadding(writer, full_data_size);
|
||||
|
||||
if (self.state.icon_id == std.math.maxInt(u16)) {
|
||||
|
|
@ -857,9 +859,9 @@ pub const Compiler = struct {
|
|||
},
|
||||
.BITMAP => {
|
||||
header.applyMemoryFlags(node.common_resource_attributes, self.source);
|
||||
const file_size = try file.getEndPos();
|
||||
const file_size = try file_reader.getSize();
|
||||
|
||||
const bitmap_info = bmp.read(file.deprecatedReader(), file_size) catch |err| {
|
||||
const bitmap_info = bmp.read(file_reader.interface.adaptToOldInterface(), file_size) catch |err| {
|
||||
const filename_string_index = try self.diagnostics.putString(filename_utf8);
|
||||
return self.addErrorDetailsAndFail(.{
|
||||
.err = .bmp_read_error,
|
||||
|
|
@ -921,18 +923,17 @@ pub const Compiler = struct {
|
|||
|
||||
header.data_size = bmp_bytes_to_write;
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
try file.seekTo(bmp.file_header_len);
|
||||
const file_reader = file.deprecatedReader();
|
||||
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.dib_header_size);
|
||||
try file_reader.seekTo(bmp.file_header_len);
|
||||
try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.dib_header_size);
|
||||
if (bitmap_info.getBitmasksByteLen() > 0) {
|
||||
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.getBitmasksByteLen());
|
||||
try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.getBitmasksByteLen());
|
||||
}
|
||||
if (bitmap_info.getExpectedPaletteByteLen() > 0) {
|
||||
try writeResourceDataNoPadding(writer, file_reader, @intCast(bitmap_info.getActualPaletteByteLen()));
|
||||
try writeResourceDataNoPadding(writer, &file_reader.interface, @intCast(bitmap_info.getActualPaletteByteLen()));
|
||||
}
|
||||
try file.seekTo(bitmap_info.pixel_data_offset);
|
||||
try file_reader.seekTo(bitmap_info.pixel_data_offset);
|
||||
const pixel_bytes: u32 = @intCast(file_size - bitmap_info.pixel_data_offset);
|
||||
try writeResourceDataNoPadding(writer, file_reader, pixel_bytes);
|
||||
try writeResourceDataNoPadding(writer, &file_reader.interface, pixel_bytes);
|
||||
try writeDataPadding(writer, bmp_bytes_to_write);
|
||||
return;
|
||||
},
|
||||
|
|
@ -956,7 +957,7 @@ pub const Compiler = struct {
|
|||
return;
|
||||
}
|
||||
header.applyMemoryFlags(node.common_resource_attributes, self.source);
|
||||
const file_size = try file.getEndPos();
|
||||
const file_size = try file_reader.getSize();
|
||||
if (file_size > std.math.maxInt(u32)) {
|
||||
return self.addErrorDetailsAndFail(.{
|
||||
.err = .resource_data_size_exceeds_max,
|
||||
|
|
@ -968,8 +969,9 @@ pub const Compiler = struct {
|
|||
header.data_size = @intCast(file_size);
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var header_slurping_reader = headerSlurpingReader(148, file.deprecatedReader());
|
||||
try writeResourceData(writer, header_slurping_reader.reader(), header.data_size);
|
||||
var header_slurping_reader = headerSlurpingReader(148, file_reader.interface.adaptToOldInterface());
|
||||
var adapter = header_slurping_reader.reader().adaptToNewApi(&.{});
|
||||
try writeResourceData(writer, &adapter.new_interface, header.data_size);
|
||||
|
||||
try self.state.font_dir.add(self.arena, FontDir.Font{
|
||||
.id = header.name_value.ordinal,
|
||||
|
|
@ -992,7 +994,7 @@ pub const Compiler = struct {
|
|||
}
|
||||
|
||||
// Fallback to just writing out the entire contents of the file
|
||||
const data_size = try file.getEndPos();
|
||||
const data_size = try file_reader.getSize();
|
||||
if (data_size > std.math.maxInt(u32)) {
|
||||
return self.addErrorDetailsAndFail(.{
|
||||
.err = .resource_data_size_exceeds_max,
|
||||
|
|
@ -1002,7 +1004,7 @@ pub const Compiler = struct {
|
|||
// We now know that the data size will fit in a u32
|
||||
header.data_size = @intCast(data_size);
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
|
||||
try writeResourceData(writer, &file_reader.interface, header.data_size);
|
||||
}
|
||||
|
||||
fn iconReadError(
|
||||
|
|
@ -1250,8 +1252,8 @@ pub const Compiler = struct {
|
|||
const data_len: u32 = @intCast(data_buffer.items.len);
|
||||
try self.writeResourceHeader(writer, node.id, node.type, data_len, node.common_resource_attributes, self.state.language);
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_len);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_len);
|
||||
}
|
||||
|
||||
pub fn writeResourceHeader(self: *Compiler, writer: anytype, id_token: Token, type_token: Token, data_size: u32, common_resource_attributes: []Token, language: res.Language) !void {
|
||||
|
|
@ -1266,15 +1268,15 @@ pub const Compiler = struct {
|
|||
try header.write(writer, self.errContext(id_token));
|
||||
}
|
||||
|
||||
pub fn writeResourceDataNoPadding(writer: anytype, data_reader: anytype, data_size: u32) !void {
|
||||
var limited_reader = std.io.limitedReader(data_reader, data_size);
|
||||
|
||||
const FifoBuffer = std.fifo.LinearFifo(u8, .{ .Static = 4096 });
|
||||
var fifo = FifoBuffer.init();
|
||||
try fifo.pump(limited_reader.reader(), writer);
|
||||
pub fn writeResourceDataNoPadding(writer: anytype, data_reader: *std.Io.Reader, data_size: u32) !void {
|
||||
var adapted = writer.adaptToNewApi();
|
||||
var buffer: [128]u8 = undefined;
|
||||
adapted.new_interface.buffer = &buffer;
|
||||
try data_reader.streamExact(&adapted.new_interface, data_size);
|
||||
try adapted.new_interface.flush();
|
||||
}
|
||||
|
||||
pub fn writeResourceData(writer: anytype, data_reader: anytype, data_size: u32) !void {
|
||||
pub fn writeResourceData(writer: anytype, data_reader: *std.Io.Reader, data_size: u32) !void {
|
||||
try writeResourceDataNoPadding(writer, data_reader, data_size);
|
||||
try writeDataPadding(writer, data_size);
|
||||
}
|
||||
|
|
@ -1339,8 +1341,8 @@ pub const Compiler = struct {
|
|||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
|
||||
/// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
|
||||
|
|
@ -1732,8 +1734,8 @@ pub const Compiler = struct {
|
|||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
|
||||
fn writeDialogHeaderAndStrings(
|
||||
|
|
@ -2046,8 +2048,8 @@ pub const Compiler = struct {
|
|||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
|
||||
/// Weight and italic carry over from previous FONT statements within a single resource,
|
||||
|
|
@ -2121,8 +2123,8 @@ pub const Compiler = struct {
|
|||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
|
||||
/// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
|
||||
|
|
@ -2386,8 +2388,8 @@ pub const Compiler = struct {
|
|||
|
||||
try header.write(writer, self.errContext(node.id));
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
|
||||
/// Expects writer to be a LimitedWriter limited to u16, meaning all writes to
|
||||
|
|
@ -3321,8 +3323,8 @@ pub const StringTable = struct {
|
|||
// we fully control and know are numbers, so they have a fixed size.
|
||||
try header.writeAssertNoOverflow(writer);
|
||||
|
||||
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
|
||||
try Compiler.writeResourceData(writer, data_fbs.reader(), data_size);
|
||||
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
|
||||
try Compiler.writeResourceData(writer, &data_fbs, data_size);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ pub const ParseResOptions = struct {
|
|||
};
|
||||
|
||||
/// The returned ParsedResources should be freed by calling its `deinit` function.
|
||||
pub fn parseRes(allocator: Allocator, reader: anytype, options: ParseResOptions) !ParsedResources {
|
||||
pub fn parseRes(allocator: Allocator, reader: *std.Io.Reader, options: ParseResOptions) !ParsedResources {
|
||||
var resources = ParsedResources.init(allocator);
|
||||
errdefer resources.deinit();
|
||||
|
||||
|
|
@ -74,7 +74,7 @@ pub fn parseRes(allocator: Allocator, reader: anytype, options: ParseResOptions)
|
|||
return resources;
|
||||
}
|
||||
|
||||
pub fn parseResInto(resources: *ParsedResources, reader: anytype, options: ParseResOptions) !void {
|
||||
pub fn parseResInto(resources: *ParsedResources, reader: *std.Io.Reader, options: ParseResOptions) !void {
|
||||
const allocator = resources.allocator;
|
||||
var bytes_remaining: u64 = options.max_size;
|
||||
{
|
||||
|
|
@ -103,43 +103,38 @@ pub const ResourceAndSize = struct {
|
|||
total_size: u64,
|
||||
};
|
||||
|
||||
pub fn parseResource(allocator: Allocator, reader: anytype, max_size: u64) !ResourceAndSize {
|
||||
var header_counting_reader = std.io.countingReader(reader);
|
||||
const header_reader = header_counting_reader.reader();
|
||||
const data_size = try header_reader.readInt(u32, .little);
|
||||
const header_size = try header_reader.readInt(u32, .little);
|
||||
pub fn parseResource(allocator: Allocator, reader: *std.Io.Reader, max_size: u64) !ResourceAndSize {
|
||||
const data_size = try reader.takeInt(u32, .little);
|
||||
const header_size = try reader.takeInt(u32, .little);
|
||||
const total_size: u64 = @as(u64, header_size) + data_size;
|
||||
if (total_size > max_size) return error.ImpossibleSize;
|
||||
|
||||
var header_bytes_available = header_size -| 8;
|
||||
var type_reader = std.io.limitedReader(header_reader, header_bytes_available);
|
||||
const type_value = try parseNameOrOrdinal(allocator, type_reader.reader());
|
||||
const remaining_header_bytes = try reader.take(header_size -| 8);
|
||||
var remaining_header_reader: std.Io.Reader = .fixed(remaining_header_bytes);
|
||||
const type_value = try parseNameOrOrdinal(allocator, &remaining_header_reader);
|
||||
errdefer type_value.deinit(allocator);
|
||||
|
||||
header_bytes_available -|= @intCast(type_value.byteLen());
|
||||
var name_reader = std.io.limitedReader(header_reader, header_bytes_available);
|
||||
const name_value = try parseNameOrOrdinal(allocator, name_reader.reader());
|
||||
const name_value = try parseNameOrOrdinal(allocator, &remaining_header_reader);
|
||||
errdefer name_value.deinit(allocator);
|
||||
|
||||
const padding_after_name = numPaddingBytesNeeded(@intCast(header_counting_reader.bytes_read));
|
||||
try header_reader.skipBytes(padding_after_name, .{ .buf_size = 3 });
|
||||
const padding_after_name = numPaddingBytesNeeded(@intCast(remaining_header_reader.seek));
|
||||
try remaining_header_reader.discardAll(padding_after_name);
|
||||
|
||||
std.debug.assert(header_counting_reader.bytes_read % 4 == 0);
|
||||
const data_version = try header_reader.readInt(u32, .little);
|
||||
const memory_flags: MemoryFlags = @bitCast(try header_reader.readInt(u16, .little));
|
||||
const language: Language = @bitCast(try header_reader.readInt(u16, .little));
|
||||
const version = try header_reader.readInt(u32, .little);
|
||||
const characteristics = try header_reader.readInt(u32, .little);
|
||||
std.debug.assert(remaining_header_reader.seek % 4 == 0);
|
||||
const data_version = try remaining_header_reader.takeInt(u32, .little);
|
||||
const memory_flags: MemoryFlags = @bitCast(try remaining_header_reader.takeInt(u16, .little));
|
||||
const language: Language = @bitCast(try remaining_header_reader.takeInt(u16, .little));
|
||||
const version = try remaining_header_reader.takeInt(u32, .little);
|
||||
const characteristics = try remaining_header_reader.takeInt(u32, .little);
|
||||
|
||||
const header_bytes_read = header_counting_reader.bytes_read;
|
||||
if (header_size != header_bytes_read) return error.HeaderSizeMismatch;
|
||||
if (remaining_header_reader.seek != remaining_header_reader.end) return error.HeaderSizeMismatch;
|
||||
|
||||
const data = try allocator.alloc(u8, data_size);
|
||||
errdefer allocator.free(data);
|
||||
try reader.readNoEof(data);
|
||||
try reader.readSliceAll(data);
|
||||
|
||||
const padding_after_data = numPaddingBytesNeeded(@intCast(data_size));
|
||||
try reader.skipBytes(padding_after_data, .{ .buf_size = 3 });
|
||||
try reader.discardAll(padding_after_data);
|
||||
|
||||
return .{
|
||||
.resource = .{
|
||||
|
|
@ -156,10 +151,10 @@ pub fn parseResource(allocator: Allocator, reader: anytype, max_size: u64) !Reso
|
|||
};
|
||||
}
|
||||
|
||||
pub fn parseNameOrOrdinal(allocator: Allocator, reader: anytype) !NameOrOrdinal {
|
||||
const first_code_unit = try reader.readInt(u16, .little);
|
||||
pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrOrdinal {
|
||||
const first_code_unit = try reader.takeInt(u16, .little);
|
||||
if (first_code_unit == 0xFFFF) {
|
||||
const ordinal_value = try reader.readInt(u16, .little);
|
||||
const ordinal_value = try reader.takeInt(u16, .little);
|
||||
return .{ .ordinal = ordinal_value };
|
||||
}
|
||||
var name_buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 16);
|
||||
|
|
@ -167,7 +162,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: anytype) !NameOrOrdinal
|
|||
var code_unit = first_code_unit;
|
||||
while (code_unit != 0) {
|
||||
try name_buf.append(allocator, std.mem.nativeToLittle(u16, code_unit));
|
||||
code_unit = try reader.readInt(u16, .little);
|
||||
code_unit = try reader.takeInt(u16, .little);
|
||||
}
|
||||
return .{ .name = try name_buf.toOwnedSliceSentinel(allocator, 0) };
|
||||
}
|
||||
|
|
|
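Aside on the parseResource change above (hedged sketch; the helper name and the 8-byte layout are illustrative, not from the diff): the new code pulls a bounded header slice out of the stream with take() and parses it through a second, fixed reader, so reading past the header fails on the fixed reader instead of consuming the underlying stream:

    const std = @import("std");

    fn takeTwoLittleU32s(reader: *std.Io.Reader) !struct { u32, u32 } {
        // Consume exactly 8 bytes of the underlying stream, then parse them in isolation.
        const header_bytes = try reader.take(8);
        var header_reader: std.Io.Reader = .fixed(header_bytes);
        const a = try header_reader.takeInt(u32, .little);
        const b = try header_reader.takeInt(u32, .little);
        return .{ a, b };
    }
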
|||
|
|
@ -1078,11 +1078,9 @@ const CorrespondingLines = struct {
|
|||
at_eof: bool = false,
|
||||
span: SourceMappings.CorrespondingSpan,
|
||||
file: std.fs.File,
|
||||
buffered_reader: BufferedReaderType,
|
||||
buffered_reader: std.fs.File.Reader,
|
||||
code_page: SupportedCodePage,
|
||||
|
||||
const BufferedReaderType = std.io.BufferedReader(512, std.fs.File.DeprecatedReader);
|
||||
|
||||
pub fn init(cwd: std.fs.Dir, err_details: ErrorDetails, line_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines {
|
||||
// We don't do line comparison for this error, so don't print the note if the line
|
||||
// number is different
|
||||
|
|
@ -1101,9 +1099,7 @@ const CorrespondingLines = struct {
|
|||
.buffered_reader = undefined,
|
||||
.code_page = err_details.code_page,
|
||||
};
|
||||
corresponding_lines.buffered_reader = BufferedReaderType{
|
||||
.unbuffered_reader = corresponding_lines.file.deprecatedReader(),
|
||||
};
|
||||
corresponding_lines.buffered_reader = corresponding_lines.file.reader(&.{});
|
||||
errdefer corresponding_lines.deinit();
|
||||
|
||||
var fbs = std.io.fixedBufferStream(&corresponding_lines.line_buf);
|
||||
|
|
@ -1111,7 +1107,7 @@ const CorrespondingLines = struct {
|
|||
|
||||
try corresponding_lines.writeLineFromStreamVerbatim(
|
||||
writer,
|
||||
corresponding_lines.buffered_reader.reader(),
|
||||
corresponding_lines.buffered_reader.interface.adaptToOldInterface(),
|
||||
corresponding_span.start_line,
|
||||
);
|
||||
|
||||
|
|
@ -1154,7 +1150,7 @@ const CorrespondingLines = struct {
|
|||
|
||||
try self.writeLineFromStreamVerbatim(
|
||||
writer,
|
||||
self.buffered_reader.reader(),
|
||||
self.buffered_reader.interface.adaptToOldInterface(),
|
||||
self.line_num,
|
||||
);
|
||||
|
||||
|
|
|
|||
|
|
@ -14,8 +14,9 @@ pub fn read(allocator: std.mem.Allocator, reader: anytype, max_size: u64) ReadEr
|
|||
// Some Reader implementations have an empty ReadError error set which would
|
||||
// cause 'unreachable else' if we tried to use an else in the switch, so we
|
||||
// need to detect this case and not try to translate to ReadError
|
||||
const anyerror_reader_errorset = @TypeOf(reader).Error == anyerror;
|
||||
const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).error_set == null or @typeInfo(@TypeOf(reader).Error).error_set.?.len == 0;
|
||||
if (empty_reader_errorset) {
|
||||
if (empty_reader_errorset and !anyerror_reader_errorset) {
|
||||
return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
|
||||
error.EndOfStream => error.UnexpectedEOF,
|
||||
else => |e| return e,
|
||||
|
|
|
|||
|
|
@ -325,8 +325,8 @@ pub fn main() !void {
|
|||
std.debug.assert(options.output_format == .coff);
|
||||
|
||||
// TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs
|
||||
var fbs = std.io.fixedBufferStream(res_data.bytes);
|
||||
break :resources cvtres.parseRes(allocator, fbs.reader(), .{ .max_size = res_data.bytes.len }) catch |err| {
|
||||
var res_reader: std.Io.Reader = .fixed(res_data.bytes);
|
||||
break :resources cvtres.parseRes(allocator, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
|
||||
// TODO: Better errors
|
||||
try error_handler.emitMessage(allocator, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
|
||||
std.process.exit(1);
|
||||
|
|
|
|||
|
|
@ -145,13 +145,12 @@ fn mainImpl() !void {
|
|||
var parser = try Parser.init(gpa);
|
||||
defer parser.deinit();
|
||||
|
||||
var stdin_buf = std.io.bufferedReader(std.fs.File.stdin().deprecatedReader());
|
||||
var line_buf = std.ArrayList(u8).init(gpa);
|
||||
defer line_buf.deinit();
|
||||
while (stdin_buf.reader().streamUntilDelimiter(line_buf.writer(), '\n', null)) {
|
||||
if (line_buf.getLastOrNull() == '\r') _ = line_buf.pop();
|
||||
try parser.feedLine(line_buf.items);
|
||||
line_buf.clearRetainingCapacity();
|
||||
var stdin_buffer: [1024]u8 = undefined;
|
||||
var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer);
|
||||
|
||||
while (stdin_reader.takeDelimiterExclusive('\n')) |line| {
|
||||
const trimmed = std.mem.trimRight(u8, line, "\r");
|
||||
try parser.feedLine(trimmed);
|
||||
} else |err| switch (err) {
|
||||
error.EndOfStream => {},
|
||||
else => |e| return e,
|
||||
|
|
|
|||
|
|
@ -234,7 +234,7 @@ pub const Previous = struct {
|
|||
};
|
||||
pub fn sendUpdate(
|
||||
fuzz: *Fuzz,
|
||||
socket: *std.http.WebSocket,
|
||||
socket: *std.http.Server.WebSocket,
|
||||
prev: *Previous,
|
||||
) !void {
|
||||
fuzz.coverage_mutex.lock();
|
||||
|
|
@ -263,36 +263,36 @@ pub fn sendUpdate(
|
|||
.string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
|
||||
.start_timestamp = coverage_map.start_timestamp,
|
||||
};
|
||||
const iovecs: [5]std.posix.iovec_const = .{
|
||||
makeIov(@ptrCast(&header)),
|
||||
makeIov(@ptrCast(coverage_map.coverage.directories.keys())),
|
||||
makeIov(@ptrCast(coverage_map.coverage.files.keys())),
|
||||
makeIov(@ptrCast(coverage_map.source_locations)),
|
||||
makeIov(coverage_map.coverage.string_bytes.items),
|
||||
var iovecs: [5][]const u8 = .{
|
||||
@ptrCast(&header),
|
||||
@ptrCast(coverage_map.coverage.directories.keys()),
|
||||
@ptrCast(coverage_map.coverage.files.keys()),
|
||||
@ptrCast(coverage_map.source_locations),
|
||||
coverage_map.coverage.string_bytes.items,
|
||||
};
|
||||
try socket.writeMessagev(&iovecs, .binary);
|
||||
try socket.writeMessageVec(&iovecs, .binary);
|
||||
}
|
||||
|
||||
const header: abi.CoverageUpdateHeader = .{
|
||||
.n_runs = n_runs,
|
||||
.unique_runs = unique_runs,
|
||||
};
|
||||
const iovecs: [2]std.posix.iovec_const = .{
|
||||
makeIov(@ptrCast(&header)),
|
||||
makeIov(@ptrCast(seen_pcs)),
|
||||
var iovecs: [2][]const u8 = .{
|
||||
@ptrCast(&header),
|
||||
@ptrCast(seen_pcs),
|
||||
};
|
||||
try socket.writeMessagev(&iovecs, .binary);
|
||||
try socket.writeMessageVec(&iovecs, .binary);
|
||||
|
||||
prev.unique_runs = unique_runs;
|
||||
}
|
||||
|
||||
if (prev.entry_points != coverage_map.entry_points.items.len) {
|
||||
const header: abi.EntryPointHeader = .init(@intCast(coverage_map.entry_points.items.len));
|
||||
const iovecs: [2]std.posix.iovec_const = .{
|
||||
makeIov(@ptrCast(&header)),
|
||||
makeIov(@ptrCast(coverage_map.entry_points.items)),
|
||||
var iovecs: [2][]const u8 = .{
|
||||
@ptrCast(&header),
|
||||
@ptrCast(coverage_map.entry_points.items),
|
||||
};
|
||||
try socket.writeMessagev(&iovecs, .binary);
|
||||
try socket.writeMessageVec(&iovecs, .binary);
|
||||
|
||||
prev.entry_points = coverage_map.entry_points.items.len;
|
||||
}
|
||||
|
|
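Note on the WebSocket hunks above (hedged sketch; the function and parameter names are illustrative): writeMessageVec takes a mutable slice of plain []const u8 buffers, replacing the posix iovec arrays previously built with makeIov:

    const std = @import("std");

    fn sendTwoPart(socket: *std.http.Server.WebSocket, header: []const u8, payload: []const u8) !void {
        // Declared var because writeMessageVec takes a mutable slice of buffers.
        var bufs: [2][]const u8 = .{ header, payload };
        try socket.writeMessageVec(&bufs, .binary);
    }
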
@ -448,10 +448,3 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte
|
|||
}
|
||||
try coverage_map.entry_points.append(fuzz.ws.gpa, @intCast(index));
|
||||
}
|
||||
|
||||
fn makeIov(s: []const u8) std.posix.iovec_const {
|
||||
return .{
|
||||
.base = s.ptr,
|
||||
.len = s.len,
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -251,48 +251,44 @@ pub fn now(s: *const WebServer) i64 {
|
|||
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
|
||||
defer connection.stream.close();
|
||||
|
||||
var read_buf: [0x4000]u8 = undefined;
|
||||
var server: std.http.Server = .init(connection, &read_buf);
|
||||
var send_buffer: [4096]u8 = undefined;
|
||||
var recv_buffer: [4096]u8 = undefined;
|
||||
var connection_reader = connection.stream.reader(&recv_buffer);
|
||||
var connection_writer = connection.stream.writer(&send_buffer);
|
||||
var server: http.Server = .init(connection_reader.interface(), &connection_writer.interface);
|
||||
|
||||
while (true) {
|
||||
var request = server.receiveHead() catch |err| switch (err) {
|
||||
error.HttpConnectionClosing => return,
|
||||
else => {
|
||||
log.err("failed to receive http request: {s}", .{@errorName(err)});
|
||||
return;
|
||||
},
|
||||
else => return log.err("failed to receive http request: {t}", .{err}),
|
||||
};
|
||||
var ws_send_buf: [0x4000]u8 = undefined;
|
||||
var ws_recv_buf: [0x4000]u8 align(4) = undefined;
|
||||
if (std.http.WebSocket.init(&request, &ws_send_buf, &ws_recv_buf) catch |err| {
|
||||
log.err("failed to initialize websocket connection: {s}", .{@errorName(err)});
|
||||
return;
|
||||
}) |ws_init| {
|
||||
var web_socket = ws_init;
|
||||
ws.serveWebSocket(&web_socket) catch |err| {
|
||||
log.err("failed to serve websocket: {s}", .{@errorName(err)});
|
||||
return;
|
||||
};
|
||||
comptime unreachable;
|
||||
} else {
|
||||
ws.serveRequest(&request) catch |err| switch (err) {
|
||||
error.AlreadyReported => return,
|
||||
else => {
|
||||
log.err("failed to serve '{s}': {s}", .{ request.head.target, @errorName(err) });
|
||||
switch (request.upgradeRequested()) {
|
||||
.websocket => |opt_key| {
|
||||
const key = opt_key orelse return log.err("missing websocket key", .{});
|
||||
var web_socket = request.respondWebSocket(.{ .key = key }) catch {
|
||||
return log.err("failed to respond web socket: {t}", .{connection_writer.err.?});
|
||||
};
|
||||
ws.serveWebSocket(&web_socket) catch |err| {
|
||||
log.err("failed to serve websocket: {t}", .{err});
|
||||
return;
|
||||
},
|
||||
};
|
||||
};
|
||||
comptime unreachable;
|
||||
},
|
||||
.other => |name| return log.err("unknown upgrade request: {s}", .{name}),
|
||||
.none => {
|
||||
ws.serveRequest(&request) catch |err| switch (err) {
|
||||
error.AlreadyReported => return,
|
||||
else => {
|
||||
log.err("failed to serve '{s}': {t}", .{ request.head.target, err });
|
||||
return;
|
||||
},
|
||||
};
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn makeIov(s: []const u8) std.posix.iovec_const {
|
||||
return .{
|
||||
.base = s.ptr,
|
||||
.len = s.len,
|
||||
};
|
||||
}
|
||||
fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
|
||||
fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
|
||||
var prev_build_status = ws.build_status.load(.monotonic);
|
||||
|
||||
const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len);
|
||||
|
|
@ -312,11 +308,8 @@ fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
|
|||
.timestamp = ws.now(),
|
||||
.steps_len = @intCast(ws.all_steps.len),
|
||||
};
|
||||
try sock.writeMessagev(&.{
|
||||
makeIov(@ptrCast(&hello_header)),
|
||||
makeIov(ws.step_names_trailing),
|
||||
makeIov(prev_step_status_bits),
|
||||
}, .binary);
|
||||
var bufs: [3][]const u8 = .{ @ptrCast(&hello_header), ws.step_names_trailing, prev_step_status_bits };
|
||||
try sock.writeMessageVec(&bufs, .binary);
|
||||
}
|
||||
|
||||
var prev_fuzz: Fuzz.Previous = .init;
|
||||
|
|
@ -380,7 +373,7 @@ fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
|
|||
std.Thread.Futex.timedWait(&ws.update_id, start_update_id, std.time.ns_per_ms * default_update_interval_ms) catch {};
|
||||
}
|
||||
}
|
||||
fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
|
||||
fn recvWebSocketMessages(ws: *WebServer, sock: *http.Server.WebSocket) void {
|
||||
while (true) {
|
||||
const msg = sock.readSmallMessage() catch return;
|
||||
if (msg.opcode != .binary) continue;
|
||||
|
|
@ -402,7 +395,7 @@ fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
|
|||
}
|
||||
}
|
||||
|
||||
fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
|
||||
fn serveRequest(ws: *WebServer, req: *http.Server.Request) !void {
|
||||
// Strip an optional leading '/debug' component from the request.
|
||||
const target: []const u8, const debug: bool = target: {
|
||||
if (mem.eql(u8, req.head.target, "/debug")) break :target .{ "/", true };
|
||||
|
|
@ -431,7 +424,7 @@ fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
|
|||
|
||||
fn serveLibFile(
|
||||
ws: *WebServer,
|
||||
request: *std.http.Server.Request,
|
||||
request: *http.Server.Request,
|
||||
sub_path: []const u8,
|
||||
content_type: []const u8,
|
||||
) !void {
|
||||
|
|
@ -442,7 +435,7 @@ fn serveLibFile(
|
|||
}
|
||||
fn serveClientWasm(
|
||||
ws: *WebServer,
|
||||
req: *std.http.Server.Request,
|
||||
req: *http.Server.Request,
|
||||
optimize_mode: std.builtin.OptimizeMode,
|
||||
) !void {
|
||||
var arena_state: std.heap.ArenaAllocator = .init(ws.gpa);
|
||||
|
|
@ -456,12 +449,12 @@ fn serveClientWasm(
|
|||
|
||||
pub fn serveFile(
|
||||
ws: *WebServer,
|
||||
request: *std.http.Server.Request,
|
||||
request: *http.Server.Request,
|
||||
path: Cache.Path,
|
||||
content_type: []const u8,
|
||||
) !void {
|
||||
const gpa = ws.gpa;
|
||||
// The desired API is actually sendfile, which will require enhancing std.http.Server.
|
||||
// The desired API is actually sendfile, which will require enhancing http.Server.
|
||||
// We load the file with every request so that the user can make changes to the file
|
||||
// and refresh the HTML page without restarting this server.
|
||||
const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
|
||||
|
|
@ -478,14 +471,13 @@ pub fn serveFile(
|
|||
}
|
||||
pub fn serveTarFile(
|
||||
ws: *WebServer,
|
||||
request: *std.http.Server.Request,
|
||||
request: *http.Server.Request,
|
||||
paths: []const Cache.Path,
|
||||
) !void {
|
||||
const gpa = ws.gpa;
|
||||
|
||||
var send_buf: [0x4000]u8 = undefined;
|
||||
var response = request.respondStreaming(.{
|
||||
.send_buffer = &send_buf,
|
||||
var send_buffer: [0x4000]u8 = undefined;
|
||||
var response = try request.respondStreaming(&send_buffer, .{
|
||||
.respond_options = .{
|
||||
.extra_headers = &.{
|
||||
.{ .name = "Content-Type", .value = "application/x-tar" },
|
||||
|
|
@ -497,10 +489,7 @@ pub fn serveTarFile(
|
|||
var cached_cwd_path: ?[]const u8 = null;
|
||||
defer if (cached_cwd_path) |p| gpa.free(p);
|
||||
|
||||
var response_buf: [1024]u8 = undefined;
|
||||
var adapter = response.writer().adaptToNewApi();
|
||||
adapter.new_interface.buffer = &response_buf;
|
||||
var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
|
||||
var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer };
|
||||
|
||||
for (paths) |path| {
|
||||
var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| {
|
||||
|
|
@ -526,7 +515,6 @@ pub fn serveTarFile(
|
|||
}
|
||||
|
||||
// intentionally not calling `archiver.finishPedantically`
|
||||
try adapter.new_interface.flush();
|
||||
try response.end();
|
||||
}
|
||||
|
||||
|
|
@ -804,7 +792,7 @@ pub fn wait(ws: *WebServer) RunnerRequest {
|
|||
}
|
||||
}
|
||||
|
||||
const cache_control_header: std.http.Header = .{
|
||||
const cache_control_header: http.Header = .{
|
||||
.name = "Cache-Control",
|
||||
.value = "max-age=0, must-revalidate",
|
||||
};
|
||||
|
|
@ -819,5 +807,6 @@ const Build = std.Build;
|
|||
const Cache = Build.Cache;
|
||||
const Fuzz = Build.Fuzz;
|
||||
const abi = Build.abi;
|
||||
const http = std.http;
|
||||
|
||||
const WebServer = @This();
|
||||
|
|
|
|||
|
|
@ -428,19 +428,9 @@ pub const BufferedWriter = @import("Io/buffered_writer.zig").BufferedWriter;
|
|||
/// Deprecated in favor of `Writer`.
|
||||
pub const bufferedWriter = @import("Io/buffered_writer.zig").bufferedWriter;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const BufferedReader = @import("Io/buffered_reader.zig").BufferedReader;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const bufferedReader = @import("Io/buffered_reader.zig").bufferedReader;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const bufferedReaderSize = @import("Io/buffered_reader.zig").bufferedReaderSize;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const fixedBufferStream = @import("Io/fixed_buffer_stream.zig").fixedBufferStream;
|
||||
/// Deprecated in favor of `Reader.Limited`.
|
||||
pub const LimitedReader = @import("Io/limited_reader.zig").LimitedReader;
|
||||
/// Deprecated in favor of `Reader.Limited`.
|
||||
pub const limitedReader = @import("Io/limited_reader.zig").limitedReader;
|
||||
/// Deprecated with no replacement; inefficient pattern
|
||||
pub const CountingWriter = @import("Io/counting_writer.zig").CountingWriter;
|
||||
/// Deprecated with no replacement; inefficient pattern
|
||||
|
|
@ -926,7 +916,6 @@ pub fn PollFiles(comptime StreamEnum: type) type {
|
|||
test {
|
||||
_ = Reader;
|
||||
_ = Writer;
|
||||
_ = BufferedReader;
|
||||
_ = BufferedWriter;
|
||||
_ = CountingWriter;
|
||||
_ = CountingReader;
|
||||
|
|
|
|||
|
|
@ -367,8 +367,11 @@ pub fn appendRemainingUnlimited(
|
|||
const buffer_contents = r.buffer[r.seek..r.end];
|
||||
try list.ensureUnusedCapacity(gpa, buffer_contents.len + bump);
|
||||
list.appendSliceAssumeCapacity(buffer_contents);
|
||||
r.seek = 0;
|
||||
r.end = 0;
|
||||
// If statement protects `ending`.
|
||||
if (r.end != 0) {
|
||||
r.seek = 0;
|
||||
r.end = 0;
|
||||
}
|
||||
// From here, we leave `buffer` empty, appending directly to `list`.
|
||||
var writer: Writer = .{
|
||||
.buffer = undefined,
|
||||
|
|
@ -1306,31 +1309,6 @@ pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void {
|
|||
r.end = data.len;
|
||||
}
|
||||
|
||||
/// Advances the stream and decreases the size of the storage buffer by `n`,
|
||||
/// returning the range of bytes no longer accessible by `r`.
|
||||
///
|
||||
/// This action can be undone by `restitute`.
|
||||
///
|
||||
/// Asserts there are at least `n` buffered bytes already.
|
||||
///
|
||||
/// Asserts that `r.seek` is zero, i.e. the buffer is in a rebased state.
|
||||
pub fn steal(r: *Reader, n: usize) []u8 {
|
||||
assert(r.seek == 0);
|
||||
assert(n <= r.end);
|
||||
const stolen = r.buffer[0..n];
|
||||
r.buffer = r.buffer[n..];
|
||||
r.end -= n;
|
||||
return stolen;
|
||||
}
|
||||
|
||||
/// Expands the storage buffer, undoing the effects of `steal`
|
||||
/// Assumes that `n` does not exceed the total number of stolen bytes.
|
||||
pub fn restitute(r: *Reader, n: usize) void {
|
||||
r.buffer = (r.buffer.ptr - n)[0 .. r.buffer.len + n];
|
||||
r.end += n;
|
||||
r.seek += n;
|
||||
}
|
||||
|
||||
test fixed {
|
||||
var r: Reader = .fixed("a\x02");
|
||||
try testing.expect((try r.takeByte()) == 'a');
|
||||
|
|
|
|||
|
|
@ -191,29 +191,87 @@ pub fn writeSplatHeader(
|
|||
data: []const []const u8,
|
||||
splat: usize,
|
||||
) Error!usize {
|
||||
const new_end = w.end + header.len;
|
||||
if (new_end <= w.buffer.len) {
|
||||
@memcpy(w.buffer[w.end..][0..header.len], header);
|
||||
w.end = new_end;
|
||||
return header.len + try writeSplat(w, data, splat);
|
||||
return writeSplatHeaderLimit(w, header, data, splat, .unlimited);
|
||||
}
|
||||
|
||||
/// Equivalent to `writeSplatHeader` but writes at most `limit` bytes.
|
||||
pub fn writeSplatHeaderLimit(
|
||||
w: *Writer,
|
||||
header: []const u8,
|
||||
data: []const []const u8,
|
||||
splat: usize,
|
||||
limit: Limit,
|
||||
) Error!usize {
|
||||
var remaining = @intFromEnum(limit);
|
||||
{
|
||||
const copy_len = @min(header.len, w.buffer.len - w.end, remaining);
|
||||
if (header.len - copy_len != 0) return writeSplatHeaderLimitFinish(w, header, data, splat, remaining);
|
||||
@memcpy(w.buffer[w.end..][0..copy_len], header[0..copy_len]);
|
||||
w.end += copy_len;
|
||||
remaining -= copy_len;
|
||||
}
|
||||
var vecs: [8][]const u8 = undefined; // Arbitrarily chosen size.
|
||||
var i: usize = 1;
|
||||
vecs[0] = header;
|
||||
for (data[0 .. data.len - 1]) |buf| {
|
||||
if (buf.len == 0) continue;
|
||||
vecs[i] = buf;
|
||||
i += 1;
|
||||
if (vecs.len - i == 0) break;
|
||||
for (data[0 .. data.len - 1], 0..) |buf, i| {
|
||||
const copy_len = @min(buf.len, w.buffer.len - w.end, remaining);
|
||||
if (buf.len - copy_len != 0) return @intFromEnum(limit) - remaining +
|
||||
try writeSplatHeaderLimitFinish(w, &.{}, data[i..], splat, remaining);
|
||||
@memcpy(w.buffer[w.end..][0..copy_len], buf[0..copy_len]);
|
||||
w.end += copy_len;
|
||||
remaining -= copy_len;
|
||||
}
|
||||
const pattern = data[data.len - 1];
|
||||
const new_splat = s: {
|
||||
if (pattern.len == 0 or vecs.len - i == 0) break :s 1;
|
||||
const splat_n = pattern.len * splat;
|
||||
if (splat_n > @min(w.buffer.len - w.end, remaining)) {
|
||||
const buffered_n = @intFromEnum(limit) - remaining;
|
||||
const written = try writeSplatHeaderLimitFinish(w, &.{}, data[data.len - 1 ..][0..1], splat, remaining);
|
||||
return buffered_n + written;
|
||||
}
|
||||
|
||||
for (0..splat) |_| {
|
||||
@memcpy(w.buffer[w.end..][0..pattern.len], pattern);
|
||||
w.end += pattern.len;
|
||||
}
|
||||
|
||||
remaining -= splat_n;
|
||||
return @intFromEnum(limit) - remaining;
|
||||
}
|
||||
|
||||
fn writeSplatHeaderLimitFinish(
|
||||
w: *Writer,
|
||||
header: []const u8,
|
||||
data: []const []const u8,
|
||||
splat: usize,
|
||||
limit: usize,
|
||||
) Error!usize {
|
||||
var remaining = limit;
|
||||
var vecs: [8][]const u8 = undefined;
|
||||
var i: usize = 0;
|
||||
v: {
|
||||
if (header.len != 0) {
|
||||
const copy_len = @min(header.len, remaining);
|
||||
vecs[i] = header[0..copy_len];
|
||||
i += 1;
|
||||
remaining -= copy_len;
|
||||
if (remaining == 0) break :v;
|
||||
}
|
||||
for (data[0 .. data.len - 1]) |buf| if (buf.len != 0) {
|
||||
const copy_len = @min(header.len, remaining);
|
||||
vecs[i] = buf;
|
||||
i += 1;
|
||||
remaining -= copy_len;
|
||||
if (remaining == 0) break :v;
|
||||
if (vecs.len - i == 0) break :v;
|
||||
};
|
||||
const pattern = data[data.len - 1];
|
||||
if (splat == 1) {
|
||||
vecs[i] = pattern[0..@min(remaining, pattern.len)];
|
||||
i += 1;
|
||||
break :v;
|
||||
}
|
||||
vecs[i] = pattern;
|
||||
i += 1;
|
||||
break :s splat;
|
||||
};
|
||||
return w.vtable.drain(w, vecs[0..i], new_splat);
|
||||
return w.vtable.drain(w, (&vecs)[0..i], @min(remaining / pattern.len, splat));
|
||||
}
|
||||
return w.vtable.drain(w, (&vecs)[0..i], 1);
|
||||
}
|
||||
|
||||
test "writeSplatHeader splatting avoids buffer aliasing temptation" {
|
||||
|
|
|
|||
|
|
@ -1,201 +0,0 @@
|
|||
const std = @import("../std.zig");
|
||||
const io = std.io;
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) type {
|
||||
return struct {
|
||||
unbuffered_reader: ReaderType,
|
||||
buf: [buffer_size]u8 = undefined,
|
||||
start: usize = 0,
|
||||
end: usize = 0,
|
||||
|
||||
pub const Error = ReaderType.Error;
|
||||
pub const Reader = io.GenericReader(*Self, Error, read);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn read(self: *Self, dest: []u8) Error!usize {
|
||||
// First try reading from the already buffered data onto the destination.
|
||||
const current = self.buf[self.start..self.end];
|
||||
if (current.len != 0) {
|
||||
const to_transfer = @min(current.len, dest.len);
|
||||
@memcpy(dest[0..to_transfer], current[0..to_transfer]);
|
||||
self.start += to_transfer;
|
||||
return to_transfer;
|
||||
}
|
||||
|
||||
// If dest is large, read from the unbuffered reader directly into the destination.
|
||||
if (dest.len >= buffer_size) {
|
||||
return self.unbuffered_reader.read(dest);
|
||||
}
|
||||
|
||||
// If dest is small, read from the unbuffered reader into our own internal buffer,
|
||||
// and then transfer to destination.
|
||||
self.end = try self.unbuffered_reader.read(&self.buf);
|
||||
const to_transfer = @min(self.end, dest.len);
|
||||
@memcpy(dest[0..to_transfer], self.buf[0..to_transfer]);
|
||||
self.start = to_transfer;
|
||||
return to_transfer;
|
||||
}
|
||||
|
||||
pub fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn bufferedReader(reader: anytype) BufferedReader(4096, @TypeOf(reader)) {
|
||||
return .{ .unbuffered_reader = reader };
|
||||
}
|
||||
|
||||
pub fn bufferedReaderSize(comptime size: usize, reader: anytype) BufferedReader(size, @TypeOf(reader)) {
|
||||
return .{ .unbuffered_reader = reader };
|
||||
}
|
||||
|
||||
test "OneByte" {
|
||||
const OneByteReadReader = struct {
|
||||
str: []const u8,
|
||||
curr: usize,
|
||||
|
||||
const Error = error{NoError};
|
||||
const Self = @This();
|
||||
const Reader = io.GenericReader(*Self, Error, read);
|
||||
|
||||
fn init(str: []const u8) Self {
|
||||
return Self{
|
||||
.str = str,
|
||||
.curr = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn read(self: *Self, dest: []u8) Error!usize {
|
||||
if (self.str.len <= self.curr or dest.len == 0)
|
||||
return 0;
|
||||
|
||||
dest[0] = self.str[self.curr];
|
||||
self.curr += 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
|
||||
const str = "This is a test";
|
||||
var one_byte_stream = OneByteReadReader.init(str);
|
||||
var buf_reader = bufferedReader(one_byte_stream.reader());
|
||||
const stream = buf_reader.reader();
|
||||
|
||||
const res = try stream.readAllAlloc(testing.allocator, str.len + 1);
|
||||
defer testing.allocator.free(res);
|
||||
try testing.expectEqualSlices(u8, str, res);
|
||||
}
|
||||
|
||||
fn smallBufferedReader(underlying_stream: anytype) BufferedReader(8, @TypeOf(underlying_stream)) {
|
||||
return .{ .unbuffered_reader = underlying_stream };
|
||||
}
|
||||
test "Block" {
|
||||
const BlockReader = struct {
|
||||
block: []const u8,
|
||||
reads_allowed: usize,
|
||||
curr_read: usize,
|
||||
|
||||
const Error = error{NoError};
|
||||
const Self = @This();
|
||||
const Reader = io.GenericReader(*Self, Error, read);
|
||||
|
||||
fn init(block: []const u8, reads_allowed: usize) Self {
|
||||
return Self{
|
||||
.block = block,
|
||||
.reads_allowed = reads_allowed,
|
||||
.curr_read = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn read(self: *Self, dest: []u8) Error!usize {
|
||||
if (self.curr_read >= self.reads_allowed) return 0;
|
||||
@memcpy(dest[0..self.block.len], self.block);
|
||||
|
||||
self.curr_read += 1;
|
||||
return self.block.len;
|
||||
}
|
||||
|
||||
fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
|
||||
const block = "0123";
|
||||
|
||||
// len out == block
|
||||
{
|
||||
var test_buf_reader: BufferedReader(4, BlockReader) = .{
|
||||
.unbuffered_reader = BlockReader.init(block, 2),
|
||||
};
|
||||
const reader = test_buf_reader.reader();
|
||||
var out_buf: [4]u8 = undefined;
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, block);
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, block);
|
||||
try testing.expectEqual(try reader.readAll(&out_buf), 0);
|
||||
}
|
||||
|
||||
// len out < block
|
||||
{
|
||||
var test_buf_reader: BufferedReader(4, BlockReader) = .{
|
||||
.unbuffered_reader = BlockReader.init(block, 2),
|
||||
};
|
||||
const reader = test_buf_reader.reader();
|
||||
var out_buf: [3]u8 = undefined;
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, "012");
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, "301");
|
||||
const n = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, out_buf[0..n], "23");
|
||||
try testing.expectEqual(try reader.readAll(&out_buf), 0);
|
||||
}
|
||||
|
||||
// len out > block
|
||||
{
|
||||
var test_buf_reader: BufferedReader(4, BlockReader) = .{
|
||||
.unbuffered_reader = BlockReader.init(block, 2),
|
||||
};
|
||||
const reader = test_buf_reader.reader();
|
||||
var out_buf: [5]u8 = undefined;
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, "01230");
|
||||
const n = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, out_buf[0..n], "123");
|
||||
try testing.expectEqual(try reader.readAll(&out_buf), 0);
|
||||
}
|
||||
|
||||
// len out == 0
|
||||
{
|
||||
var test_buf_reader: BufferedReader(4, BlockReader) = .{
|
||||
.unbuffered_reader = BlockReader.init(block, 2),
|
||||
};
|
||||
const reader = test_buf_reader.reader();
|
||||
var out_buf: [0]u8 = undefined;
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, "");
|
||||
}
|
||||
|
||||
// len bufreader buf > block
|
||||
{
|
||||
var test_buf_reader: BufferedReader(5, BlockReader) = .{
|
||||
.unbuffered_reader = BlockReader.init(block, 2),
|
||||
};
|
||||
const reader = test_buf_reader.reader();
|
||||
var out_buf: [4]u8 = undefined;
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, block);
|
||||
_ = try reader.readAll(&out_buf);
|
||||
try testing.expectEqualSlices(u8, &out_buf, block);
|
||||
try testing.expectEqual(try reader.readAll(&out_buf), 0);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
const std = @import("../std.zig");
|
||||
const io = std.io;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
pub fn LimitedReader(comptime ReaderType: type) type {
|
||||
return struct {
|
||||
inner_reader: ReaderType,
|
||||
bytes_left: u64,
|
||||
|
||||
pub const Error = ReaderType.Error;
|
||||
pub const Reader = io.GenericReader(*Self, Error, read);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn read(self: *Self, dest: []u8) Error!usize {
|
||||
const max_read = @min(self.bytes_left, dest.len);
|
||||
const n = try self.inner_reader.read(dest[0..max_read]);
|
||||
self.bytes_left -= n;
|
||||
return n;
|
||||
}
|
||||
|
||||
pub fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns an initialised `LimitedReader`.
|
||||
/// `bytes_left` is a `u64` to be able to take 64 bit file offsets
|
||||
pub fn limitedReader(inner_reader: anytype, bytes_left: u64) LimitedReader(@TypeOf(inner_reader)) {
|
||||
return .{ .inner_reader = inner_reader, .bytes_left = bytes_left };
|
||||
}
|
||||
|
||||
test "basic usage" {
|
||||
const data = "hello world";
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
var early_stream = limitedReader(fbs.reader(), 3);
|
||||
|
||||
var buf: [5]u8 = undefined;
|
||||
try testing.expectEqual(@as(usize, 3), try early_stream.reader().read(&buf));
|
||||
try testing.expectEqualSlices(u8, data[0..3], buf[0..3]);
|
||||
try testing.expectEqual(@as(usize, 0), try early_stream.reader().read(&buf));
|
||||
try testing.expectError(error.EndOfStream, early_stream.reader().skipBytes(10, .{}));
|
||||
}
|
||||
|
|
@ -45,9 +45,9 @@ test "write a file, read it, then delete it" {
|
|||
const expected_file_size: u64 = "begin".len + data.len + "end".len;
|
||||
try expectEqual(expected_file_size, file_size);
|
||||
|
||||
var buf_stream = io.bufferedReader(file.deprecatedReader());
|
||||
const st = buf_stream.reader();
|
||||
const contents = try st.readAllAlloc(std.testing.allocator, 2 * 1024);
|
||||
var file_buffer: [1024]u8 = undefined;
|
||||
var file_reader = file.reader(&file_buffer);
|
||||
const contents = try file_reader.interface.allocRemaining(std.testing.allocator, .limited(2 * 1024));
|
||||
defer std.testing.allocator.free(contents);
|
||||
|
||||
try expect(mem.eql(u8, contents[0.."begin".len], "begin"));
|
||||
|
|
|
|||
239
lib/std/Uri.zig
|
|
@ -4,6 +4,8 @@
|
|||
const std = @import("std.zig");
|
||||
const testing = std.testing;
|
||||
const Uri = @This();
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Writer = std.Io.Writer;
|
||||
|
||||
scheme: []const u8,
|
||||
user: ?Component = null,
|
||||
|
|
@ -14,6 +16,32 @@ path: Component = Component.empty,
|
|||
query: ?Component = null,
|
||||
fragment: ?Component = null,
|
||||
|
||||
pub const host_name_max = 255;
|
||||
|
||||
/// Returned value may point into `buffer` or be the original string.
|
||||
///
|
||||
/// Suggested buffer length: `host_name_max`.
|
||||
///
|
||||
/// See also:
|
||||
/// * `getHostAlloc`
|
||||
pub fn getHost(uri: Uri, buffer: []u8) error{ UriMissingHost, UriHostTooLong }![]const u8 {
|
||||
const component = uri.host orelse return error.UriMissingHost;
|
||||
return component.toRaw(buffer) catch |err| switch (err) {
|
||||
error.NoSpaceLeft => return error.UriHostTooLong,
|
||||
};
|
||||
}
|
||||
|
||||
/// Returned value may point into `buffer` or be the original string.
|
||||
///
|
||||
/// See also:
|
||||
/// * `getHost`
|
||||
pub fn getHostAlloc(uri: Uri, arena: Allocator) error{ UriMissingHost, UriHostTooLong, OutOfMemory }![]const u8 {
|
||||
const component = uri.host orelse return error.UriMissingHost;
|
||||
const result = try component.toRawMaybeAlloc(arena);
|
||||
if (result.len > host_name_max) return error.UriHostTooLong;
|
||||
return result;
|
||||
}
|
||||
|
||||
pub const Component = union(enum) {
|
||||
/// Invalid characters in this component must be percent encoded
|
||||
/// before being printed as part of a URI.
|
||||
|
|
@ -30,11 +58,19 @@ pub const Component = union(enum) {
|
|||
};
|
||||
}
|
||||
|
||||
/// Returned value may point into `buffer` or be the original string.
|
||||
pub fn toRaw(component: Component, buffer: []u8) error{NoSpaceLeft}![]const u8 {
|
||||
return switch (component) {
|
||||
.raw => |raw| raw,
|
||||
.percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
|
||||
try std.fmt.bufPrint(buffer, "{f}", .{std.fmt.alt(component, .formatRaw)})
|
||||
else
|
||||
percent_encoded,
|
||||
};
|
||||
}
|
||||
|
||||
/// Allocates the result with `arena` only if needed, so the result should not be freed.
|
||||
pub fn toRawMaybeAlloc(
|
||||
component: Component,
|
||||
arena: std.mem.Allocator,
|
||||
) std.mem.Allocator.Error![]const u8 {
|
||||
pub fn toRawMaybeAlloc(component: Component, arena: Allocator) Allocator.Error![]const u8 {
|
||||
return switch (component) {
|
||||
.raw => |raw| raw,
|
||||
.percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
|
||||
|
|
@ -44,7 +80,7 @@ pub const Component = union(enum) {
|
|||
};
|
||||
}
|
||||
|
||||
pub fn formatRaw(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatRaw(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try w.writeAll(raw),
|
||||
.percent_encoded => |percent_encoded| {
|
||||
|
|
@ -67,56 +103,56 @@ pub const Component = union(enum) {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn formatEscaped(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatEscaped(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isUnreserved),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatUser(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatUser(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isUserChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatPassword(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatPassword(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isPasswordChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatHost(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatHost(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isHostChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatPath(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatPath(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isPathChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatQuery(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatQuery(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isQueryChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatFragment(component: Component, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn formatFragment(component: Component, w: *Writer) Writer.Error!void {
|
||||
switch (component) {
|
||||
.raw => |raw| try percentEncode(w, raw, isFragmentChar),
|
||||
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn percentEncode(w: *std.io.Writer, raw: []const u8, comptime isValidChar: fn (u8) bool) std.io.Writer.Error!void {
|
||||
pub fn percentEncode(w: *Writer, raw: []const u8, comptime isValidChar: fn (u8) bool) Writer.Error!void {
|
||||
var start: usize = 0;
|
||||
for (raw, 0..) |char, index| {
|
||||
if (isValidChar(char)) continue;
|
||||
|
|
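As a hedged illustration of the `percentEncode` helper above, here is a small sketch that writes into a fixed `Writer` with a caller-supplied predicate; the predicate and input are made up for the example:

const std = @import("std");

fn isPlain(c: u8) bool {
    return switch (c) {
        'A'...'Z', 'a'...'z', '0'...'9', '-', '.', '_', '~' => true,
        else => false,
    };
}

test "percentEncode escapes bytes rejected by the predicate" {
    var buf: [32]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try std.Uri.Component.percentEncode(&w, "hello world", isPlain);
    try std.testing.expectEqualStrings("hello%20world", w.buffered());
}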
@ -165,17 +201,15 @@ pub const ParseError = error{ UnexpectedCharacter, InvalidFormat, InvalidPort };
|
|||
/// The return value will contain strings pointing into the original `text`.
|
||||
/// Each component that is provided, will be non-`null`.
|
||||
pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
|
||||
var reader = SliceReader{ .slice = text };
|
||||
|
||||
var uri: Uri = .{ .scheme = scheme, .path = undefined };
|
||||
var i: usize = 0;
|
||||
|
||||
if (reader.peekPrefix("//")) a: { // authority part
|
||||
std.debug.assert(reader.get().? == '/');
|
||||
std.debug.assert(reader.get().? == '/');
|
||||
|
||||
const authority = reader.readUntil(isAuthoritySeparator);
|
||||
if (std.mem.startsWith(u8, text, "//")) a: {
|
||||
i = std.mem.indexOfAnyPos(u8, text, 2, &authority_sep) orelse text.len;
|
||||
const authority = text[2..i];
|
||||
if (authority.len == 0) {
|
||||
if (reader.peekPrefix("/")) break :a else return error.InvalidFormat;
|
||||
if (!std.mem.startsWith(u8, text[2..], "/")) return error.InvalidFormat;
|
||||
break :a;
|
||||
}
|
||||
|
||||
var start_of_host: usize = 0;
|
||||
|
|
@ -225,26 +259,28 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
|
|||
uri.host = .{ .percent_encoded = authority[start_of_host..end_of_host] };
|
||||
}
|
||||
|
||||
uri.path = .{ .percent_encoded = reader.readUntil(isPathSeparator) };
|
||||
const path_start = i;
|
||||
i = std.mem.indexOfAnyPos(u8, text, path_start, &path_sep) orelse text.len;
|
||||
uri.path = .{ .percent_encoded = text[path_start..i] };
|
||||
|
||||
if ((reader.peek() orelse 0) == '?') { // query part
|
||||
std.debug.assert(reader.get().? == '?');
|
||||
uri.query = .{ .percent_encoded = reader.readUntil(isQuerySeparator) };
|
||||
if (std.mem.startsWith(u8, text[i..], "?")) {
|
||||
const query_start = i + 1;
|
||||
i = std.mem.indexOfScalarPos(u8, text, query_start, '#') orelse text.len;
|
||||
uri.query = .{ .percent_encoded = text[query_start..i] };
|
||||
}
|
||||
|
||||
if ((reader.peek() orelse 0) == '#') { // fragment part
|
||||
std.debug.assert(reader.get().? == '#');
|
||||
uri.fragment = .{ .percent_encoded = reader.readUntilEof() };
|
||||
if (std.mem.startsWith(u8, text[i..], "#")) {
|
||||
uri.fragment = .{ .percent_encoded = text[i + 1 ..] };
|
||||
}
|
||||
|
||||
return uri;
|
||||
}
|
||||
|
||||
pub fn format(uri: *const Uri, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn format(uri: *const Uri, writer: *Writer) Writer.Error!void {
|
||||
return writeToStream(uri, writer, .all);
|
||||
}
|
||||
|
||||
pub fn writeToStream(uri: *const Uri, writer: *std.io.Writer, flags: Format.Flags) std.io.Writer.Error!void {
|
||||
pub fn writeToStream(uri: *const Uri, writer: *Writer, flags: Format.Flags) Writer.Error!void {
|
||||
if (flags.scheme) {
|
||||
try writer.print("{s}:", .{uri.scheme});
|
||||
if (flags.authority and uri.host != null) {
|
||||
|
|
@ -318,7 +354,7 @@ pub const Format = struct {
|
|||
};
|
||||
};
|
||||
|
||||
pub fn default(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
pub fn default(f: Format, writer: *Writer) Writer.Error!void {
|
||||
return writeToStream(f.uri, writer, f.flags);
|
||||
}
|
||||
};
|
||||
|
|
@ -327,41 +363,34 @@ pub fn fmt(uri: *const Uri, flags: Format.Flags) std.fmt.Formatter(Format, Forma
|
|||
return .{ .data = .{ .uri = uri, .flags = flags } };
|
||||
}
|
||||
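A short, hedged sketch of the new `format`/`fmt` printing path, assuming the `{f}` specifier dispatches to `format` as elsewhere in this change; the URL is just an example value:

const std = @import("std");

test "a parsed Uri prints back out through format" {
    const uri = try std.Uri.parse("https://ziglang.org/download/?arch=x86_64#latest");
    var buf: [128]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.print("{f}", .{uri});
    try std.testing.expectEqualStrings("https://ziglang.org/download/?arch=x86_64#latest", w.buffered());
}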
|
||||
/// Parses the URI or returns an error.
|
||||
/// The return value will contain strings pointing into the
|
||||
/// original `text`. Each component that is provided, will be non-`null`.
|
||||
/// The return value will contain strings pointing into the original `text`.
|
||||
/// Each component that is provided will be non-`null`.
|
||||
pub fn parse(text: []const u8) ParseError!Uri {
|
||||
var reader: SliceReader = .{ .slice = text };
|
||||
const scheme = reader.readWhile(isSchemeChar);
|
||||
|
||||
// after the scheme, a ':' must appear
|
||||
if (reader.get()) |c| {
|
||||
if (c != ':')
|
||||
return error.UnexpectedCharacter;
|
||||
} else {
|
||||
return error.InvalidFormat;
|
||||
}
|
||||
|
||||
return parseAfterScheme(scheme, reader.readUntilEof());
|
||||
const end = for (text, 0..) |byte, i| {
|
||||
if (!isSchemeChar(byte)) break i;
|
||||
} else text.len;
|
||||
// After the scheme, a ':' must appear.
|
||||
if (end >= text.len) return error.InvalidFormat;
|
||||
if (text[end] != ':') return error.UnexpectedCharacter;
|
||||
return parseAfterScheme(text[0..end], text[end + 1 ..]);
|
||||
}
|
||||
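The reworked `parse` above makes the scheme requirement explicit; a small sketch of the resulting error contract, with illustrative inputs:

const std = @import("std");

test "parse rejects references without a scheme terminator" {
    // All bytes are valid scheme characters, but no ':' ever appears.
    try std.testing.expectError(error.InvalidFormat, std.Uri.parse("justascheme"));
    // The scheme is cut short by a byte that is neither a scheme character nor ':'.
    try std.testing.expectError(error.UnexpectedCharacter, std.Uri.parse("ht tp://example.com"));
}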
|
||||
pub const ResolveInPlaceError = ParseError || error{NoSpaceLeft};
|
||||
|
||||
/// Resolves a URI against a base URI, conforming to RFC 3986, Section 5.
|
||||
/// Copies `new` to the beginning of `aux_buf.*`, allowing the slices to overlap,
|
||||
/// then parses `new` as a URI, and then resolves the path in place.
|
||||
/// Resolves a URI against a base URI, conforming to
|
||||
/// [RFC 3986, Section 5](https://www.rfc-editor.org/rfc/rfc3986#section-5)
|
||||
///
|
||||
/// Assumes new location is already copied to the beginning of `aux_buf.*`.
|
||||
/// Parses that new location as a URI, and then resolves the path in place.
|
||||
///
|
||||
/// If a merge needs to take place, the newly constructed path will be stored
|
||||
/// in `aux_buf.*` just after the copied `new`, and `aux_buf.*` will be modified
|
||||
/// to only contain the remaining unused space.
|
||||
pub fn resolve_inplace(base: Uri, new: []const u8, aux_buf: *[]u8) ResolveInPlaceError!Uri {
|
||||
std.mem.copyForwards(u8, aux_buf.*, new);
|
||||
// At this point, new is an invalid pointer.
|
||||
const new_mut = aux_buf.*[0..new.len];
|
||||
aux_buf.* = aux_buf.*[new.len..];
|
||||
|
||||
const new_parsed = parse(new_mut) catch |err|
|
||||
(parseAfterScheme("", new_mut) catch return err);
|
||||
// As you can see above, `new_mut` is not a const pointer.
|
||||
/// in `aux_buf.*` just after the copied location, and `aux_buf.*` will be
|
||||
/// modified to only contain the remaining unused space.
|
||||
pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceError!Uri {
|
||||
const new = aux_buf.*[0..new_len];
|
||||
const new_parsed = parse(new) catch |err| (parseAfterScheme("", new) catch return err);
|
||||
aux_buf.* = aux_buf.*[new_len..];
|
||||
// As you can see above, `new` is not a const pointer.
|
||||
const new_path: []u8 = @constCast(new_parsed.path.percent_encoded);
|
||||
|
||||
if (new_parsed.scheme.len > 0) return .{
|
||||
|
|
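A hedged sketch of calling the renamed `resolveInPlace`, using one of the RFC 3986 reference-resolution examples; the buffer size and inputs are assumptions for illustration:

const std = @import("std");

test "resolveInPlace resolves a relative reference against a base" {
    const base = try std.Uri.parse("http://a/b/c/d;p?q");
    const rel = "../g"; // RFC 3986 section 5.4 expects http://a/b/g

    var storage: [64]u8 = undefined;
    @memcpy(storage[0..rel.len], rel); // the new location must already sit at the start of aux_buf
    var aux_buf: []u8 = &storage;

    const resolved = try std.Uri.resolveInPlace(base, rel.len, &aux_buf);
    try std.testing.expectEqualStrings("http", resolved.scheme);

    var out: [32]u8 = undefined;
    const path = try std.fmt.bufPrint(&out, "{f}", .{std.fmt.alt(resolved.path, .formatRaw)});
    try std.testing.expectEqualStrings("/b/g", path);
}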
@ -461,7 +490,7 @@ test remove_dot_segments {
|
|||
|
||||
/// 5.2.3. Merge Paths
|
||||
fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Component {
|
||||
var aux: std.io.Writer = .fixed(aux_buf.*);
|
||||
var aux: Writer = .fixed(aux_buf.*);
|
||||
if (!base.isEmpty()) {
|
||||
base.formatPath(&aux) catch return error.NoSpaceLeft;
|
||||
aux.end = std.mem.lastIndexOfScalar(u8, aux.buffered(), '/') orelse return remove_dot_segments(new);
|
||||
|
|
@ -472,59 +501,6 @@ fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Co
|
|||
return merged_path;
|
||||
}
|
||||
|
||||
const SliceReader = struct {
|
||||
const Self = @This();
|
||||
|
||||
slice: []const u8,
|
||||
offset: usize = 0,
|
||||
|
||||
fn get(self: *Self) ?u8 {
|
||||
if (self.offset >= self.slice.len)
|
||||
return null;
|
||||
const c = self.slice[self.offset];
|
||||
self.offset += 1;
|
||||
return c;
|
||||
}
|
||||
|
||||
fn peek(self: Self) ?u8 {
|
||||
if (self.offset >= self.slice.len)
|
||||
return null;
|
||||
return self.slice[self.offset];
|
||||
}
|
||||
|
||||
fn readWhile(self: *Self, comptime predicate: fn (u8) bool) []const u8 {
|
||||
const start = self.offset;
|
||||
var end = start;
|
||||
while (end < self.slice.len and predicate(self.slice[end])) {
|
||||
end += 1;
|
||||
}
|
||||
self.offset = end;
|
||||
return self.slice[start..end];
|
||||
}
|
||||
|
||||
fn readUntil(self: *Self, comptime predicate: fn (u8) bool) []const u8 {
|
||||
const start = self.offset;
|
||||
var end = start;
|
||||
while (end < self.slice.len and !predicate(self.slice[end])) {
|
||||
end += 1;
|
||||
}
|
||||
self.offset = end;
|
||||
return self.slice[start..end];
|
||||
}
|
||||
|
||||
fn readUntilEof(self: *Self) []const u8 {
|
||||
const start = self.offset;
|
||||
self.offset = self.slice.len;
|
||||
return self.slice[start..];
|
||||
}
|
||||
|
||||
fn peekPrefix(self: Self, prefix: []const u8) bool {
|
||||
if (self.offset + prefix.len > self.slice.len)
|
||||
return false;
|
||||
return std.mem.eql(u8, self.slice[self.offset..][0..prefix.len], prefix);
|
||||
}
|
||||
};
|
||||
|
||||
/// scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
|
||||
fn isSchemeChar(c: u8) bool {
|
||||
return switch (c) {
|
||||
|
|
@ -533,19 +509,6 @@ fn isSchemeChar(c: u8) bool {
|
|||
};
|
||||
}
|
||||
|
||||
/// reserved = gen-delims / sub-delims
|
||||
fn isReserved(c: u8) bool {
|
||||
return isGenLimit(c) or isSubLimit(c);
|
||||
}
|
||||
|
||||
/// gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
|
||||
fn isGenLimit(c: u8) bool {
|
||||
return switch (c) {
|
||||
':', ',', '?', '#', '[', ']', '@' => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
/// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
/// / "*" / "+" / "," / ";" / "="
|
||||
fn isSubLimit(c: u8) bool {
|
||||
|
|
@ -585,26 +548,8 @@ fn isQueryChar(c: u8) bool {
|
|||
|
||||
const isFragmentChar = isQueryChar;
|
||||
|
||||
fn isAuthoritySeparator(c: u8) bool {
|
||||
return switch (c) {
|
||||
'/', '?', '#' => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
fn isPathSeparator(c: u8) bool {
|
||||
return switch (c) {
|
||||
'?', '#' => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
fn isQuerySeparator(c: u8) bool {
|
||||
return switch (c) {
|
||||
'#' => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
const authority_sep: [3]u8 = .{ '/', '?', '#' };
|
||||
const path_sep: [2]u8 = .{ '?', '#' };
|
||||
|
||||
test "basic" {
|
||||
const parsed = try parse("https://ziglang.org/download");
|
||||
|
|
|
|||
|
|
@ -49,8 +49,8 @@ pub const hello_retry_request_sequence = [32]u8{
|
|||
};
|
||||
|
||||
pub const close_notify_alert = [_]u8{
|
||||
@intFromEnum(AlertLevel.warning),
|
||||
@intFromEnum(AlertDescription.close_notify),
|
||||
@intFromEnum(Alert.Level.warning),
|
||||
@intFromEnum(Alert.Description.close_notify),
|
||||
};
|
||||
|
||||
pub const ProtocolVersion = enum(u16) {
|
||||
|
|
@ -138,103 +138,108 @@ pub const ExtensionType = enum(u16) {
|
|||
_,
|
||||
};
|
||||
|
||||
pub const AlertLevel = enum(u8) {
|
||||
warning = 1,
|
||||
fatal = 2,
|
||||
_,
|
||||
};
|
||||
pub const Alert = struct {
|
||||
level: Level,
|
||||
description: Description,
|
||||
|
||||
pub const AlertDescription = enum(u8) {
|
||||
pub const Error = error{
|
||||
TlsAlertUnexpectedMessage,
|
||||
TlsAlertBadRecordMac,
|
||||
TlsAlertRecordOverflow,
|
||||
TlsAlertHandshakeFailure,
|
||||
TlsAlertBadCertificate,
|
||||
TlsAlertUnsupportedCertificate,
|
||||
TlsAlertCertificateRevoked,
|
||||
TlsAlertCertificateExpired,
|
||||
TlsAlertCertificateUnknown,
|
||||
TlsAlertIllegalParameter,
|
||||
TlsAlertUnknownCa,
|
||||
TlsAlertAccessDenied,
|
||||
TlsAlertDecodeError,
|
||||
TlsAlertDecryptError,
|
||||
TlsAlertProtocolVersion,
|
||||
TlsAlertInsufficientSecurity,
|
||||
TlsAlertInternalError,
|
||||
TlsAlertInappropriateFallback,
|
||||
TlsAlertMissingExtension,
|
||||
TlsAlertUnsupportedExtension,
|
||||
TlsAlertUnrecognizedName,
|
||||
TlsAlertBadCertificateStatusResponse,
|
||||
TlsAlertUnknownPskIdentity,
|
||||
TlsAlertCertificateRequired,
|
||||
TlsAlertNoApplicationProtocol,
|
||||
TlsAlertUnknown,
|
||||
pub const Level = enum(u8) {
|
||||
warning = 1,
|
||||
fatal = 2,
|
||||
_,
|
||||
};
|
||||
|
||||
close_notify = 0,
|
||||
unexpected_message = 10,
|
||||
bad_record_mac = 20,
|
||||
record_overflow = 22,
|
||||
handshake_failure = 40,
|
||||
bad_certificate = 42,
|
||||
unsupported_certificate = 43,
|
||||
certificate_revoked = 44,
|
||||
certificate_expired = 45,
|
||||
certificate_unknown = 46,
|
||||
illegal_parameter = 47,
|
||||
unknown_ca = 48,
|
||||
access_denied = 49,
|
||||
decode_error = 50,
|
||||
decrypt_error = 51,
|
||||
protocol_version = 70,
|
||||
insufficient_security = 71,
|
||||
internal_error = 80,
|
||||
inappropriate_fallback = 86,
|
||||
user_canceled = 90,
|
||||
missing_extension = 109,
|
||||
unsupported_extension = 110,
|
||||
unrecognized_name = 112,
|
||||
bad_certificate_status_response = 113,
|
||||
unknown_psk_identity = 115,
|
||||
certificate_required = 116,
|
||||
no_application_protocol = 120,
|
||||
_,
|
||||
pub const Description = enum(u8) {
|
||||
pub const Error = error{
|
||||
TlsAlertUnexpectedMessage,
|
||||
TlsAlertBadRecordMac,
|
||||
TlsAlertRecordOverflow,
|
||||
TlsAlertHandshakeFailure,
|
||||
TlsAlertBadCertificate,
|
||||
TlsAlertUnsupportedCertificate,
|
||||
TlsAlertCertificateRevoked,
|
||||
TlsAlertCertificateExpired,
|
||||
TlsAlertCertificateUnknown,
|
||||
TlsAlertIllegalParameter,
|
||||
TlsAlertUnknownCa,
|
||||
TlsAlertAccessDenied,
|
||||
TlsAlertDecodeError,
|
||||
TlsAlertDecryptError,
|
||||
TlsAlertProtocolVersion,
|
||||
TlsAlertInsufficientSecurity,
|
||||
TlsAlertInternalError,
|
||||
TlsAlertInappropriateFallback,
|
||||
TlsAlertMissingExtension,
|
||||
TlsAlertUnsupportedExtension,
|
||||
TlsAlertUnrecognizedName,
|
||||
TlsAlertBadCertificateStatusResponse,
|
||||
TlsAlertUnknownPskIdentity,
|
||||
TlsAlertCertificateRequired,
|
||||
TlsAlertNoApplicationProtocol,
|
||||
TlsAlertUnknown,
|
||||
};
|
||||
|
||||
pub fn toError(alert: AlertDescription) Error!void {
|
||||
switch (alert) {
|
||||
.close_notify => {}, // not an error
|
||||
.unexpected_message => return error.TlsAlertUnexpectedMessage,
|
||||
.bad_record_mac => return error.TlsAlertBadRecordMac,
|
||||
.record_overflow => return error.TlsAlertRecordOverflow,
|
||||
.handshake_failure => return error.TlsAlertHandshakeFailure,
|
||||
.bad_certificate => return error.TlsAlertBadCertificate,
|
||||
.unsupported_certificate => return error.TlsAlertUnsupportedCertificate,
|
||||
.certificate_revoked => return error.TlsAlertCertificateRevoked,
|
||||
.certificate_expired => return error.TlsAlertCertificateExpired,
|
||||
.certificate_unknown => return error.TlsAlertCertificateUnknown,
|
||||
.illegal_parameter => return error.TlsAlertIllegalParameter,
|
||||
.unknown_ca => return error.TlsAlertUnknownCa,
|
||||
.access_denied => return error.TlsAlertAccessDenied,
|
||||
.decode_error => return error.TlsAlertDecodeError,
|
||||
.decrypt_error => return error.TlsAlertDecryptError,
|
||||
.protocol_version => return error.TlsAlertProtocolVersion,
|
||||
.insufficient_security => return error.TlsAlertInsufficientSecurity,
|
||||
.internal_error => return error.TlsAlertInternalError,
|
||||
.inappropriate_fallback => return error.TlsAlertInappropriateFallback,
|
||||
.user_canceled => {}, // not an error
|
||||
.missing_extension => return error.TlsAlertMissingExtension,
|
||||
.unsupported_extension => return error.TlsAlertUnsupportedExtension,
|
||||
.unrecognized_name => return error.TlsAlertUnrecognizedName,
|
||||
.bad_certificate_status_response => return error.TlsAlertBadCertificateStatusResponse,
|
||||
.unknown_psk_identity => return error.TlsAlertUnknownPskIdentity,
|
||||
.certificate_required => return error.TlsAlertCertificateRequired,
|
||||
.no_application_protocol => return error.TlsAlertNoApplicationProtocol,
|
||||
_ => return error.TlsAlertUnknown,
|
||||
close_notify = 0,
|
||||
unexpected_message = 10,
|
||||
bad_record_mac = 20,
|
||||
record_overflow = 22,
|
||||
handshake_failure = 40,
|
||||
bad_certificate = 42,
|
||||
unsupported_certificate = 43,
|
||||
certificate_revoked = 44,
|
||||
certificate_expired = 45,
|
||||
certificate_unknown = 46,
|
||||
illegal_parameter = 47,
|
||||
unknown_ca = 48,
|
||||
access_denied = 49,
|
||||
decode_error = 50,
|
||||
decrypt_error = 51,
|
||||
protocol_version = 70,
|
||||
insufficient_security = 71,
|
||||
internal_error = 80,
|
||||
inappropriate_fallback = 86,
|
||||
user_canceled = 90,
|
||||
missing_extension = 109,
|
||||
unsupported_extension = 110,
|
||||
unrecognized_name = 112,
|
||||
bad_certificate_status_response = 113,
|
||||
unknown_psk_identity = 115,
|
||||
certificate_required = 116,
|
||||
no_application_protocol = 120,
|
||||
_,
|
||||
|
||||
pub fn toError(description: Description) Error!void {
|
||||
switch (description) {
|
||||
.close_notify => {}, // not an error
|
||||
.unexpected_message => return error.TlsAlertUnexpectedMessage,
|
||||
.bad_record_mac => return error.TlsAlertBadRecordMac,
|
||||
.record_overflow => return error.TlsAlertRecordOverflow,
|
||||
.handshake_failure => return error.TlsAlertHandshakeFailure,
|
||||
.bad_certificate => return error.TlsAlertBadCertificate,
|
||||
.unsupported_certificate => return error.TlsAlertUnsupportedCertificate,
|
||||
.certificate_revoked => return error.TlsAlertCertificateRevoked,
|
||||
.certificate_expired => return error.TlsAlertCertificateExpired,
|
||||
.certificate_unknown => return error.TlsAlertCertificateUnknown,
|
||||
.illegal_parameter => return error.TlsAlertIllegalParameter,
|
||||
.unknown_ca => return error.TlsAlertUnknownCa,
|
||||
.access_denied => return error.TlsAlertAccessDenied,
|
||||
.decode_error => return error.TlsAlertDecodeError,
|
||||
.decrypt_error => return error.TlsAlertDecryptError,
|
||||
.protocol_version => return error.TlsAlertProtocolVersion,
|
||||
.insufficient_security => return error.TlsAlertInsufficientSecurity,
|
||||
.internal_error => return error.TlsAlertInternalError,
|
||||
.inappropriate_fallback => return error.TlsAlertInappropriateFallback,
|
||||
.user_canceled => {}, // not an error
|
||||
.missing_extension => return error.TlsAlertMissingExtension,
|
||||
.unsupported_extension => return error.TlsAlertUnsupportedExtension,
|
||||
.unrecognized_name => return error.TlsAlertUnrecognizedName,
|
||||
.bad_certificate_status_response => return error.TlsAlertBadCertificateStatusResponse,
|
||||
.unknown_psk_identity => return error.TlsAlertUnknownPskIdentity,
|
||||
.certificate_required => return error.TlsAlertCertificateRequired,
|
||||
.no_application_protocol => return error.TlsAlertNoApplicationProtocol,
|
||||
_ => return error.TlsAlertUnknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
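A hedged sketch of how the relocated alert description type might be used to surface TLS alerts as Zig errors, assuming this file is the `std.crypto.tls` namespace:

const std = @import("std");

test "Alert.Description.toError maps fatal alerts to error codes" {
    const Description = std.crypto.tls.Alert.Description;
    try std.testing.expectError(error.TlsAlertHandshakeFailure, Description.handshake_failure.toError());
    // close_notify and user_canceled are not errors.
    try Description.close_notify.toError();
    try Description.user_canceled.toError();
}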
|
||||
pub const SignatureScheme = enum(u16) {
|
||||
|
|
@ -650,7 +655,7 @@ pub const Decoder = struct {
|
|||
}
|
||||
|
||||
/// Use this function to increase `their_end`.
|
||||
pub fn readAtLeast(d: *Decoder, stream: anytype, their_amt: usize) !void {
|
||||
pub fn readAtLeast(d: *Decoder, stream: *std.io.Reader, their_amt: usize) !void {
|
||||
assert(!d.disable_reads);
|
||||
const existing_amt = d.cap - d.idx;
|
||||
d.their_end = d.idx + their_amt;
|
||||
|
|
@ -658,14 +663,16 @@ pub const Decoder = struct {
|
|||
const request_amt = their_amt - existing_amt;
|
||||
const dest = d.buf[d.cap..];
|
||||
if (request_amt > dest.len) return error.TlsRecordOverflow;
|
||||
const actual_amt = try stream.readAtLeast(dest, request_amt);
|
||||
if (actual_amt < request_amt) return error.TlsConnectionTruncated;
|
||||
d.cap += actual_amt;
|
||||
stream.readSlice(dest[0..request_amt]) catch |err| switch (err) {
|
||||
error.EndOfStream => return error.TlsConnectionTruncated,
|
||||
error.ReadFailed => return error.ReadFailed,
|
||||
};
|
||||
d.cap += request_amt;
|
||||
}
|
||||
|
||||
/// Same as `readAtLeast` but also increases `our_end` by exactly `our_amt`.
|
||||
/// Use when `our_amt` is calculated by us, not by them.
|
||||
pub fn readAtLeastOurAmt(d: *Decoder, stream: anytype, our_amt: usize) !void {
|
||||
pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.Reader, our_amt: usize) !void {
|
||||
assert(!d.disable_reads);
|
||||
try readAtLeast(d, stream, our_amt);
|
||||
d.our_end = d.idx + our_amt;
|
||||
|
|
|
|||
File diff suppressed because it is too large
548	lib/std/fifo.zig
|
|
@ -1,548 +0,0 @@
|
|||
// FIFO of fixed size items
|
||||
// Usually used for e.g. byte buffers
|
||||
|
||||
const std = @import("std");
|
||||
const math = std.math;
|
||||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
pub const LinearFifoBufferType = union(enum) {
|
||||
/// The buffer is internal to the fifo; it is of the specified size.
|
||||
Static: usize,
|
||||
|
||||
/// The buffer is passed as a slice to the initialiser.
|
||||
Slice,
|
||||
|
||||
/// The buffer is managed dynamically using a `mem.Allocator`.
|
||||
Dynamic,
|
||||
};
|
||||
|
||||
pub fn LinearFifo(
|
||||
comptime T: type,
|
||||
comptime buffer_type: LinearFifoBufferType,
|
||||
) type {
|
||||
const autoalign = false;
|
||||
|
||||
const powers_of_two = switch (buffer_type) {
|
||||
.Static => std.math.isPowerOfTwo(buffer_type.Static),
|
||||
.Slice => false, // Any size slice could be passed in
|
||||
.Dynamic => true, // This could be configurable in future
|
||||
};
|
||||
|
||||
return struct {
|
||||
allocator: if (buffer_type == .Dynamic) Allocator else void,
|
||||
buf: if (buffer_type == .Static) [buffer_type.Static]T else []T,
|
||||
head: usize,
|
||||
count: usize,
|
||||
|
||||
const Self = @This();
|
||||
pub const Reader = std.io.GenericReader(*Self, error{}, readFn);
|
||||
pub const Writer = std.io.GenericWriter(*Self, error{OutOfMemory}, appendWrite);
|
||||
|
||||
// Type of Self argument for slice operations.
|
||||
// If buffer is inline (Static) then we need to ensure we haven't
|
||||
// returned a slice into a copy on the stack
|
||||
const SliceSelfArg = if (buffer_type == .Static) *Self else Self;
|
||||
|
||||
pub const init = switch (buffer_type) {
|
||||
.Static => initStatic,
|
||||
.Slice => initSlice,
|
||||
.Dynamic => initDynamic,
|
||||
};
|
||||
|
||||
fn initStatic() Self {
|
||||
comptime assert(buffer_type == .Static);
|
||||
return .{
|
||||
.allocator = {},
|
||||
.buf = undefined,
|
||||
.head = 0,
|
||||
.count = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn initSlice(buf: []T) Self {
|
||||
comptime assert(buffer_type == .Slice);
|
||||
return .{
|
||||
.allocator = {},
|
||||
.buf = buf,
|
||||
.head = 0,
|
||||
.count = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn initDynamic(allocator: Allocator) Self {
|
||||
comptime assert(buffer_type == .Dynamic);
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.buf = &.{},
|
||||
.head = 0,
|
||||
.count = 0,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: Self) void {
|
||||
if (buffer_type == .Dynamic) self.allocator.free(self.buf);
|
||||
}
|
||||
|
||||
pub fn realign(self: *Self) void {
|
||||
if (self.buf.len - self.head >= self.count) {
|
||||
mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
|
||||
self.head = 0;
|
||||
} else {
|
||||
var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;
|
||||
|
||||
while (self.head != 0) {
|
||||
const n = @min(self.head, tmp.len);
|
||||
const m = self.buf.len - n;
|
||||
@memcpy(tmp[0..n], self.buf[0..n]);
|
||||
mem.copyForwards(T, self.buf[0..m], self.buf[n..][0..m]);
|
||||
@memcpy(self.buf[m..][0..n], tmp[0..n]);
|
||||
self.head -= n;
|
||||
}
|
||||
}
|
||||
{ // set unused area to undefined
|
||||
const unused = mem.sliceAsBytes(self.buf[self.count..]);
|
||||
@memset(unused, undefined);
|
||||
}
|
||||
}
|
||||
|
||||
/// Reduce allocated capacity to `size`.
|
||||
pub fn shrink(self: *Self, size: usize) void {
|
||||
assert(size >= self.count);
|
||||
if (buffer_type == .Dynamic) {
|
||||
self.realign();
|
||||
self.buf = self.allocator.realloc(self.buf, size) catch |e| switch (e) {
|
||||
error.OutOfMemory => return, // no problem, capacity is still correct then.
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure that the buffer can fit at least `size` items
|
||||
pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
|
||||
if (self.buf.len >= size) return;
|
||||
if (buffer_type == .Dynamic) {
|
||||
self.realign();
|
||||
const new_size = if (powers_of_two) math.ceilPowerOfTwo(usize, size) catch return error.OutOfMemory else size;
|
||||
self.buf = try self.allocator.realloc(self.buf, new_size);
|
||||
} else {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
}
|
||||
|
||||
/// Makes sure at least `size` items are unused
|
||||
pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
|
||||
if (self.writableLength() >= size) return;
|
||||
|
||||
return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
|
||||
}
|
||||
|
||||
/// Returns number of items currently in fifo
|
||||
pub fn readableLength(self: Self) usize {
|
||||
return self.count;
|
||||
}
|
||||
|
||||
/// Returns a writable slice from the 'read' end of the fifo
|
||||
fn readableSliceMut(self: SliceSelfArg, offset: usize) []T {
|
||||
if (offset > self.count) return &[_]T{};
|
||||
|
||||
var start = self.head + offset;
|
||||
if (start >= self.buf.len) {
|
||||
start -= self.buf.len;
|
||||
return self.buf[start .. start + (self.count - offset)];
|
||||
} else {
|
||||
const end = @min(self.head + self.count, self.buf.len);
|
||||
return self.buf[start..end];
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a readable slice from `offset`
|
||||
pub fn readableSlice(self: SliceSelfArg, offset: usize) []const T {
|
||||
return self.readableSliceMut(offset);
|
||||
}
|
||||
|
||||
pub fn readableSliceOfLen(self: *Self, len: usize) []const T {
|
||||
assert(len <= self.count);
|
||||
const buf = self.readableSlice(0);
|
||||
if (buf.len >= len) {
|
||||
return buf[0..len];
|
||||
} else {
|
||||
self.realign();
|
||||
return self.readableSlice(0)[0..len];
|
||||
}
|
||||
}
|
||||
|
||||
/// Discard first `count` items in the fifo
|
||||
pub fn discard(self: *Self, count: usize) void {
|
||||
assert(count <= self.count);
|
||||
{ // set old range to undefined. Note: may be wrapped around
|
||||
const slice = self.readableSliceMut(0);
|
||||
if (slice.len >= count) {
|
||||
const unused = mem.sliceAsBytes(slice[0..count]);
|
||||
@memset(unused, undefined);
|
||||
} else {
|
||||
const unused = mem.sliceAsBytes(slice[0..]);
|
||||
@memset(unused, undefined);
|
||||
const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
|
||||
@memset(unused2, undefined);
|
||||
}
|
||||
}
|
||||
if (autoalign and self.count == count) {
|
||||
self.head = 0;
|
||||
self.count = 0;
|
||||
} else {
|
||||
var head = self.head + count;
|
||||
if (powers_of_two) {
|
||||
// Note it is safe to do a wrapping subtract as
|
||||
// bitwise & with all 1s is a noop
|
||||
head &= self.buf.len -% 1;
|
||||
} else {
|
||||
head %= self.buf.len;
|
||||
}
|
||||
self.head = head;
|
||||
self.count -= count;
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the next item from the fifo
|
||||
pub fn readItem(self: *Self) ?T {
|
||||
if (self.count == 0) return null;
|
||||
|
||||
const c = self.buf[self.head];
|
||||
self.discard(1);
|
||||
return c;
|
||||
}
|
||||
|
||||
/// Read data from the fifo into `dst`, returns number of items copied.
|
||||
pub fn read(self: *Self, dst: []T) usize {
|
||||
var dst_left = dst;
|
||||
|
||||
while (dst_left.len > 0) {
|
||||
const slice = self.readableSlice(0);
|
||||
if (slice.len == 0) break;
|
||||
const n = @min(slice.len, dst_left.len);
|
||||
@memcpy(dst_left[0..n], slice[0..n]);
|
||||
self.discard(n);
|
||||
dst_left = dst_left[n..];
|
||||
}
|
||||
|
||||
return dst.len - dst_left.len;
|
||||
}
|
||||
|
||||
/// Same as `read` except it returns an error union
|
||||
/// The purpose of this function existing is to match `std.io.GenericReader` API.
|
||||
fn readFn(self: *Self, dest: []u8) error{}!usize {
|
||||
return self.read(dest);
|
||||
}
|
||||
|
||||
pub fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
/// Returns number of items available in fifo
|
||||
pub fn writableLength(self: Self) usize {
|
||||
return self.buf.len - self.count;
|
||||
}
|
||||
|
||||
/// Returns the first section of writable buffer.
|
||||
/// Note that this may be of length 0
|
||||
pub fn writableSlice(self: SliceSelfArg, offset: usize) []T {
|
||||
if (offset > self.buf.len) return &[_]T{};
|
||||
|
||||
const tail = self.head + offset + self.count;
|
||||
if (tail < self.buf.len) {
|
||||
return self.buf[tail..];
|
||||
} else {
|
||||
return self.buf[tail - self.buf.len ..][0 .. self.writableLength() - offset];
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a writable buffer of at least `size` items, allocating memory as needed.
|
||||
/// Use `fifo.update` once you've written data to it.
|
||||
pub fn writableWithSize(self: *Self, size: usize) ![]T {
|
||||
try self.ensureUnusedCapacity(size);
|
||||
|
||||
// try to avoid realigning buffer
|
||||
var slice = self.writableSlice(0);
|
||||
if (slice.len < size) {
|
||||
self.realign();
|
||||
slice = self.writableSlice(0);
|
||||
}
|
||||
return slice;
|
||||
}
|
||||
|
||||
/// Update the tail location of the buffer (usually follows use of writable/writableWithSize)
|
||||
pub fn update(self: *Self, count: usize) void {
|
||||
assert(self.count + count <= self.buf.len);
|
||||
self.count += count;
|
||||
}
|
||||
|
||||
/// Appends the data in `src` to the fifo.
|
||||
/// You must have ensured there is enough space.
|
||||
pub fn writeAssumeCapacity(self: *Self, src: []const T) void {
|
||||
assert(self.writableLength() >= src.len);
|
||||
|
||||
var src_left = src;
|
||||
while (src_left.len > 0) {
|
||||
const writable_slice = self.writableSlice(0);
|
||||
assert(writable_slice.len != 0);
|
||||
const n = @min(writable_slice.len, src_left.len);
|
||||
@memcpy(writable_slice[0..n], src_left[0..n]);
|
||||
self.update(n);
|
||||
src_left = src_left[n..];
|
||||
}
|
||||
}
|
||||
|
||||
/// Write a single item to the fifo
|
||||
pub fn writeItem(self: *Self, item: T) !void {
|
||||
try self.ensureUnusedCapacity(1);
|
||||
return self.writeItemAssumeCapacity(item);
|
||||
}
|
||||
|
||||
pub fn writeItemAssumeCapacity(self: *Self, item: T) void {
|
||||
var tail = self.head + self.count;
|
||||
if (powers_of_two) {
|
||||
tail &= self.buf.len - 1;
|
||||
} else {
|
||||
tail %= self.buf.len;
|
||||
}
|
||||
self.buf[tail] = item;
|
||||
self.update(1);
|
||||
}
|
||||
|
||||
/// Appends the data in `src` to the fifo.
|
||||
/// Allocates more memory as necessary
|
||||
pub fn write(self: *Self, src: []const T) !void {
|
||||
try self.ensureUnusedCapacity(src.len);
|
||||
|
||||
return self.writeAssumeCapacity(src);
|
||||
}
|
||||
|
||||
/// Same as `write` except it returns the number of bytes written, which is always the same
|
||||
/// as `bytes.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
|
||||
fn appendWrite(self: *Self, bytes: []const u8) error{OutOfMemory}!usize {
|
||||
try self.write(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
/// Make `count` items available before the current read location
|
||||
fn rewind(self: *Self, count: usize) void {
|
||||
assert(self.writableLength() >= count);
|
||||
|
||||
var head = self.head + (self.buf.len - count);
|
||||
if (powers_of_two) {
|
||||
head &= self.buf.len - 1;
|
||||
} else {
|
||||
head %= self.buf.len;
|
||||
}
|
||||
self.head = head;
|
||||
self.count += count;
|
||||
}
|
||||
|
||||
/// Place data back into the read stream
|
||||
pub fn unget(self: *Self, src: []const T) !void {
|
||||
try self.ensureUnusedCapacity(src.len);
|
||||
|
||||
self.rewind(src.len);
|
||||
|
||||
const slice = self.readableSliceMut(0);
|
||||
if (src.len < slice.len) {
|
||||
@memcpy(slice[0..src.len], src);
|
||||
} else {
|
||||
@memcpy(slice, src[0..slice.len]);
|
||||
const slice2 = self.readableSliceMut(slice.len);
|
||||
@memcpy(slice2[0 .. src.len - slice.len], src[slice.len..]);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the item at `offset`.
|
||||
/// Asserts offset is within bounds.
|
||||
pub fn peekItem(self: Self, offset: usize) T {
|
||||
assert(offset < self.count);
|
||||
|
||||
var index = self.head + offset;
|
||||
if (powers_of_two) {
|
||||
index &= self.buf.len - 1;
|
||||
} else {
|
||||
index %= self.buf.len;
|
||||
}
|
||||
return self.buf[index];
|
||||
}
|
||||
|
||||
/// Pump data from a reader into a writer.
|
||||
/// Stops when reader returns 0 bytes (EOF).
|
||||
/// Buffer size must be set before calling; a buffer length of 0 is invalid.
|
||||
pub fn pump(self: *Self, src_reader: anytype, dest_writer: anytype) !void {
|
||||
assert(self.buf.len > 0);
|
||||
while (true) {
|
||||
if (self.writableLength() > 0) {
|
||||
const n = try src_reader.read(self.writableSlice(0));
|
||||
if (n == 0) break; // EOF
|
||||
self.update(n);
|
||||
}
|
||||
self.discard(try dest_writer.write(self.readableSlice(0)));
|
||||
}
|
||||
// flush remaining data
|
||||
while (self.readableLength() > 0) {
|
||||
self.discard(try dest_writer.write(self.readableSlice(0)));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn toOwnedSlice(self: *Self) Allocator.Error![]T {
|
||||
if (self.head != 0) self.realign();
|
||||
assert(self.head == 0);
|
||||
assert(self.count <= self.buf.len);
|
||||
const allocator = self.allocator;
|
||||
if (allocator.resize(self.buf, self.count)) {
|
||||
const result = self.buf[0..self.count];
|
||||
self.* = Self.init(allocator);
|
||||
return result;
|
||||
}
|
||||
const new_memory = try allocator.dupe(T, self.buf[0..self.count]);
|
||||
allocator.free(self.buf);
|
||||
self.* = Self.init(allocator);
|
||||
return new_memory;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
test "LinearFifo(u8, .Dynamic) discard(0) from empty buffer should not error on overflow" {
|
||||
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
|
||||
defer fifo.deinit();
|
||||
|
||||
// If overflow is not explicitly allowed this will crash in debug / safe mode
|
||||
fifo.discard(0);
|
||||
}
|
||||
|
||||
test "LinearFifo(u8, .Dynamic)" {
|
||||
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
|
||||
defer fifo.deinit();
|
||||
|
||||
try fifo.write("HELLO");
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
try testing.expectEqualSlices(u8, "HELLO", fifo.readableSlice(0));
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < 5) : (i += 1) {
|
||||
try fifo.write(&[_]u8{fifo.peekItem(i)});
|
||||
}
|
||||
try testing.expectEqual(@as(usize, 10), fifo.readableLength());
|
||||
try testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0));
|
||||
}
|
||||
|
||||
{
|
||||
try testing.expectEqual(@as(u8, 'H'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'E'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'O'), fifo.readItem().?);
|
||||
}
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
|
||||
{ // Writes that wrap around
|
||||
try testing.expectEqual(@as(usize, 11), fifo.writableLength());
|
||||
try testing.expectEqual(@as(usize, 6), fifo.writableSlice(0).len);
|
||||
fifo.writeAssumeCapacity("6<chars<11");
|
||||
try testing.expectEqualSlices(u8, "HELLO6<char", fifo.readableSlice(0));
|
||||
try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(11));
|
||||
try testing.expectEqualSlices(u8, "11", fifo.readableSlice(13));
|
||||
try testing.expectEqualSlices(u8, "", fifo.readableSlice(15));
|
||||
fifo.discard(11);
|
||||
try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(0));
|
||||
fifo.discard(4);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
const buf = try fifo.writableWithSize(12);
|
||||
try testing.expectEqual(@as(usize, 12), buf.len);
|
||||
var i: u8 = 0;
|
||||
while (i < 10) : (i += 1) {
|
||||
buf[i] = i + 'a';
|
||||
}
|
||||
fifo.update(10);
|
||||
try testing.expectEqualSlices(u8, "abcdefghij", fifo.readableSlice(0));
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.unget("prependedstring");
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "prependedstringabcdefghij", result[0..fifo.read(&result)]);
|
||||
try fifo.unget("b");
|
||||
try fifo.unget("a");
|
||||
try testing.expectEqualSlices(u8, "ab", result[0..fifo.read(&result)]);
|
||||
}
|
||||
|
||||
fifo.shrink(0);
|
||||
|
||||
{
|
||||
try fifo.writer().print("{s}, {s}!", .{ "Hello", "World" });
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "Hello, World!", result[0..fifo.read(&result)]);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.writer().writeAll("This is a test");
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "This", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "is", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "a", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "test", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.ensureTotalCapacity(1);
|
||||
var in_fbs = std.io.fixedBufferStream("pump test");
|
||||
var out_buf: [50]u8 = undefined;
|
||||
var out_fbs = std.io.fixedBufferStream(&out_buf);
|
||||
try fifo.pump(in_fbs.reader(), out_fbs.writer());
|
||||
try testing.expectEqualSlices(u8, in_fbs.buffer, out_fbs.getWritten());
|
||||
}
|
||||
}
|
||||
|
||||
test LinearFifo {
|
||||
inline for ([_]type{ u1, u8, u16, u64 }) |T| {
|
||||
inline for ([_]LinearFifoBufferType{ LinearFifoBufferType{ .Static = 32 }, .Slice, .Dynamic }) |bt| {
|
||||
const FifoType = LinearFifo(T, bt);
|
||||
var buf: if (bt == .Slice) [32]T else void = undefined;
|
||||
var fifo = switch (bt) {
|
||||
.Static => FifoType.init(),
|
||||
.Slice => FifoType.init(buf[0..]),
|
||||
.Dynamic => FifoType.init(testing.allocator),
|
||||
};
|
||||
defer fifo.deinit();
|
||||
|
||||
try fifo.write(&[_]T{ 0, 1, 1, 0, 1 });
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
|
||||
{
|
||||
try testing.expectEqual(@as(T, 0), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 0), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.writeItem(1);
|
||||
try fifo.writeItem(1);
|
||||
try fifo.writeItem(1);
|
||||
try testing.expectEqual(@as(usize, 3), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
var readBuf: [3]T = undefined;
|
||||
const n = fifo.read(&readBuf);
|
||||
try testing.expectEqual(@as(usize, 3), n); // NOTE: It should be the number of items.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1351,8 +1351,7 @@ pub const Reader = struct {
|
|||
}
|
||||
r.pos += n;
|
||||
if (n > data_size) {
|
||||
io_reader.seek = 0;
|
||||
io_reader.end = n - data_size;
|
||||
io_reader.end += n - data_size;
|
||||
return data_size;
|
||||
}
|
||||
return n;
|
||||
|
|
@ -1386,8 +1385,7 @@ pub const Reader = struct {
|
|||
}
|
||||
r.pos += n;
|
||||
if (n > data_size) {
|
||||
io_reader.seek = 0;
|
||||
io_reader.end = n - data_size;
|
||||
io_reader.end += n - data_size;
|
||||
return data_size;
|
||||
}
|
||||
return n;
|
||||
|
|
|
|||
876	lib/std/http.zig
|
|
@ -1,14 +1,14 @@
|
|||
const builtin = @import("builtin");
|
||||
const std = @import("std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const Writer = std.Io.Writer;
|
||||
const File = std.fs.File;
|
||||
|
||||
pub const Client = @import("http/Client.zig");
|
||||
pub const Server = @import("http/Server.zig");
|
||||
pub const protocol = @import("http/protocol.zig");
|
||||
pub const HeadParser = @import("http/HeadParser.zig");
|
||||
pub const ChunkParser = @import("http/ChunkParser.zig");
|
||||
pub const HeaderIterator = @import("http/HeaderIterator.zig");
|
||||
pub const WebSocket = @import("http/WebSocket.zig");
|
||||
|
||||
pub const Version = enum {
|
||||
@"HTTP/1.0",
|
||||
|
|
@ -20,51 +20,32 @@ pub const Version = enum {
|
|||
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
|
||||
///
|
||||
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
|
||||
pub const Method = enum(u64) {
|
||||
GET = parse("GET"),
|
||||
HEAD = parse("HEAD"),
|
||||
POST = parse("POST"),
|
||||
PUT = parse("PUT"),
|
||||
DELETE = parse("DELETE"),
|
||||
CONNECT = parse("CONNECT"),
|
||||
OPTIONS = parse("OPTIONS"),
|
||||
TRACE = parse("TRACE"),
|
||||
PATCH = parse("PATCH"),
|
||||
|
||||
_,
|
||||
|
||||
/// Converts `s` into a type that may be used as a `Method` field.
|
||||
/// Asserts that `s` is 24 or fewer bytes.
|
||||
pub fn parse(s: []const u8) u64 {
|
||||
var x: u64 = 0;
|
||||
const len = @min(s.len, @sizeOf(@TypeOf(x)));
|
||||
@memcpy(std.mem.asBytes(&x)[0..len], s[0..len]);
|
||||
return x;
|
||||
}
|
||||
|
||||
pub fn format(self: Method, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const bytes: []const u8 = @ptrCast(&@intFromEnum(self));
|
||||
const str = std.mem.sliceTo(bytes, 0);
|
||||
try w.writeAll(str);
|
||||
}
|
||||
pub const Method = enum {
|
||||
GET,
|
||||
HEAD,
|
||||
POST,
|
||||
PUT,
|
||||
DELETE,
|
||||
CONNECT,
|
||||
OPTIONS,
|
||||
TRACE,
|
||||
PATCH,
|
||||
|
||||
/// Returns true if a request of this method is allowed to have a body
|
||||
/// Actual behavior from servers may vary and should still be checked
|
||||
pub fn requestHasBody(self: Method) bool {
|
||||
return switch (self) {
|
||||
pub fn requestHasBody(m: Method) bool {
|
||||
return switch (m) {
|
||||
.POST, .PUT, .PATCH => true,
|
||||
.GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
|
||||
else => true,
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns true if a response to this method is allowed to have a body
|
||||
/// Actual behavior from clients may vary and should still be checked
|
||||
pub fn responseHasBody(self: Method) bool {
|
||||
return switch (self) {
|
||||
pub fn responseHasBody(m: Method) bool {
|
||||
return switch (m) {
|
||||
.GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
|
||||
.HEAD, .PUT, .TRACE => false,
|
||||
else => true,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -73,11 +54,10 @@ pub const Method = enum(u64) {
|
|||
/// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
|
||||
///
|
||||
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
|
||||
pub fn safe(self: Method) bool {
|
||||
return switch (self) {
|
||||
pub fn safe(m: Method) bool {
|
||||
return switch (m) {
|
||||
.GET, .HEAD, .OPTIONS, .TRACE => true,
|
||||
.POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -88,11 +68,10 @@ pub const Method = enum(u64) {
|
|||
/// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
|
||||
///
|
||||
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
|
||||
pub fn idempotent(self: Method) bool {
|
||||
return switch (self) {
|
||||
pub fn idempotent(m: Method) bool {
|
||||
return switch (m) {
|
||||
.GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
|
||||
.CONNECT, .POST, .PATCH => false,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -102,11 +81,10 @@ pub const Method = enum(u64) {
|
|||
/// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
|
||||
///
|
||||
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
|
||||
pub fn cacheable(self: Method) bool {
|
||||
return switch (self) {
|
||||
pub fn cacheable(m: Method) bool {
|
||||
return switch (m) {
|
||||
.GET, .HEAD => true,
|
||||
.POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
|
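With `Method` now an exhaustive enum, the classification helpers shown above can be exercised directly; a minimal sketch:

const std = @import("std");

test "Method classification helpers" {
    try std.testing.expect(std.http.Method.POST.requestHasBody());
    try std.testing.expect(!std.http.Method.HEAD.requestHasBody());
    try std.testing.expect(std.http.Method.GET.responseHasBody());
    try std.testing.expect(!std.http.Method.HEAD.responseHasBody());
    try std.testing.expect(std.http.Method.GET.safe());
    try std.testing.expect(std.http.Method.PUT.idempotent());
    try std.testing.expect(std.http.Method.GET.cacheable());
}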
@ -296,13 +274,24 @@ pub const TransferEncoding = enum {
|
|||
};
|
||||
|
||||
pub const ContentEncoding = enum {
|
||||
identity,
|
||||
compress,
|
||||
@"x-compress",
|
||||
deflate,
|
||||
gzip,
|
||||
@"x-gzip",
|
||||
zstd,
|
||||
gzip,
|
||||
deflate,
|
||||
compress,
|
||||
identity,
|
||||
|
||||
pub fn fromString(s: []const u8) ?ContentEncoding {
|
||||
const map = std.StaticStringMap(ContentEncoding).initComptime(.{
|
||||
.{ "zstd", .zstd },
|
||||
.{ "gzip", .gzip },
|
||||
.{ "x-gzip", .gzip },
|
||||
.{ "deflate", .deflate },
|
||||
.{ "compress", .compress },
|
||||
.{ "x-compress", .compress },
|
||||
.{ "identity", .identity },
|
||||
});
|
||||
return map.get(s);
|
||||
}
|
||||
};
|
||||
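A small sketch of the new `fromString` helper, which folds the `x-` aliases into their canonical encodings; the behavior for unknown tokens (a null result from the map) is how `StaticStringMap.get` works:

const std = @import("std");

test "ContentEncoding.fromString folds aliases and rejects unknown tokens" {
    try std.testing.expectEqual(std.http.ContentEncoding.gzip, std.http.ContentEncoding.fromString("x-gzip").?);
    try std.testing.expectEqual(std.http.ContentEncoding.compress, std.http.ContentEncoding.fromString("x-compress").?);
    try std.testing.expect(std.http.ContentEncoding.fromString("br") == null);
}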
|
||||
pub const Connection = enum {
|
||||
|
|
@ -315,15 +304,790 @@ pub const Header = struct {
|
|||
value: []const u8,
|
||||
};
|
||||
|
||||
pub const Reader = struct {
|
||||
in: *std.Io.Reader,
|
||||
/// This is preallocated memory that might be used by `bodyReader`. That
|
||||
/// function might return a pointer to this field, or a different
|
||||
/// `*std.Io.Reader`. Advisable to not access this field directly.
|
||||
interface: std.Io.Reader,
|
||||
/// Keeps track of whether the stream is ready to accept a new request,
|
||||
/// making invalid API usage cause assertion failures rather than HTTP
|
||||
/// protocol violations.
|
||||
state: State,
|
||||
/// HTTP trailer bytes. These are at the end of a transfer-encoding:
|
||||
/// chunked message. This data is available only after calling one of the
|
||||
/// "end" functions and points to data inside the buffer of `in`, and is
|
||||
/// therefore invalidated on the next call to `receiveHead`, or any other
|
||||
/// read from `in`.
|
||||
trailers: []const u8 = &.{},
|
||||
body_err: ?BodyError = null,
|
||||
|
||||
pub const RemainingChunkLen = enum(u64) {
|
||||
head = 0,
|
||||
n = 1,
|
||||
rn = 2,
|
||||
_,
|
||||
|
||||
pub fn init(integer: u64) RemainingChunkLen {
|
||||
return @enumFromInt(integer);
|
||||
}
|
||||
|
||||
pub fn int(rcl: RemainingChunkLen) u64 {
|
||||
return @intFromEnum(rcl);
|
||||
}
|
||||
};
|
||||
|
||||
pub const State = union(enum) {
|
||||
/// The stream is available to be used for the first time, or reused.
|
||||
ready,
|
||||
received_head,
|
||||
/// The stream goes until the connection is closed.
|
||||
body_none,
|
||||
body_remaining_content_length: u64,
|
||||
body_remaining_chunk_len: RemainingChunkLen,
|
||||
/// The stream would be eligible for another HTTP request, however the
|
||||
/// client and server did not negotiate a persistent connection.
|
||||
closing,
|
||||
};
|
||||
|
||||
pub const BodyError = error{
|
||||
HttpChunkInvalid,
|
||||
HttpChunkTruncated,
|
||||
HttpHeadersOversize,
|
||||
};
|
||||
|
||||
pub const HeadError = error{
|
||||
/// Too many bytes of HTTP headers.
|
||||
///
|
||||
/// The HTTP specification suggests to respond with a 431 status code
|
||||
/// before closing the connection.
|
||||
HttpHeadersOversize,
|
||||
/// Partial HTTP request was received but the connection was closed
|
||||
/// before fully receiving the headers.
|
||||
HttpRequestTruncated,
|
||||
/// The client sent 0 bytes of headers before closing the stream. This
|
||||
/// happens when a keep-alive connection is finally closed.
|
||||
HttpConnectionClosing,
|
||||
/// Transitive error occurred reading from `in`.
|
||||
ReadFailed,
|
||||
};
|
||||
|
||||
/// Buffers the entire head inside `in`.
|
||||
///
|
||||
/// The resulting memory is invalidated by any subsequent consumption of
|
||||
/// the input stream.
|
||||
pub fn receiveHead(reader: *Reader) HeadError![]const u8 {
|
||||
reader.trailers = &.{};
|
||||
const in = reader.in;
|
||||
var hp: HeadParser = .{};
|
||||
var head_len: usize = 0;
|
||||
while (true) {
|
||||
if (in.buffer.len - head_len == 0) return error.HttpHeadersOversize;
|
||||
const remaining = in.buffered()[head_len..];
|
||||
if (remaining.len == 0) {
|
||||
in.fillMore() catch |err| switch (err) {
|
||||
error.EndOfStream => switch (head_len) {
|
||||
0 => return error.HttpConnectionClosing,
|
||||
else => return error.HttpRequestTruncated,
|
||||
},
|
||||
error.ReadFailed => return error.ReadFailed,
|
||||
};
|
||||
continue;
|
||||
}
|
||||
head_len += hp.feed(remaining);
|
||||
if (hp.state == .finished) {
|
||||
reader.state = .received_head;
|
||||
const head_buffer = in.buffered()[0..head_len];
|
||||
in.toss(head_len);
|
||||
return head_buffer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If compressed body has been negotiated this will return compressed bytes.
|
||||
///
|
||||
/// Asserts only called once and after `receiveHead`.
|
||||
///
|
||||
/// See also:
|
||||
/// * `interfaceDecompressing`
|
||||
pub fn bodyReader(
|
||||
reader: *Reader,
|
||||
buffer: []u8,
|
||||
transfer_encoding: TransferEncoding,
|
||||
content_length: ?u64,
|
||||
) *std.Io.Reader {
|
||||
assert(reader.state == .received_head);
|
||||
switch (transfer_encoding) {
|
||||
.chunked => {
|
||||
reader.state = .{ .body_remaining_chunk_len = .head };
|
||||
reader.interface = .{
|
||||
.buffer = buffer,
|
||||
.seek = 0,
|
||||
.end = 0,
|
||||
.vtable = &.{
|
||||
.stream = chunkedStream,
|
||||
.discard = chunkedDiscard,
|
||||
},
|
||||
};
|
||||
return &reader.interface;
|
||||
},
|
||||
.none => {
|
||||
if (content_length) |len| {
|
||||
reader.state = .{ .body_remaining_content_length = len };
|
||||
reader.interface = .{
|
||||
.buffer = buffer,
|
||||
.seek = 0,
|
||||
.end = 0,
|
||||
.vtable = &.{
|
||||
.stream = contentLengthStream,
|
||||
.discard = contentLengthDiscard,
|
||||
},
|
||||
};
|
||||
return &reader.interface;
|
||||
} else {
|
||||
reader.state = .body_none;
|
||||
return reader.in;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// If compressed body has been negotiated this will return decompressed bytes.
|
||||
///
|
||||
/// Asserts only called once and after `receiveHead`.
|
||||
///
|
||||
/// See also:
|
||||
/// * `interface`
|
||||
pub fn bodyReaderDecompressing(
|
||||
reader: *Reader,
|
||||
transfer_encoding: TransferEncoding,
|
||||
content_length: ?u64,
|
||||
content_encoding: ContentEncoding,
|
||||
decompressor: *Decompressor,
|
||||
decompression_buffer: []u8,
|
||||
) *std.Io.Reader {
|
||||
if (transfer_encoding == .none and content_length == null) {
|
||||
assert(reader.state == .received_head);
|
||||
reader.state = .body_none;
|
||||
switch (content_encoding) {
|
||||
.identity => {
|
||||
return reader.in;
|
||||
},
|
||||
.deflate => {
|
||||
decompressor.* = .{ .flate = .init(reader.in, .zlib, decompression_buffer) };
|
||||
return &decompressor.flate.reader;
|
||||
},
|
||||
.gzip => {
|
||||
decompressor.* = .{ .flate = .init(reader.in, .gzip, decompression_buffer) };
|
||||
return &decompressor.flate.reader;
|
||||
},
|
||||
.zstd => {
|
||||
decompressor.* = .{ .zstd = .init(reader.in, decompression_buffer, .{ .verify_checksum = false }) };
|
||||
return &decompressor.zstd.reader;
|
||||
},
|
||||
.compress => unreachable,
|
||||
}
|
||||
}
|
||||
const transfer_reader = bodyReader(reader, &.{}, transfer_encoding, content_length);
|
||||
return decompressor.init(transfer_reader, decompression_buffer, content_encoding);
|
||||
}
|
||||
|
||||
fn contentLengthStream(
|
||||
io_r: *std.Io.Reader,
|
||||
w: *Writer,
|
||||
limit: std.Io.Limit,
|
||||
) std.Io.Reader.StreamError!usize {
|
||||
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
|
||||
const remaining_content_length = &reader.state.body_remaining_content_length;
|
||||
const remaining = remaining_content_length.*;
|
||||
if (remaining == 0) {
|
||||
reader.state = .ready;
|
||||
return error.EndOfStream;
|
||||
}
|
||||
const n = try reader.in.stream(w, limit.min(.limited64(remaining)));
|
||||
remaining_content_length.* = remaining - n;
|
||||
return n;
|
||||
}
|
||||
|
||||
fn contentLengthDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
|
||||
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
|
||||
const remaining_content_length = &reader.state.body_remaining_content_length;
|
||||
const remaining = remaining_content_length.*;
|
||||
if (remaining == 0) {
|
||||
reader.state = .ready;
|
||||
return error.EndOfStream;
|
||||
}
|
||||
const n = try reader.in.discard(limit.min(.limited64(remaining)));
|
||||
remaining_content_length.* = remaining - n;
|
||||
return n;
|
||||
}
|
||||
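The two functions above use the new `std.Io.Reader` vtable pattern: the body reader recovers its parent `Reader` with `@fieldParentPtr` and forwards limited reads to `in`. A hedged illustration of that pattern in isolation; the type name and the budget semantics are invented for the example:

const std = @import("std");

/// A toy reader that exposes at most `remaining` bytes of an underlying stream,
/// mirroring the @fieldParentPtr + vtable shape of the body readers above.
const Budgeted = struct {
    in: *std.Io.Reader,
    remaining: u64,
    interface: std.Io.Reader,

    fn init(in: *std.Io.Reader, budget: u64, buffer: []u8) Budgeted {
        return .{
            .in = in,
            .remaining = budget,
            .interface = .{
                .buffer = buffer,
                .seek = 0,
                .end = 0,
                .vtable = &.{ .stream = stream, .discard = discard },
            },
        };
    }

    fn stream(io_r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
        const b: *Budgeted = @alignCast(@fieldParentPtr("interface", io_r));
        if (b.remaining == 0) return error.EndOfStream;
        const n = try b.in.stream(w, limit.min(.limited64(b.remaining)));
        b.remaining -= n;
        return n;
    }

    fn discard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
        const b: *Budgeted = @alignCast(@fieldParentPtr("interface", io_r));
        if (b.remaining == 0) return error.EndOfStream;
        const n = try b.in.discard(limit.min(.limited64(b.remaining)));
        b.remaining -= n;
        return n;
    }
};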
|
||||
fn chunkedStream(io_r: *std.Io.Reader, w: *Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
|
||||
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
|
||||
const chunk_len_ptr = switch (reader.state) {
|
||||
.ready => return error.EndOfStream,
|
||||
.body_remaining_chunk_len => |*x| x,
|
||||
else => unreachable,
|
||||
};
|
||||
return chunkedReadEndless(reader, w, limit, chunk_len_ptr) catch |err| switch (err) {
|
||||
error.ReadFailed => return error.ReadFailed,
|
||||
error.WriteFailed => return error.WriteFailed,
|
||||
error.EndOfStream => {
|
||||
reader.body_err = error.HttpChunkTruncated;
|
||||
return error.ReadFailed;
|
||||
},
|
||||
else => |e| {
|
||||
reader.body_err = e;
|
||||
return error.ReadFailed;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn chunkedReadEndless(
|
||||
reader: *Reader,
|
||||
w: *Writer,
|
||||
limit: std.Io.Limit,
|
||||
chunk_len_ptr: *RemainingChunkLen,
|
||||
) (BodyError || std.Io.Reader.StreamError)!usize {
|
||||
const in = reader.in;
|
||||
len: switch (chunk_len_ptr.*) {
|
||||
.head => {
|
||||
var cp: ChunkParser = .init;
|
||||
while (true) {
|
||||
const i = cp.feed(in.buffered());
|
||||
switch (cp.state) {
|
||||
.invalid => return error.HttpChunkInvalid,
|
||||
.data => {
|
||||
in.toss(i);
|
||||
break;
|
||||
},
|
||||
else => {
|
||||
in.toss(i);
|
||||
try in.fillMore();
|
||||
continue;
|
||||
},
|
||||
}
|
||||
}
|
||||
if (cp.chunk_len == 0) return parseTrailers(reader, 0);
|
||||
const n = try in.stream(w, limit.min(.limited64(cp.chunk_len)));
|
||||
chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
|
||||
return n;
|
||||
},
|
||||
.n => {
|
||||
if ((try in.peekByte()) != '\n') return error.HttpChunkInvalid;
|
||||
in.toss(1);
|
||||
continue :len .head;
|
||||
},
|
||||
.rn => {
|
||||
const rn = try in.peekArray(2);
|
||||
if (rn[0] != '\r' or rn[1] != '\n') return error.HttpChunkInvalid;
|
||||
in.toss(2);
|
||||
continue :len .head;
|
||||
},
|
||||
else => |remaining_chunk_len| {
|
||||
const n = try in.stream(w, limit.min(.limited64(@intFromEnum(remaining_chunk_len) - 2)));
|
||||
chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
|
||||
return n;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn chunkedDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
|
||||
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
|
||||
const chunk_len_ptr = switch (reader.state) {
|
||||
.ready => return error.EndOfStream,
|
||||
.body_remaining_chunk_len => |*x| x,
|
||||
else => unreachable,
|
||||
};
|
||||
return chunkedDiscardEndless(reader, limit, chunk_len_ptr) catch |err| switch (err) {
|
||||
error.ReadFailed => return error.ReadFailed,
|
||||
error.EndOfStream => {
|
||||
reader.body_err = error.HttpChunkTruncated;
|
||||
return error.ReadFailed;
|
||||
},
|
||||
else => |e| {
|
||||
reader.body_err = e;
|
||||
return error.ReadFailed;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn chunkedDiscardEndless(
|
||||
reader: *Reader,
|
||||
limit: std.Io.Limit,
|
||||
chunk_len_ptr: *RemainingChunkLen,
|
||||
) (BodyError || std.Io.Reader.Error)!usize {
|
||||
const in = reader.in;
|
||||
len: switch (chunk_len_ptr.*) {
|
||||
.head => {
|
||||
var cp: ChunkParser = .init;
|
||||
while (true) {
|
||||
const i = cp.feed(in.buffered());
|
||||
switch (cp.state) {
|
||||
.invalid => return error.HttpChunkInvalid,
|
||||
.data => {
|
||||
in.toss(i);
|
||||
break;
|
||||
},
|
||||
else => {
|
||||
in.toss(i);
|
||||
try in.fillMore();
|
||||
continue;
|
||||
},
|
||||
}
|
||||
}
|
||||
if (cp.chunk_len == 0) return parseTrailers(reader, 0);
|
||||
const n = try in.discard(limit.min(.limited64(cp.chunk_len)));
|
||||
chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
|
||||
return n;
|
||||
},
|
||||
.n => {
|
||||
if ((try in.peekByte()) != '\n') return error.HttpChunkInvalid;
|
||||
in.toss(1);
|
||||
continue :len .head;
|
||||
},
|
||||
.rn => {
|
||||
const rn = try in.peekArray(2);
|
||||
if (rn[0] != '\r' or rn[1] != '\n') return error.HttpChunkInvalid;
|
||||
in.toss(2);
|
||||
continue :len .head;
|
||||
},
|
||||
else => |remaining_chunk_len| {
|
||||
const n = try in.discard(limit.min(.limited64(remaining_chunk_len.int() - 2)));
|
||||
chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
|
||||
return n;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Called when next bytes in the stream are trailers, or "\r\n" to indicate
|
||||
/// end of chunked body.
|
||||
fn parseTrailers(reader: *Reader, amt_read: usize) (BodyError || std.Io.Reader.Error)!usize {
|
||||
const in = reader.in;
|
||||
const rn = try in.peekArray(2);
|
||||
if (rn[0] == '\r' and rn[1] == '\n') {
|
||||
in.toss(2);
|
||||
reader.state = .ready;
|
||||
assert(reader.trailers.len == 0);
|
||||
return amt_read;
|
||||
}
|
||||
var hp: HeadParser = .{ .state = .seen_rn };
|
||||
var trailers_len: usize = 2;
|
||||
while (true) {
|
||||
if (in.buffer.len - trailers_len == 0) return error.HttpHeadersOversize;
|
||||
const remaining = in.buffered()[trailers_len..];
|
||||
if (remaining.len == 0) {
|
||||
try in.fillMore();
|
||||
continue;
|
||||
}
|
||||
trailers_len += hp.feed(remaining);
|
||||
if (hp.state == .finished) {
|
||||
reader.state = .ready;
|
||||
reader.trailers = in.buffered()[0..trailers_len];
|
||||
in.toss(trailers_len);
|
||||
return amt_read;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
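// Hedged illustration (not part of this change): the chunk framing that
// `chunkedReadEndless` and `chunkedDiscardEndless` above consume. Feeding a
// header such as "5\r\n" to `ChunkParser` yields the data length and leaves
// the parser positioned at the chunk payload.
test "chunk header framing sketch" {
    var cp: ChunkParser = .init;
    const consumed = cp.feed("5\r\nhello");
    try std.testing.expectEqual(@as(usize, 3), consumed); // only "5\r\n" is header
    try std.testing.expectEqual(@as(u64, 5), cp.chunk_len);
    try std.testing.expect(cp.state == .data);
}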
|
||||
|
||||
pub const Decompressor = union(enum) {
|
||||
flate: std.compress.flate.Decompress,
|
||||
zstd: std.compress.zstd.Decompress,
|
||||
none: *std.Io.Reader,
|
||||
|
||||
pub fn init(
|
||||
decompressor: *Decompressor,
|
||||
transfer_reader: *std.Io.Reader,
|
||||
buffer: []u8,
|
||||
content_encoding: ContentEncoding,
|
||||
) *std.Io.Reader {
|
||||
switch (content_encoding) {
|
||||
.identity => {
|
||||
decompressor.* = .{ .none = transfer_reader };
|
||||
return transfer_reader;
|
||||
},
|
||||
.deflate => {
|
||||
decompressor.* = .{ .flate = .init(transfer_reader, .zlib, buffer) };
|
||||
return &decompressor.flate.reader;
|
||||
},
|
||||
.gzip => {
|
||||
decompressor.* = .{ .flate = .init(transfer_reader, .gzip, buffer) };
|
||||
return &decompressor.flate.reader;
|
||||
},
|
||||
.zstd => {
|
||||
decompressor.* = .{ .zstd = .init(transfer_reader, buffer, .{ .verify_checksum = false }) };
|
||||
return &decompressor.zstd.reader;
|
||||
},
|
||||
.compress => unreachable,
|
||||
}
|
||||
}
|
||||
};
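// Minimal sketch, assuming only the declarations above: how a caller might
// select the body decompressor returned by `Decompressor.init`. The function
// name is hypothetical; `.compress` must not be passed, since `init` treats
// it as unreachable.
fn decompressorSketch(
    d: *Decompressor,
    transfer_reader: *std.Io.Reader,
    buffer: []u8,
    content_encoding: ContentEncoding,
) *std.Io.Reader {
    // For `.identity` the transfer reader is returned as-is; otherwise the
    // returned reader yields decompressed bytes pulled through `d`.
    return d.init(transfer_reader, buffer, content_encoding);
}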
|
||||
|
||||
/// Request or response body.
|
||||
pub const BodyWriter = struct {
|
||||
/// Until the lifetime of `BodyWriter` ends, it is illegal to modify the
|
||||
/// state of this other than via methods of `BodyWriter`.
|
||||
http_protocol_output: *Writer,
|
||||
state: State,
|
||||
writer: Writer,
|
||||
|
||||
pub const Error = Writer.Error;
|
||||
|
||||
/// How many zeroes to reserve for hex-encoded chunk length.
|
||||
const chunk_len_digits = 8;
|
||||
const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
|
||||
const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";
|
||||
|
||||
comptime {
|
||||
assert(max_chunk_len == std.math.maxInt(u32));
|
||||
}
|
||||
|
||||
pub const State = union(enum) {
|
||||
/// End of connection signals the end of the stream.
|
||||
none,
|
||||
/// As a debugging utility, counts down to zero as bytes are written.
|
||||
content_length: u64,
|
||||
/// Each chunk is wrapped in a header and trailer.
|
||||
chunked: Chunked,
|
||||
/// Cleanly finished stream; connection can be reused.
|
||||
end,
|
||||
|
||||
pub const Chunked = union(enum) {
|
||||
/// Index to the start of the hex-encoded chunk length in the chunk
|
||||
/// header within the buffer of `BodyWriter.http_protocol_output`.
|
||||
/// Buffered chunk data starts here plus length of `chunk_header_template`.
|
||||
offset: usize,
|
||||
/// We are in the middle of a chunk and this is how many bytes are
|
||||
/// left until the next header. This includes +2 for "\r\n", and
|
||||
/// is zero for the beginning of the stream.
|
||||
chunk_len: usize,
|
||||
|
||||
pub const init: Chunked = .{ .chunk_len = 0 };
|
||||
};
|
||||
};
|
||||
|
||||
pub fn isEliding(w: *const BodyWriter) bool {
|
||||
return w.writer.vtable.drain == elidingDrain;
|
||||
}
|
||||
|
||||
/// Sends all buffered data across `BodyWriter.http_protocol_output`.
|
||||
pub fn flush(w: *BodyWriter) Error!void {
|
||||
const out = w.http_protocol_output;
|
||||
switch (w.state) {
|
||||
.end, .none, .content_length => return out.flush(),
|
||||
.chunked => |*chunked| switch (chunked.*) {
|
||||
.offset => |offset| {
|
||||
const chunk_len = out.end - offset - chunk_header_template.len;
|
||||
if (chunk_len > 0) {
|
||||
writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
|
||||
chunked.* = .{ .chunk_len = 2 };
|
||||
} else {
|
||||
out.end = offset;
|
||||
chunked.* = .{ .chunk_len = 0 };
|
||||
}
|
||||
try out.flush();
|
||||
},
|
||||
.chunk_len => return out.flush(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// When using content-length, asserts that the amount of data sent matches
|
||||
/// the value sent in the header, then flushes.
|
||||
///
|
||||
/// When using transfer-encoding: chunked, writes the end-of-stream message
|
||||
/// with empty trailers, then flushes the stream to the system. Asserts any
|
||||
/// started chunk has been completely finished.
|
||||
///
|
||||
/// Respects the value of `isEliding` to omit all data after the headers.
|
||||
///
|
||||
/// See also:
|
||||
/// * `endUnflushed`
|
||||
/// * `endChunked`
|
||||
pub fn end(w: *BodyWriter) Error!void {
|
||||
try endUnflushed(w);
|
||||
try w.http_protocol_output.flush();
|
||||
}
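// Hedged usage sketch (not from this commit): a typical BodyWriter round
// trip. `body_writer` stands in for a value obtained from the client or
// server request API; only methods defined in this struct are used.
fn bodyWriterSketch(body_writer: *BodyWriter) Error!void {
    try body_writer.writer.writeAll("hello, ");
    try body_writer.writer.writeAll("world\n");
    // With `.content_length` this asserts the advertised length was written;
    // with `.chunked` it emits the terminating "0\r\n\r\n" before flushing.
    try body_writer.end();
}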
|
||||
|
||||
/// When using content-length, asserts that the amount of data sent matches
|
||||
/// the value sent in the header.
|
||||
///
|
||||
/// Otherwise, transfer-encoding: chunked is being used, and it writes the
|
||||
/// end-of-stream message with empty trailers.
|
||||
///
|
||||
/// Respects the value of `isEliding` to omit all data after the headers.
|
||||
///
|
||||
/// See also:
|
||||
/// * `end`
|
||||
/// * `endChunked`
|
||||
pub fn endUnflushed(w: *BodyWriter) Error!void {
|
||||
switch (w.state) {
|
||||
.end => unreachable,
|
||||
.content_length => |len| {
|
||||
assert(len == 0); // Trips when end() called before all bytes written.
|
||||
w.state = .end;
|
||||
},
|
||||
.none => {},
|
||||
.chunked => return endChunkedUnflushed(w, .{}),
|
||||
}
|
||||
}
|
||||
|
||||
pub const EndChunkedOptions = struct {
|
||||
trailers: []const Header = &.{},
|
||||
};
|
||||
|
||||
/// Writes the end-of-stream message and any optional trailers, flushing
|
||||
/// the underlying stream.
|
||||
///
|
||||
/// Asserts that the BodyWriter is using transfer-encoding: chunked.
|
||||
///
|
||||
/// Respects the value of `isEliding` to omit all data after the headers.
|
||||
///
|
||||
/// See also:
|
||||
/// * `endChunkedUnflushed`
|
||||
/// * `end`
|
||||
pub fn endChunked(w: *BodyWriter, options: EndChunkedOptions) Error!void {
|
||||
try endChunkedUnflushed(w, options);
|
||||
try w.http_protocol_output.flush();
|
||||
}
|
||||
|
||||
/// Writes the end-of-stream message and any optional trailers.
|
||||
///
|
||||
/// Does not flush.
|
||||
///
|
||||
/// Asserts that the BodyWriter is using transfer-encoding: chunked.
|
||||
///
|
||||
/// Respects the value of `isEliding` to omit all data after the headers.
|
||||
///
|
||||
/// See also:
|
||||
/// * `endChunked`
|
||||
/// * `endUnflushed`
|
||||
/// * `end`
|
||||
pub fn endChunkedUnflushed(w: *BodyWriter, options: EndChunkedOptions) Error!void {
|
||||
const chunked = &w.state.chunked;
|
||||
if (w.isEliding()) {
|
||||
w.state = .end;
|
||||
return;
|
||||
}
|
||||
const bw = w.http_protocol_output;
|
||||
switch (chunked.*) {
|
||||
.offset => |offset| {
|
||||
const chunk_len = bw.end - offset - chunk_header_template.len;
|
||||
writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
|
||||
try bw.writeAll("\r\n");
|
||||
},
|
||||
.chunk_len => |chunk_len| switch (chunk_len) {
|
||||
0 => {},
|
||||
1 => try bw.writeByte('\n'),
|
||||
2 => try bw.writeAll("\r\n"),
|
||||
else => unreachable, // An earlier write call indicated more data would follow.
|
||||
},
|
||||
}
|
||||
try bw.writeAll("0\r\n");
|
||||
for (options.trailers) |trailer| {
|
||||
try bw.writeAll(trailer.name);
|
||||
try bw.writeAll(": ");
|
||||
try bw.writeAll(trailer.value);
|
||||
try bw.writeAll("\r\n");
|
||||
}
|
||||
try bw.writeAll("\r\n");
|
||||
w.state = .end;
|
||||
}
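// Hedged sketch of ending a chunked body with a trailer; the header name and
// value are made up for illustration and are not part of this change.
fn endWithTrailerSketch(w: *BodyWriter) Error!void {
    try w.endChunked(.{ .trailers = &.{
        .{ .name = "x-example-digest", .value = "sha256:..." },
    } });
}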
|
||||
|
||||
pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const out = bw.http_protocol_output;
|
||||
const n = try out.writeSplatHeader(w.buffered(), data, splat);
|
||||
bw.state.content_length -= n;
|
||||
return w.consume(n);
|
||||
}
|
||||
|
||||
pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const out = bw.http_protocol_output;
|
||||
const n = try out.writeSplatHeader(w.buffered(), data, splat);
|
||||
return w.consume(n);
|
||||
}
|
||||
|
||||
pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
const slice = data[0 .. data.len - 1];
|
||||
const pattern = data[slice.len];
|
||||
var written: usize = pattern.len * splat;
|
||||
for (slice) |bytes| written += bytes.len;
|
||||
switch (bw.state) {
|
||||
.content_length => |*len| len.* -= written + w.end,
|
||||
else => {},
|
||||
}
|
||||
w.end = 0;
|
||||
return written;
|
||||
}
|
||||
|
||||
pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
if (File.Handle == void) return error.Unimplemented;
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
|
||||
switch (bw.state) {
|
||||
.content_length => |*len| len.* -= w.end,
|
||||
else => {},
|
||||
}
|
||||
w.end = 0;
|
||||
if (limit == .nothing) return 0;
|
||||
if (file_reader.getSize()) |size| {
|
||||
const n = limit.minInt64(size - file_reader.pos);
|
||||
if (n == 0) return error.EndOfStream;
|
||||
file_reader.seekBy(@intCast(n)) catch return error.Unimplemented;
|
||||
switch (bw.state) {
|
||||
.content_length => |*len| len.* -= n,
|
||||
else => {},
|
||||
}
|
||||
return n;
|
||||
} else |_| {
|
||||
// Error is observable on `file_reader` instance, and it is better to
|
||||
// treat the file as a pipe.
|
||||
return error.Unimplemented;
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `null` if size cannot be computed without making any syscalls.
|
||||
pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const out = bw.http_protocol_output;
|
||||
const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
|
||||
return w.consume(n);
|
||||
}
|
||||
|
||||
pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const out = bw.http_protocol_output;
|
||||
const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
|
||||
bw.state.content_length -= n;
|
||||
return w.consume(n);
|
||||
}
|
||||
|
||||
pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
|
||||
// If the file size is unknown, we cannot lower to a `sendFile` since we would
|
||||
// have to flush the chunk header before knowing the chunk length.
|
||||
return error.Unimplemented;
|
||||
};
|
||||
const out = bw.http_protocol_output;
|
||||
const chunked = &bw.state.chunked;
|
||||
state: switch (chunked.*) {
|
||||
.offset => |off| {
|
||||
// TODO: is it better perf to read small files into the buffer?
|
||||
const buffered_len = out.end - off - chunk_header_template.len;
|
||||
const chunk_len = data_len + buffered_len;
|
||||
writeHex(out.buffer[off..][0..chunk_len_digits], chunk_len);
|
||||
const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
|
||||
chunked.* = .{ .chunk_len = data_len + 2 - n };
|
||||
return w.consume(n);
|
||||
},
|
||||
.chunk_len => |chunk_len| l: switch (chunk_len) {
|
||||
0 => {
|
||||
const off = out.end;
|
||||
const header_buf = try out.writableArray(chunk_header_template.len);
|
||||
@memcpy(header_buf, chunk_header_template);
|
||||
chunked.* = .{ .offset = off };
|
||||
continue :state .{ .offset = off };
|
||||
},
|
||||
1 => {
|
||||
try out.writeByte('\n');
|
||||
chunked.chunk_len = 0;
|
||||
continue :l 0;
|
||||
},
|
||||
2 => {
|
||||
try out.writeByte('\r');
|
||||
chunked.chunk_len = 1;
|
||||
continue :l 1;
|
||||
},
|
||||
else => {
|
||||
const new_limit = limit.min(.limited(chunk_len - 2));
|
||||
const n = try out.sendFileHeader(w.buffered(), file_reader, new_limit);
|
||||
chunked.chunk_len = chunk_len - n;
|
||||
return w.consume(n);
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
|
||||
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
|
||||
assert(!bw.isEliding());
|
||||
const out = bw.http_protocol_output;
|
||||
const data_len = w.end + Writer.countSplat(data, splat);
|
||||
const chunked = &bw.state.chunked;
|
||||
state: switch (chunked.*) {
|
||||
.offset => |offset| {
|
||||
if (out.unusedCapacityLen() >= data_len) {
|
||||
return w.consume(out.writeSplatHeader(w.buffered(), data, splat) catch unreachable);
|
||||
}
|
||||
const buffered_len = out.end - offset - chunk_header_template.len;
|
||||
const chunk_len = data_len + buffered_len;
|
||||
writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
|
||||
const n = try out.writeSplatHeader(w.buffered(), data, splat);
|
||||
chunked.* = .{ .chunk_len = data_len + 2 - n };
|
||||
return w.consume(n);
|
||||
},
|
||||
.chunk_len => |chunk_len| l: switch (chunk_len) {
|
||||
0 => {
|
||||
const offset = out.end;
|
||||
const header_buf = try out.writableArray(chunk_header_template.len);
|
||||
@memcpy(header_buf, chunk_header_template);
|
||||
chunked.* = .{ .offset = offset };
|
||||
continue :state .{ .offset = offset };
|
||||
},
|
||||
1 => {
|
||||
try out.writeByte('\n');
|
||||
chunked.chunk_len = 0;
|
||||
continue :l 0;
|
||||
},
|
||||
2 => {
|
||||
try out.writeByte('\r');
|
||||
chunked.chunk_len = 1;
|
||||
continue :l 1;
|
||||
},
|
||||
else => {
|
||||
const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(chunk_len - 2));
|
||||
chunked.chunk_len = chunk_len - n;
|
||||
return w.consume(n);
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes an integer as base 16 to `buf`, right-aligned, assuming the
|
||||
/// buffer has already been filled with zeroes.
|
||||
fn writeHex(buf: []u8, x: usize) void {
|
||||
assert(std.mem.allEqual(u8, buf, '0'));
|
||||
const base = 16;
|
||||
var index: usize = buf.len;
|
||||
var a = x;
|
||||
while (a > 0) {
|
||||
const digit = a % base;
|
||||
index -= 1;
|
||||
buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
|
||||
a /= base;
|
||||
}
|
||||
}
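// Illustrative check of the helper above (not in the original commit): a
// 1 KiB chunk length rendered into a fresh header template.
test writeHex {
    var header = chunk_header_template.*;
    writeHex(header[0..chunk_len_digits], 1024);
    try std.testing.expectEqualStrings("00000400\r\n", &header);
}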
|
||||
};
|
||||
|
||||
test {
|
||||
_ = Server;
|
||||
_ = Status;
|
||||
_ = Method;
|
||||
_ = ChunkParser;
|
||||
_ = HeadParser;
|
||||
|
||||
if (builtin.os.tag != .wasi) {
|
||||
_ = Client;
|
||||
_ = Method;
|
||||
_ = Server;
|
||||
_ = Status;
|
||||
_ = HeadParser;
|
||||
_ = ChunkParser;
|
||||
_ = WebSocket;
|
||||
_ = @import("http/test.zig");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,8 @@
|
|||
//! Parser for transfer-encoding: chunked.
|
||||
|
||||
const ChunkParser = @This();
|
||||
const std = @import("std");
|
||||
|
||||
state: State,
|
||||
chunk_len: u64,
|
||||
|
||||
|
|
@@ -97,9 +100,6 @@ pub fn feed(p: *ChunkParser, bytes: []const u8) usize {
|
|||
return bytes.len;
|
||||
}
|
||||
|
||||
const ChunkParser = @This();
|
||||
const std = @import("std");
|
||||
|
||||
test feed {
|
||||
const testing = std.testing;
|
||||
|
||||
|
|
|
|||
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@@ -1,246 +0,0 @@
|
|||
//! See https://tools.ietf.org/html/rfc6455
|
||||
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("std");
|
||||
const WebSocket = @This();
|
||||
const assert = std.debug.assert;
|
||||
const native_endian = builtin.cpu.arch.endian();
|
||||
|
||||
key: []const u8,
|
||||
request: *std.http.Server.Request,
|
||||
recv_fifo: std.fifo.LinearFifo(u8, .Slice),
|
||||
reader: std.io.AnyReader,
|
||||
response: std.http.Server.Response,
|
||||
/// Number of bytes that have been peeked but not discarded yet.
|
||||
outstanding_len: usize,
|
||||
|
||||
pub const InitError = error{WebSocketUpgradeMissingKey} ||
|
||||
std.http.Server.Request.ReaderError;
|
||||
|
||||
pub fn init(
|
||||
request: *std.http.Server.Request,
|
||||
send_buffer: []u8,
|
||||
recv_buffer: []align(4) u8,
|
||||
) InitError!?WebSocket {
|
||||
switch (request.head.version) {
|
||||
.@"HTTP/1.0" => return null,
|
||||
.@"HTTP/1.1" => if (request.head.method != .GET) return null,
|
||||
}
|
||||
|
||||
var sec_websocket_key: ?[]const u8 = null;
|
||||
var upgrade_websocket: bool = false;
|
||||
var it = request.iterateHeaders();
|
||||
while (it.next()) |header| {
|
||||
if (std.ascii.eqlIgnoreCase(header.name, "sec-websocket-key")) {
|
||||
sec_websocket_key = header.value;
|
||||
} else if (std.ascii.eqlIgnoreCase(header.name, "upgrade")) {
|
||||
if (!std.ascii.eqlIgnoreCase(header.value, "websocket"))
|
||||
return null;
|
||||
upgrade_websocket = true;
|
||||
}
|
||||
}
|
||||
if (!upgrade_websocket)
|
||||
return null;
|
||||
|
||||
const key = sec_websocket_key orelse return error.WebSocketUpgradeMissingKey;
|
||||
|
||||
var sha1 = std.crypto.hash.Sha1.init(.{});
|
||||
sha1.update(key);
|
||||
sha1.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
|
||||
var digest: [std.crypto.hash.Sha1.digest_length]u8 = undefined;
|
||||
sha1.final(&digest);
|
||||
var base64_digest: [28]u8 = undefined;
|
||||
assert(std.base64.standard.Encoder.encode(&base64_digest, &digest).len == base64_digest.len);
|
||||
|
||||
request.head.content_length = std.math.maxInt(u64);
|
||||
|
||||
return .{
|
||||
.key = key,
|
||||
.recv_fifo = std.fifo.LinearFifo(u8, .Slice).init(recv_buffer),
|
||||
.reader = try request.reader(),
|
||||
.response = request.respondStreaming(.{
|
||||
.send_buffer = send_buffer,
|
||||
.respond_options = .{
|
||||
.status = .switching_protocols,
|
||||
.extra_headers = &.{
|
||||
.{ .name = "upgrade", .value = "websocket" },
|
||||
.{ .name = "connection", .value = "upgrade" },
|
||||
.{ .name = "sec-websocket-accept", .value = &base64_digest },
|
||||
},
|
||||
.transfer_encoding = .none,
|
||||
},
|
||||
}),
|
||||
.request = request,
|
||||
.outstanding_len = 0,
|
||||
};
|
||||
}
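// Hedged aside (not part of the original file): the accept value computed in
// `init` follows RFC 6455; with the RFC's sample key the digest base64-encodes
// to the well-known value below.
test "Sec-WebSocket-Accept sample" {
    var sha1 = std.crypto.hash.Sha1.init(.{});
    sha1.update("dGhlIHNhbXBsZSBub25jZQ==");
    sha1.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
    var digest: [std.crypto.hash.Sha1.digest_length]u8 = undefined;
    sha1.final(&digest);
    var accept: [28]u8 = undefined;
    _ = std.base64.standard.Encoder.encode(&accept, &digest);
    try std.testing.expectEqualStrings("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", &accept);
}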
|
||||
|
||||
pub const Header0 = packed struct(u8) {
|
||||
opcode: Opcode,
|
||||
rsv3: u1 = 0,
|
||||
rsv2: u1 = 0,
|
||||
rsv1: u1 = 0,
|
||||
fin: bool,
|
||||
};
|
||||
|
||||
pub const Header1 = packed struct(u8) {
|
||||
payload_len: enum(u7) {
|
||||
len16 = 126,
|
||||
len64 = 127,
|
||||
_,
|
||||
},
|
||||
mask: bool,
|
||||
};
|
||||
|
||||
pub const Opcode = enum(u4) {
|
||||
continuation = 0,
|
||||
text = 1,
|
||||
binary = 2,
|
||||
connection_close = 8,
|
||||
ping = 9,
|
||||
/// "A Pong frame MAY be sent unsolicited. This serves as a unidirectional
|
||||
/// heartbeat. A response to an unsolicited Pong frame is not expected."
|
||||
pong = 10,
|
||||
_,
|
||||
};
|
||||
|
||||
pub const ReadSmallTextMessageError = error{
|
||||
ConnectionClose,
|
||||
UnexpectedOpCode,
|
||||
MessageTooBig,
|
||||
MissingMaskBit,
|
||||
} || RecvError;
|
||||
|
||||
pub const SmallMessage = struct {
|
||||
/// Can be text, binary, or ping.
|
||||
opcode: Opcode,
|
||||
data: []u8,
|
||||
};
|
||||
|
||||
/// Reads the next message from the WebSocket stream, failing if the message does not fit
|
||||
/// into `recv_buffer`.
|
||||
pub fn readSmallMessage(ws: *WebSocket) ReadSmallTextMessageError!SmallMessage {
|
||||
while (true) {
|
||||
const header_bytes = (try recv(ws, 2))[0..2];
|
||||
const h0: Header0 = @bitCast(header_bytes[0]);
|
||||
const h1: Header1 = @bitCast(header_bytes[1]);
|
||||
|
||||
switch (h0.opcode) {
|
||||
.text, .binary, .pong, .ping => {},
|
||||
.connection_close => return error.ConnectionClose,
|
||||
.continuation => return error.UnexpectedOpCode,
|
||||
_ => return error.UnexpectedOpCode,
|
||||
}
|
||||
|
||||
if (!h0.fin) return error.MessageTooBig;
|
||||
if (!h1.mask) return error.MissingMaskBit;
|
||||
|
||||
const len: usize = switch (h1.payload_len) {
|
||||
.len16 => try recvReadInt(ws, u16),
|
||||
.len64 => std.math.cast(usize, try recvReadInt(ws, u64)) orelse return error.MessageTooBig,
|
||||
else => @intFromEnum(h1.payload_len),
|
||||
};
|
||||
if (len > ws.recv_fifo.buf.len) return error.MessageTooBig;
|
||||
|
||||
const mask: u32 = @bitCast((try recv(ws, 4))[0..4].*);
|
||||
const payload = try recv(ws, len);
|
||||
|
||||
// Skip pongs.
|
||||
if (h0.opcode == .pong) continue;
|
||||
|
||||
// The last item may contain a partial word of unused data.
|
||||
const floored_len = (payload.len / 4) * 4;
|
||||
const u32_payload: []align(1) u32 = @alignCast(std.mem.bytesAsSlice(u32, payload[0..floored_len]));
|
||||
for (u32_payload) |*elem| elem.* ^= mask;
|
||||
const mask_bytes = std.mem.asBytes(&mask)[0 .. payload.len - floored_len];
|
||||
for (payload[floored_len..], mask_bytes) |*leftover, m| leftover.* ^= m;
|
||||
|
||||
return .{
|
||||
.opcode = h0.opcode,
|
||||
.data = payload,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const RecvError = std.http.Server.Request.ReadError || error{EndOfStream};
|
||||
|
||||
fn recv(ws: *WebSocket, len: usize) RecvError![]u8 {
|
||||
ws.recv_fifo.discard(ws.outstanding_len);
|
||||
assert(len <= ws.recv_fifo.buf.len);
|
||||
if (len > ws.recv_fifo.count) {
|
||||
const small_buf = ws.recv_fifo.writableSlice(0);
|
||||
const needed = len - ws.recv_fifo.count;
|
||||
const buf = if (small_buf.len >= needed) small_buf else b: {
|
||||
ws.recv_fifo.realign();
|
||||
break :b ws.recv_fifo.writableSlice(0);
|
||||
};
|
||||
const n = try @as(RecvError!usize, @errorCast(ws.reader.readAtLeast(buf, needed)));
|
||||
if (n < needed) return error.EndOfStream;
|
||||
ws.recv_fifo.update(n);
|
||||
}
|
||||
ws.outstanding_len = len;
|
||||
// TODO: improve the std lib API so this cast isn't necessary.
|
||||
return @constCast(ws.recv_fifo.readableSliceOfLen(len));
|
||||
}
|
||||
|
||||
fn recvReadInt(ws: *WebSocket, comptime I: type) !I {
|
||||
const unswapped: I = @bitCast((try recv(ws, @sizeOf(I)))[0..@sizeOf(I)].*);
|
||||
return switch (native_endian) {
|
||||
.little => @byteSwap(unswapped),
|
||||
.big => unswapped,
|
||||
};
|
||||
}
|
||||
|
||||
pub const WriteError = std.http.Server.Response.WriteError;
|
||||
|
||||
pub fn writeMessage(ws: *WebSocket, message: []const u8, opcode: Opcode) WriteError!void {
|
||||
const iovecs: [1]std.posix.iovec_const = .{
|
||||
.{ .base = message.ptr, .len = message.len },
|
||||
};
|
||||
return writeMessagev(ws, &iovecs, opcode);
|
||||
}
|
||||
|
||||
pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opcode: Opcode) WriteError!void {
|
||||
const total_len = l: {
|
||||
var total_len: u64 = 0;
|
||||
for (message) |iovec| total_len += iovec.len;
|
||||
break :l total_len;
|
||||
};
|
||||
|
||||
var header_buf: [2 + 8]u8 = undefined;
|
||||
header_buf[0] = @bitCast(@as(Header0, .{
|
||||
.opcode = opcode,
|
||||
.fin = true,
|
||||
}));
|
||||
const header = switch (total_len) {
|
||||
0...125 => blk: {
|
||||
header_buf[1] = @bitCast(@as(Header1, .{
|
||||
.payload_len = @enumFromInt(total_len),
|
||||
.mask = false,
|
||||
}));
|
||||
break :blk header_buf[0..2];
|
||||
},
|
||||
126...0xffff => blk: {
|
||||
header_buf[1] = @bitCast(@as(Header1, .{
|
||||
.payload_len = .len16,
|
||||
.mask = false,
|
||||
}));
|
||||
std.mem.writeInt(u16, header_buf[2..4], @intCast(total_len), .big);
|
||||
break :blk header_buf[0..4];
|
||||
},
|
||||
else => blk: {
|
||||
header_buf[1] = @bitCast(@as(Header1, .{
|
||||
.payload_len = .len64,
|
||||
.mask = false,
|
||||
}));
|
||||
std.mem.writeInt(u64, header_buf[2..10], total_len, .big);
|
||||
break :blk header_buf[0..10];
|
||||
},
|
||||
};
|
||||
|
||||
const response = &ws.response;
|
||||
try response.writeAll(header);
|
||||
for (message) |iovec|
|
||||
try response.writeAll(iovec.base[0..iovec.len]);
|
||||
try response.flush();
|
||||
}
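// Hedged restatement (not in the original file) of the frame-length encoding
// used by `writeMessagev` above; `frameHeaderLen` is a hypothetical helper.
fn frameHeaderLen(total_len: u64) usize {
    return switch (total_len) {
        0...125 => 2, // 7-bit length fits in the second header byte
        126...0xffff => 4, // 2-byte extended length
        else => 10, // 8-byte extended length
    };
}

test frameHeaderLen {
    try std.testing.expectEqual(@as(usize, 2), frameHeaderLen(100));
    try std.testing.expectEqual(@as(usize, 4), frameHeaderLen(1_000));
    try std.testing.expectEqual(@as(usize, 10), frameHeaderLen(100_000));
}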
|
||||
|
|
@@ -1,464 +0,0 @@
|
|||
const std = @import("../std.zig");
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub const State = enum {
|
||||
invalid,
|
||||
|
||||
// Begin header and trailer parsing states.
|
||||
|
||||
start,
|
||||
seen_n,
|
||||
seen_r,
|
||||
seen_rn,
|
||||
seen_rnr,
|
||||
finished,
|
||||
|
||||
// Begin transfer-encoding: chunked parsing states.
|
||||
|
||||
chunk_head_size,
|
||||
chunk_head_ext,
|
||||
chunk_head_r,
|
||||
chunk_data,
|
||||
chunk_data_suffix,
|
||||
chunk_data_suffix_r,
|
||||
|
||||
/// Returns true if the parser is in a content state (ie. not waiting for more headers).
|
||||
pub fn isContent(self: State) bool {
|
||||
return switch (self) {
|
||||
.invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => false,
|
||||
.finished, .chunk_head_size, .chunk_head_ext, .chunk_head_r, .chunk_data, .chunk_data_suffix, .chunk_data_suffix_r => true,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const HeadersParser = struct {
|
||||
state: State = .start,
|
||||
/// A fixed buffer of len `max_header_bytes`.
|
||||
/// Pointers into this buffer are not stable until after a message is complete.
|
||||
header_bytes_buffer: []u8,
|
||||
header_bytes_len: u32,
|
||||
next_chunk_length: u64,
|
||||
/// `false`: headers. `true`: trailers.
|
||||
done: bool,
|
||||
|
||||
/// Initializes the parser with a provided buffer `buf`.
|
||||
pub fn init(buf: []u8) HeadersParser {
|
||||
return .{
|
||||
.header_bytes_buffer = buf,
|
||||
.header_bytes_len = 0,
|
||||
.done = false,
|
||||
.next_chunk_length = 0,
|
||||
};
|
||||
}
|
||||
|
||||
/// Reinitialize the parser.
|
||||
/// Asserts the parser is in the "done" state.
|
||||
pub fn reset(hp: *HeadersParser) void {
|
||||
assert(hp.done);
|
||||
hp.* = .{
|
||||
.state = .start,
|
||||
.header_bytes_buffer = hp.header_bytes_buffer,
|
||||
.header_bytes_len = 0,
|
||||
.done = false,
|
||||
.next_chunk_length = 0,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn get(hp: HeadersParser) []u8 {
|
||||
return hp.header_bytes_buffer[0..hp.header_bytes_len];
|
||||
}
|
||||
|
||||
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
|
||||
var hp: std.http.HeadParser = .{
|
||||
.state = switch (r.state) {
|
||||
.start => .start,
|
||||
.seen_n => .seen_n,
|
||||
.seen_r => .seen_r,
|
||||
.seen_rn => .seen_rn,
|
||||
.seen_rnr => .seen_rnr,
|
||||
.finished => .finished,
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
const result = hp.feed(bytes);
|
||||
r.state = switch (hp.state) {
|
||||
.start => .start,
|
||||
.seen_n => .seen_n,
|
||||
.seen_r => .seen_r,
|
||||
.seen_rn => .seen_rn,
|
||||
.seen_rnr => .seen_rnr,
|
||||
.finished => .finished,
|
||||
};
|
||||
return @intCast(result);
|
||||
}
|
||||
|
||||
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
|
||||
var cp: std.http.ChunkParser = .{
|
||||
.state = switch (r.state) {
|
||||
.chunk_head_size => .head_size,
|
||||
.chunk_head_ext => .head_ext,
|
||||
.chunk_head_r => .head_r,
|
||||
.chunk_data => .data,
|
||||
.chunk_data_suffix => .data_suffix,
|
||||
.chunk_data_suffix_r => .data_suffix_r,
|
||||
.invalid => .invalid,
|
||||
else => unreachable,
|
||||
},
|
||||
.chunk_len = r.next_chunk_length,
|
||||
};
|
||||
const result = cp.feed(bytes);
|
||||
r.state = switch (cp.state) {
|
||||
.head_size => .chunk_head_size,
|
||||
.head_ext => .chunk_head_ext,
|
||||
.head_r => .chunk_head_r,
|
||||
.data => .chunk_data,
|
||||
.data_suffix => .chunk_data_suffix,
|
||||
.data_suffix_r => .chunk_data_suffix_r,
|
||||
.invalid => .invalid,
|
||||
};
|
||||
r.next_chunk_length = cp.chunk_len;
|
||||
return @intCast(result);
|
||||
}
|
||||
|
||||
/// Returns whether or not the parser has finished parsing a complete
|
||||
/// message. A message is only complete after the entire body has been read
|
||||
/// and any trailing headers have been parsed.
|
||||
pub fn isComplete(r: *HeadersParser) bool {
|
||||
return r.done and r.state == .finished;
|
||||
}
|
||||
|
||||
pub const CheckCompleteHeadError = error{HttpHeadersOversize};
|
||||
|
||||
/// Pushes `in` into the parser. Returns the number of bytes consumed by
|
||||
/// the header. Any header bytes are appended to `header_bytes_buffer`.
|
||||
pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32 {
|
||||
if (hp.state.isContent()) return 0;
|
||||
|
||||
const i = hp.findHeadersEnd(in);
|
||||
const data = in[0..i];
|
||||
if (hp.header_bytes_len + data.len > hp.header_bytes_buffer.len)
|
||||
return error.HttpHeadersOversize;
|
||||
|
||||
@memcpy(hp.header_bytes_buffer[hp.header_bytes_len..][0..data.len], data);
|
||||
hp.header_bytes_len += @intCast(data.len);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
pub const ReadError = error{
|
||||
HttpChunkInvalid,
|
||||
};
|
||||
|
||||
/// Reads the body of the message into `buffer`. Returns the number of
|
||||
/// bytes placed in the buffer.
|
||||
///
|
||||
/// If `skip` is true, the buffer will be unused and the body will be skipped.
|
||||
///
|
||||
/// See `std.http.Client.Connection` for an example of `conn`.
|
||||
pub fn read(r: *HeadersParser, conn: anytype, buffer: []u8, skip: bool) !usize {
|
||||
assert(r.state.isContent());
|
||||
if (r.done) return 0;
|
||||
|
||||
var out_index: usize = 0;
|
||||
while (true) {
|
||||
switch (r.state) {
|
||||
.invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => unreachable,
|
||||
.finished => {
|
||||
const data_avail = r.next_chunk_length;
|
||||
|
||||
if (skip) {
|
||||
conn.fill() catch |err| switch (err) {
|
||||
error.EndOfStream => {
|
||||
r.done = true;
|
||||
return 0;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const nread = @min(conn.peek().len, data_avail);
|
||||
conn.drop(@intCast(nread));
|
||||
r.next_chunk_length -= nread;
|
||||
|
||||
if (r.next_chunk_length == 0 or nread == 0) r.done = true;
|
||||
|
||||
return out_index;
|
||||
} else if (out_index < buffer.len) {
|
||||
const out_avail = buffer.len - out_index;
|
||||
|
||||
const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
|
||||
const nread = try conn.read(buffer[0..can_read]);
|
||||
r.next_chunk_length -= nread;
|
||||
|
||||
if (r.next_chunk_length == 0 or nread == 0) r.done = true;
|
||||
|
||||
return nread;
|
||||
} else {
|
||||
return out_index;
|
||||
}
|
||||
},
|
||||
.chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
|
||||
conn.fill() catch |err| switch (err) {
|
||||
error.EndOfStream => {
|
||||
r.done = true;
|
||||
return 0;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const i = r.findChunkedLen(conn.peek());
|
||||
conn.drop(@intCast(i));
|
||||
|
||||
switch (r.state) {
|
||||
.invalid => return error.HttpChunkInvalid,
|
||||
.chunk_data => if (r.next_chunk_length == 0) {
|
||||
if (std.mem.eql(u8, conn.peek(), "\r\n")) {
|
||||
r.state = .finished;
|
||||
conn.drop(2);
|
||||
} else {
|
||||
// The trailer section is formatted identically
|
||||
// to the header section.
|
||||
r.state = .seen_rn;
|
||||
}
|
||||
r.done = true;
|
||||
|
||||
return out_index;
|
||||
},
|
||||
else => return out_index,
|
||||
}
|
||||
|
||||
continue;
|
||||
},
|
||||
.chunk_data => {
|
||||
const data_avail = r.next_chunk_length;
|
||||
const out_avail = buffer.len - out_index;
|
||||
|
||||
if (skip) {
|
||||
conn.fill() catch |err| switch (err) {
|
||||
error.EndOfStream => {
|
||||
r.done = true;
|
||||
return 0;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const nread = @min(conn.peek().len, data_avail);
|
||||
conn.drop(@intCast(nread));
|
||||
r.next_chunk_length -= nread;
|
||||
} else if (out_avail > 0) {
|
||||
const can_read: usize = @intCast(@min(data_avail, out_avail));
|
||||
const nread = try conn.read(buffer[out_index..][0..can_read]);
|
||||
r.next_chunk_length -= nread;
|
||||
out_index += nread;
|
||||
}
|
||||
|
||||
if (r.next_chunk_length == 0) {
|
||||
r.state = .chunk_data_suffix;
|
||||
continue;
|
||||
}
|
||||
|
||||
return out_index;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
inline fn int16(array: *const [2]u8) u16 {
|
||||
return @as(u16, @bitCast(array.*));
|
||||
}
|
||||
|
||||
inline fn int24(array: *const [3]u8) u24 {
|
||||
return @as(u24, @bitCast(array.*));
|
||||
}
|
||||
|
||||
inline fn int32(array: *const [4]u8) u32 {
|
||||
return @as(u32, @bitCast(array.*));
|
||||
}
|
||||
|
||||
inline fn intShift(comptime T: type, x: anytype) T {
|
||||
switch (@import("builtin").cpu.arch.endian()) {
|
||||
.little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
|
||||
.big => return @as(T, @truncate(x)),
|
||||
}
|
||||
}
|
||||
|
||||
/// A buffered (and peekable) Connection.
|
||||
const MockBufferedConnection = struct {
|
||||
pub const buffer_size = 0x2000;
|
||||
|
||||
conn: std.io.FixedBufferStream([]const u8),
|
||||
buf: [buffer_size]u8 = undefined,
|
||||
start: u16 = 0,
|
||||
end: u16 = 0,
|
||||
|
||||
pub fn fill(conn: *MockBufferedConnection) ReadError!void {
|
||||
if (conn.end != conn.start) return;
|
||||
|
||||
const nread = try conn.conn.read(conn.buf[0..]);
|
||||
if (nread == 0) return error.EndOfStream;
|
||||
conn.start = 0;
|
||||
conn.end = @as(u16, @truncate(nread));
|
||||
}
|
||||
|
||||
pub fn peek(conn: *MockBufferedConnection) []const u8 {
|
||||
return conn.buf[conn.start..conn.end];
|
||||
}
|
||||
|
||||
pub fn drop(conn: *MockBufferedConnection, num: u16) void {
|
||||
conn.start += num;
|
||||
}
|
||||
|
||||
pub fn readAtLeast(conn: *MockBufferedConnection, buffer: []u8, len: usize) ReadError!usize {
|
||||
var out_index: u16 = 0;
|
||||
while (out_index < len) {
|
||||
const available = conn.end - conn.start;
|
||||
const left = buffer.len - out_index;
|
||||
|
||||
if (available > 0) {
|
||||
const can_read = @as(u16, @truncate(@min(available, left)));
|
||||
|
||||
@memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]);
|
||||
out_index += can_read;
|
||||
conn.start += can_read;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (left > conn.buf.len) {
|
||||
// skip the buffer if the output is large enough
|
||||
return conn.conn.read(buffer[out_index..]);
|
||||
}
|
||||
|
||||
try conn.fill();
|
||||
}
|
||||
|
||||
return out_index;
|
||||
}
|
||||
|
||||
pub fn read(conn: *MockBufferedConnection, buffer: []u8) ReadError!usize {
|
||||
return conn.readAtLeast(buffer, 1);
|
||||
}
|
||||
|
||||
pub const ReadError = std.io.FixedBufferStream([]const u8).ReadError || error{EndOfStream};
|
||||
pub const Reader = std.io.GenericReader(*MockBufferedConnection, ReadError, read);
|
||||
|
||||
pub fn reader(conn: *MockBufferedConnection) Reader {
|
||||
return Reader{ .context = conn };
|
||||
}
|
||||
|
||||
pub fn writeAll(conn: *MockBufferedConnection, buffer: []const u8) WriteError!void {
|
||||
return conn.conn.writeAll(buffer);
|
||||
}
|
||||
|
||||
pub fn write(conn: *MockBufferedConnection, buffer: []const u8) WriteError!usize {
|
||||
return conn.conn.write(buffer);
|
||||
}
|
||||
|
||||
pub const WriteError = std.io.FixedBufferStream([]const u8).WriteError;
|
||||
pub const Writer = std.io.GenericWriter(*MockBufferedConnection, WriteError, write);
|
||||
|
||||
pub fn writer(conn: *MockBufferedConnection) Writer {
|
||||
return Writer{ .context = conn };
|
||||
}
|
||||
};
|
||||
|
||||
test "HeadersParser.read length" {
|
||||
// mock BufferedConnection for read
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\nHello";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = std.io.fixedBufferStream(data),
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.next_chunk_length = 5;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.get());
|
||||
}
|
||||
|
||||
test "HeadersParser.read chunked" {
|
||||
// mock BufferedConnection for read
|
||||
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\n\r\n";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = std.io.fixedBufferStream(data),
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.state = .chunk_head_size;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.get());
|
||||
}
|
||||
|
||||
test "HeadersParser.read chunked trailer" {
|
||||
// mock BufferedConnection for read
|
||||
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\nContent-Type: text/plain\r\n\r\n";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = std.io.fixedBufferStream(data),
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.state = .chunk_head_size;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.get());
|
||||
}
|
||||
File diff suppressed because it is too large
|
|
@@ -1944,7 +1944,7 @@ pub const Stream = struct {
|
|||
pub const Error = ReadError;
|
||||
|
||||
pub fn getStream(r: *const Reader) Stream {
|
||||
return r.stream;
|
||||
return r.net_stream;
|
||||
}
|
||||
|
||||
pub fn getError(r: *const Reader) ?Error {
|
||||
|
|
|
|||
|
|
@@ -57,7 +57,6 @@ pub const debug = @import("debug.zig");
|
|||
pub const dwarf = @import("dwarf.zig");
|
||||
pub const elf = @import("elf.zig");
|
||||
pub const enums = @import("enums.zig");
|
||||
pub const fifo = @import("fifo.zig");
|
||||
pub const fmt = @import("fmt.zig");
|
||||
pub const fs = @import("fs.zig");
|
||||
pub const gpu = @import("gpu.zig");
|
||||
|
|
|
|||
|
|
@@ -385,21 +385,23 @@ pub fn run(f: *Fetch) RunError!void {
|
|||
var resource: Resource = .{ .dir = dir };
|
||||
return f.runResource(path_or_url, &resource, null);
|
||||
} else |dir_err| {
|
||||
var server_header_buffer: [init_resource_buffer_size]u8 = undefined;
|
||||
|
||||
const file_err = if (dir_err == error.NotDir) e: {
|
||||
if (fs.cwd().openFile(path_or_url, .{})) |file| {
|
||||
var resource: Resource = .{ .file = file };
|
||||
var resource: Resource = .{ .file = file.reader(&server_header_buffer) };
|
||||
return f.runResource(path_or_url, &resource, null);
|
||||
} else |err| break :e err;
|
||||
} else dir_err;
|
||||
|
||||
const uri = std.Uri.parse(path_or_url) catch |uri_err| {
|
||||
return f.fail(0, try eb.printString(
|
||||
"'{s}' could not be recognized as a file path ({s}) or an URL ({s})",
|
||||
.{ path_or_url, @errorName(file_err), @errorName(uri_err) },
|
||||
"'{s}' could not be recognized as a file path ({t}) or an URL ({t})",
|
||||
.{ path_or_url, file_err, uri_err },
|
||||
));
|
||||
};
|
||||
var server_header_buffer: [header_buffer_size]u8 = undefined;
|
||||
var resource = try f.initResource(uri, &server_header_buffer);
|
||||
var resource: Resource = undefined;
|
||||
try f.initResource(uri, &resource, &server_header_buffer);
|
||||
return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, null);
|
||||
}
|
||||
},
|
||||
|
|
@@ -464,8 +466,9 @@ pub fn run(f: *Fetch) RunError!void {
|
|||
f.location_tok,
|
||||
try eb.printString("invalid URI: {s}", .{@errorName(err)}),
|
||||
);
|
||||
var server_header_buffer: [header_buffer_size]u8 = undefined;
|
||||
var resource = try f.initResource(uri, &server_header_buffer);
|
||||
var buffer: [init_resource_buffer_size]u8 = undefined;
|
||||
var resource: Resource = undefined;
|
||||
try f.initResource(uri, &resource, &buffer);
|
||||
return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, remote.hash);
|
||||
}
|
||||
|
||||
|
|
@@ -866,8 +869,8 @@ fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
|
|||
}
|
||||
|
||||
const Resource = union(enum) {
|
||||
file: fs.File,
|
||||
http_request: std.http.Client.Request,
|
||||
file: fs.File.Reader,
|
||||
http_request: HttpRequest,
|
||||
git: Git,
|
||||
dir: fs.Dir,
|
||||
|
||||
|
|
@@ -877,10 +880,16 @@ const Resource = union(enum) {
|
|||
want_oid: git.Oid,
|
||||
};
|
||||
|
||||
const HttpRequest = struct {
|
||||
request: std.http.Client.Request,
|
||||
response: std.http.Client.Response,
|
||||
buffer: []u8,
|
||||
};
|
||||
|
||||
fn deinit(resource: *Resource) void {
|
||||
switch (resource.*) {
|
||||
.file => |*file| file.close(),
|
||||
.http_request => |*req| req.deinit(),
|
||||
.file => |*file_reader| file_reader.file.close(),
|
||||
.http_request => |*http_request| http_request.request.deinit(),
|
||||
.git => |*git_resource| {
|
||||
git_resource.fetch_stream.deinit();
|
||||
git_resource.session.deinit();
|
||||
|
|
@@ -890,21 +899,13 @@ const Resource = union(enum) {
|
|||
resource.* = undefined;
|
||||
}
|
||||
|
||||
fn reader(resource: *Resource) std.io.AnyReader {
|
||||
return .{
|
||||
.context = resource,
|
||||
.readFn = read,
|
||||
};
|
||||
}
|
||||
|
||||
fn read(context: *const anyopaque, buffer: []u8) anyerror!usize {
|
||||
const resource: *Resource = @ptrCast(@alignCast(@constCast(context)));
|
||||
switch (resource.*) {
|
||||
.file => |*f| return f.read(buffer),
|
||||
.http_request => |*r| return r.read(buffer),
|
||||
.git => |*g| return g.fetch_stream.read(buffer),
|
||||
fn reader(resource: *Resource) *std.Io.Reader {
|
||||
return switch (resource.*) {
|
||||
.file => |*file_reader| return &file_reader.interface,
|
||||
.http_request => |*http_request| return http_request.response.reader(http_request.buffer),
|
||||
.git => |*g| return &g.fetch_stream.reader,
|
||||
.dir => unreachable,
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@@ -967,20 +968,22 @@ const FileType = enum {
|
|||
}
|
||||
};
|
||||
|
||||
const header_buffer_size = 16 * 1024;
|
||||
const init_resource_buffer_size = git.Packet.max_data_length;
|
||||
|
||||
fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Resource {
|
||||
fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u8) RunError!void {
|
||||
const gpa = f.arena.child_allocator;
|
||||
const arena = f.arena.allocator();
|
||||
const eb = &f.error_bundle;
|
||||
|
||||
if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
|
||||
const path = try uri.path.toRawMaybeAlloc(arena);
|
||||
return .{ .file = f.parent_package_root.openFile(path, .{}) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {s}", .{
|
||||
f.parent_package_root, path, @errorName(err),
|
||||
const file = f.parent_package_root.openFile(path, .{}) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{
|
||||
f.parent_package_root, path, err,
|
||||
}));
|
||||
} };
|
||||
};
|
||||
resource.* = .{ .file = file.reader(reader_buffer) };
|
||||
return;
|
||||
}
|
||||
|
||||
const http_client = f.job_queue.http_client;
|
||||
|
|
@@ -988,37 +991,35 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
|
|||
if (ascii.eqlIgnoreCase(uri.scheme, "http") or
|
||||
ascii.eqlIgnoreCase(uri.scheme, "https"))
|
||||
{
|
||||
var req = http_client.open(.GET, uri, .{
|
||||
.server_header_buffer = server_header_buffer,
|
||||
}) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to connect to server: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
};
|
||||
errdefer req.deinit(); // releases more than memory
|
||||
resource.* = .{ .http_request = .{
|
||||
.request = http_client.request(.GET, uri, .{}) catch |err|
|
||||
return f.fail(f.location_tok, try eb.printString("unable to connect to server: {t}", .{err})),
|
||||
.response = undefined,
|
||||
.buffer = reader_buffer,
|
||||
} };
|
||||
const request = &resource.http_request.request;
|
||||
errdefer request.deinit();
|
||||
|
||||
req.send() catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"HTTP request failed: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
};
|
||||
req.wait() catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"invalid HTTP response: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
request.sendBodiless() catch |err|
|
||||
return f.fail(f.location_tok, try eb.printString("HTTP request failed: {t}", .{err}));
|
||||
|
||||
var redirect_buffer: [1024]u8 = undefined;
|
||||
const response = &resource.http_request.response;
|
||||
response.* = request.receiveHead(&redirect_buffer) catch |err| switch (err) {
|
||||
error.ReadFailed => {
|
||||
return f.fail(f.location_tok, try eb.printString("HTTP response read failure: {t}", .{
|
||||
request.connection.?.getReadError().?,
|
||||
}));
|
||||
},
|
||||
else => |e| return f.fail(f.location_tok, try eb.printString("invalid HTTP response: {t}", .{e})),
|
||||
};
|
||||
|
||||
if (req.response.status != .ok) {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"bad HTTP response code: '{d} {s}'",
|
||||
.{ @intFromEnum(req.response.status), req.response.status.phrase() orelse "" },
|
||||
));
|
||||
}
|
||||
if (response.head.status != .ok) return f.fail(f.location_tok, try eb.printString(
|
||||
"bad HTTP response code: '{d} {s}'",
|
||||
.{ response.head.status, response.head.status.phrase() orelse "" },
|
||||
));
|
||||
|
||||
return .{ .http_request = req };
|
||||
return;
|
||||
}
|
||||
|
||||
if (ascii.eqlIgnoreCase(uri.scheme, "git+http") or
|
||||
|
|
@@ -1026,7 +1027,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
|
|||
{
|
||||
var transport_uri = uri;
|
||||
transport_uri.scheme = uri.scheme["git+".len..];
|
||||
var session = git.Session.init(gpa, http_client, transport_uri, server_header_buffer) catch |err| {
|
||||
var session = git.Session.init(gpa, http_client, transport_uri, reader_buffer) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to discover remote git server capabilities: {s}",
|
||||
.{@errorName(err)},
|
||||
|
|
@@ -1042,16 +1043,12 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
|
|||
const want_ref_head = try std.fmt.allocPrint(arena, "refs/heads/{s}", .{want_ref});
|
||||
const want_ref_tag = try std.fmt.allocPrint(arena, "refs/tags/{s}", .{want_ref});
|
||||
|
||||
var ref_iterator = session.listRefs(.{
|
||||
var ref_iterator: git.Session.RefIterator = undefined;
|
||||
session.listRefs(&ref_iterator, .{
|
||||
.ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
|
||||
.include_peeled = true,
|
||||
.server_header_buffer = server_header_buffer,
|
||||
}) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to list refs: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
};
|
||||
.buffer = reader_buffer,
|
||||
}) catch |err| return f.fail(f.location_tok, try eb.printString("unable to list refs: {t}", .{err}));
|
||||
defer ref_iterator.deinit();
|
||||
while (ref_iterator.next() catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
|
|
@@ -1089,25 +1086,21 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
|
|||
|
||||
var want_oid_buf: [git.Oid.max_formatted_length]u8 = undefined;
|
||||
_ = std.fmt.bufPrint(&want_oid_buf, "{f}", .{want_oid}) catch unreachable;
|
||||
var fetch_stream = session.fetch(&.{&want_oid_buf}, server_header_buffer) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to create fetch stream: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
var fetch_stream: git.Session.FetchStream = undefined;
|
||||
session.fetch(&fetch_stream, &.{&want_oid_buf}, reader_buffer) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString("unable to create fetch stream: {t}", .{err}));
|
||||
};
|
||||
errdefer fetch_stream.deinit();
|
||||
|
||||
return .{ .git = .{
|
||||
resource.* = .{ .git = .{
|
||||
.session = session,
|
||||
.fetch_stream = fetch_stream,
|
||||
.want_oid = want_oid,
|
||||
} };
|
||||
return;
|
||||
}
|
||||
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unsupported URL scheme: {s}",
|
||||
.{uri.scheme},
|
||||
));
|
||||
return f.fail(f.location_tok, try eb.printString("unsupported URL scheme: {s}", .{uri.scheme}));
|
||||
}
|
||||
|
||||
fn unpackResource(
|
||||
|
|
@@ -1121,9 +1114,11 @@ fn unpackResource(
|
|||
.file => FileType.fromPath(uri_path) orelse
|
||||
return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path})),
|
||||
|
||||
.http_request => |req| ft: {
|
||||
.http_request => |*http_request| ft: {
|
||||
const head = &http_request.response.head;
|
||||
|
||||
// Content-Type takes first precedence.
|
||||
const content_type = req.response.content_type orelse
|
||||
const content_type = head.content_type orelse
|
||||
return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
|
||||
|
||||
// Extract the MIME type, ignoring charset and boundary directives
|
||||
|
|
@@ -1165,7 +1160,7 @@ fn unpackResource(
|
|||
}
|
||||
|
||||
// Next, the filename from 'content-disposition: attachment' takes precedence.
|
||||
if (req.response.content_disposition) |cd_header| {
|
||||
if (head.content_disposition) |cd_header| {
|
||||
break :ft FileType.fromContentDisposition(cd_header) orelse {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
|
||||
|
|
@@ -1176,10 +1171,7 @@ fn unpackResource(
|
|||
|
||||
// Finally, the path from the URI is used.
|
||||
break :ft FileType.fromPath(uri_path) orelse {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unknown file type: '{s}'",
|
||||
.{uri_path},
|
||||
));
|
||||
return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path}));
|
||||
};
|
||||
},
|
||||
|
||||
|
|
@@ -1187,10 +1179,9 @@ fn unpackResource(
|
|||
|
||||
.dir => |dir| {
|
||||
f.recursiveDirectoryCopy(dir, tmp_directory.handle) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to copy directory '{s}': {s}",
|
||||
.{ uri_path, @errorName(err) },
|
||||
));
|
||||
return f.fail(f.location_tok, try eb.printString("unable to copy directory '{s}': {t}", .{
|
||||
uri_path, err,
|
||||
}));
|
||||
};
|
||||
return .{};
|
||||
},
|
||||
|
|
@@ -1198,27 +1189,17 @@ fn unpackResource(
|
|||
|
||||
switch (file_type) {
|
||||
.tar => {
|
||||
var adapter_buffer: [1024]u8 = undefined;
|
||||
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
|
||||
return unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
|
||||
return unpackTarball(f, tmp_directory.handle, resource.reader());
|
||||
},
|
||||
.@"tar.gz" => {
|
||||
var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
|
||||
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
|
||||
var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined;
|
||||
var decompress: std.compress.flate.Decompress = .init(&adapter.new_interface, .gzip, &flate_buffer);
|
||||
var decompress: std.compress.flate.Decompress = .init(resource.reader(), .gzip, &flate_buffer);
|
||||
return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
|
||||
},
|
||||
.@"tar.xz" => {
|
||||
const gpa = f.arena.child_allocator;
|
||||
const reader = resource.reader();
|
||||
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
|
||||
var dcp = std.compress.xz.decompress(gpa, br.reader()) catch |err| {
|
||||
return f.fail(f.location_tok, try eb.printString(
|
||||
"unable to decompress tarball: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
};
|
||||
var dcp = std.compress.xz.decompress(gpa, resource.reader().adaptToOldInterface()) catch |err|
|
||||
return f.fail(f.location_tok, try eb.printString("unable to decompress tarball: {t}", .{err}));
|
||||
defer dcp.deinit();
|
||||
var adapter_buffer: [1024]u8 = undefined;
|
||||
var adapter = dcp.reader().adaptToNewApi(&adapter_buffer);
|
||||
|
|
@@ -1227,9 +1208,7 @@ fn unpackResource(
|
|||
.@"tar.zst" => {
|
||||
const window_size = std.compress.zstd.default_window_len;
const window_buffer = try f.arena.allocator().create([window_size]u8);
var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
var decompress: std.compress.zstd.Decompress = .init(&adapter.new_interface, window_buffer, .{
var decompress: std.compress.zstd.Decompress = .init(resource.reader(), window_buffer, .{
.verify_checksum = false,
});
return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
@ -1237,12 +1216,15 @@ fn unpackResource(
.git_pack => return unpackGitPack(f, tmp_directory.handle, &resource.git) catch |err| switch (err) {
error.FetchFailed => return error.FetchFailed,
error.OutOfMemory => return error.OutOfMemory,
else => |e| return f.fail(f.location_tok, try eb.printString(
"unable to unpack git files: {s}",
.{@errorName(e)},
)),
else => |e| return f.fail(f.location_tok, try eb.printString("unable to unpack git files: {t}", .{e})),
},
.zip => return unzip(f, tmp_directory.handle, resource.reader()) catch |err| switch (err) {
error.ReadFailed => return f.fail(f.location_tok, try eb.printString(
"failed reading resource: {t}",
.{err},
)),
else => |e| return e,
},
.zip => return try unzip(f, tmp_directory.handle, resource.reader()),
}
}

@ -1277,99 +1259,69 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) RunError!Un
return res;
}

fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) error{ ReadFailed, OutOfMemory, FetchFailed }!UnpackResult {
// We write the entire contents to a file first because zip files
// must be processed back to front and they could be too large to
// load into memory.

const cache_root = f.job_queue.global_cache;

// TODO: the downside of this solution is if we get a failure/crash/oom/power out
// during this process, we leave behind a zip file that would be
// difficult to know if/when it can be cleaned up.
// Might be worth it to use a mechanism that enables other processes
// to see if the owning process of a file is still alive (on linux this
// can be done with file locks).
// Coupled with this mechanism, we could also use slots (i.e. zig-cache/tmp/0,
// zig-cache/tmp/1, etc) which would mean that subsequent runs would
// automatically clean up old dead files.
// This could all be done with a simple TmpFile abstraction.
const prefix = "tmp/";
const suffix = ".zip";

const random_bytes_count = 20;
const random_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
var zip_path: [prefix.len + random_path_len + suffix.len]u8 = undefined;
@memcpy(zip_path[0..prefix.len], prefix);
@memcpy(zip_path[prefix.len + random_path_len ..], suffix);
{
var random_bytes: [random_bytes_count]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
_ = std.fs.base64_encoder.encode(
zip_path[prefix.len..][0..random_path_len],
&random_bytes,
);
}

defer cache_root.handle.deleteFile(&zip_path) catch {};

const eb = &f.error_bundle;
const random_len = @sizeOf(u64) * 2;

{
var zip_file = cache_root.handle.createFile(
&zip_path,
.{},
) catch |err| return f.fail(f.location_tok, try eb.printString(
"failed to create tmp zip file: {s}",
.{@errorName(err)},
));
defer zip_file.close();
var buf: [4096]u8 = undefined;
while (true) {
const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
"read zip stream failed: {s}",
.{@errorName(err)},
));
if (len == 0) break;
zip_file.deprecatedWriter().writeAll(buf[0..len]) catch |err| return f.fail(f.location_tok, try eb.printString(
"write temporary zip file failed: {s}",
.{@errorName(err)},
));
}
}
var zip_path: [prefix.len + random_len + suffix.len]u8 = undefined;
zip_path[0..prefix.len].* = prefix.*;
zip_path[prefix.len + random_len ..].* = suffix.*;
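
// Keep generating a fresh random name until an exclusive create succeeds; PathAlreadyExists just retries.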
var zip_file = while (true) {
const random_integer = std.crypto.random.int(u64);
zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer);

break cache_root.handle.createFile(&zip_path, .{
.exclusive = true,
.read = true,
}) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => |e| return f.fail(
f.location_tok,
try eb.printString("failed to create temporary zip file: {t}", .{e}),
),
};
};
defer zip_file.close();
var zip_file_buffer: [4096]u8 = undefined;
var zip_file_reader = b: {
var zip_file_writer = zip_file.writer(&zip_file_buffer);

_ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
error.WriteFailed => return f.fail(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
),
};
zip_file_writer.interface.flush() catch |err| return f.fail(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
);
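// Convert the flushed writer into a reader on the same open file so the zip contents can be read back.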
break :b zip_file_writer.moveToReader();
};

var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
// no need to deinit since we are using an arena allocator

{
var zip_file = cache_root.handle.openFile(
&zip_path,
.{},
) catch |err| return f.fail(f.location_tok, try eb.printString(
"failed to open temporary zip file: {s}",
.{@errorName(err)},
));
defer zip_file.close();
zip_file_reader.seekTo(0) catch |err|
return f.fail(f.location_tok, try eb.printString("failed to seek temporary zip file: {t}", .{err}));
std.zip.extract(out_dir, &zip_file_reader, .{
.allow_backslashes = true,
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err}));

var zip_file_buffer: [1024]u8 = undefined;
var zip_file_reader = zip_file.reader(&zip_file_buffer);
cache_root.handle.deleteFile(&zip_path) catch |err|
return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err}));

std.zip.extract(out_dir, &zip_file_reader, .{
.allow_backslashes = true,
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString(
"zip extract failed: {s}",
.{@errorName(err)},
));
}

cache_root.handle.deleteFile(&zip_path) catch |err| return f.fail(f.location_tok, try eb.printString(
"delete temporary zip failed: {s}",
.{@errorName(err)},
));

const res: UnpackResult = .{ .root_dir = diagnostics.root_dir };
return res;
return .{ .root_dir = diagnostics.root_dir };
}

fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
@ -1387,10 +1339,13 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
defer pack_file.close();
var pack_file_buffer: [4096]u8 = undefined;
var fifo = std.fifo.LinearFifo(u8, .{ .Slice = {} }).init(&pack_file_buffer);
try fifo.pump(resource.fetch_stream.reader(), pack_file.deprecatedWriter());

var pack_file_reader = pack_file.reader(&pack_file_buffer);
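// Stream the fetched pack data into pkg.pack, then reuse the same file handle as a reader.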
var pack_file_reader = b: {
var pack_file_writer = pack_file.writer(&pack_file_buffer);
const fetch_reader = &resource.fetch_stream.reader;
_ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
try pack_file_writer.interface.flush();
break :b pack_file_writer.moveToReader();
};

var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
defer index_file.close();

@ -585,17 +585,17 @@ const ObjectCache = struct {
/// [protocol-common](https://git-scm.com/docs/protocol-common). The special
/// meanings of the delimiter and response-end packets are documented in
/// [protocol-v2](https://git-scm.com/docs/protocol-v2).
const Packet = union(enum) {
pub const Packet = union(enum) {
flush,
delimiter,
response_end,
data: []const u8,

const max_data_length = 65516;
pub const max_data_length = 65516;

/// Reads a packet in pkt-line format.
fn read(reader: anytype, buf: *[max_data_length]u8) !Packet {
const length = std.fmt.parseUnsigned(u16, &try reader.readBytesNoEof(4), 16) catch return error.InvalidPacket;
fn read(reader: *std.Io.Reader) !Packet {
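// pkt-line framing: a 4-digit hex length that counts the length field itself, followed by length - 4 bytes of data.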
const length = std.fmt.parseUnsigned(u16, try reader.take(4), 16) catch return error.InvalidPacket;
switch (length) {
0 => return .flush,
1 => return .delimiter,
@ -603,13 +603,11 @@ const Packet = union(enum) {
3 => return error.InvalidPacket,
else => if (length - 4 > max_data_length) return error.InvalidPacket,
}
const data = buf[0 .. length - 4];
try reader.readNoEof(data);
return .{ .data = data };
return .{ .data = try reader.take(length - 4) };
}

/// Writes a packet in pkt-line format.
fn write(packet: Packet, writer: anytype) !void {
fn write(packet: Packet, writer: *std.Io.Writer) !void {
switch (packet) {
.flush => try writer.writeAll("0000"),
.delimiter => try writer.writeAll("0001"),
@ -657,8 +655,10 @@ pub const Session = struct {
allocator: Allocator,
transport: *std.http.Client,
uri: std.Uri,
http_headers_buffer: []u8,
/// Asserted to be at least `Packet.max_data_length`
response_buffer: []u8,
) !Session {
assert(response_buffer.len >= Packet.max_data_length);
var session: Session = .{
.transport = transport,
.location = try .init(allocator, uri),
@ -668,7 +668,8 @@ pub const Session = struct {
.allocator = allocator,
};
errdefer session.deinit();
var capability_iterator = try session.getCapabilities(http_headers_buffer);
var capability_iterator: CapabilityIterator = undefined;
try session.getCapabilities(&capability_iterator, response_buffer);
defer capability_iterator.deinit();
while (try capability_iterator.next()) |capability| {
if (mem.eql(u8, capability.key, "agent")) {
@ -743,7 +744,8 @@ pub const Session = struct {
///
/// The `session.location` is updated if the server returns a redirect, so
/// that subsequent session functions do not need to handle redirects.
fn getCapabilities(session: *Session, http_headers_buffer: []u8) !CapabilityIterator {
fn getCapabilities(session: *Session, it: *CapabilityIterator, response_buffer: []u8) !void {
assert(response_buffer.len >= Packet.max_data_length);
var info_refs_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
@ -757,19 +759,22 @@ pub const Session = struct {
info_refs_uri.fragment = null;

const max_redirects = 3;
var request = try session.transport.open(.GET, info_refs_uri, .{
.redirect_behavior = @enumFromInt(max_redirects),
.server_header_buffer = http_headers_buffer,
.extra_headers = &.{
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
errdefer request.deinit();
try request.send();
try request.finish();
it.* = .{
.request = try session.transport.request(.GET, info_refs_uri, .{
.redirect_behavior = .init(max_redirects),
.extra_headers = &.{
.{ .name = "Git-Protocol", .value = "version=2" },
},
}),
.reader = undefined,
};
errdefer it.deinit();
const request = &it.request;
try request.sendBodiless();

try request.wait();
if (request.response.status != .ok) return error.ProtocolError;
var redirect_buffer: [1024]u8 = undefined;
var response = try request.receiveHead(&redirect_buffer);
if (response.head.status != .ok) return error.ProtocolError;
const any_redirects_occurred = request.redirect_behavior.remaining() < max_redirects;
if (any_redirects_occurred) {
const request_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
@ -784,8 +789,7 @@ pub const Session = struct {
session.location = new_location;
}

const reader = request.reader();
var buf: [Packet.max_data_length]u8 = undefined;
it.reader = response.reader(response_buffer);
var state: enum { response_start, response_content } = .response_start;
while (true) {
// Some Git servers (at least GitHub) include an additional
@ -795,15 +799,15 @@ pub const Session = struct {
// Thus, we need to skip any such useless additional responses
// before we get the one we're actually looking for. The responses
// will be delimited by flush packets.
const packet = Packet.read(reader, &buf) catch |e| switch (e) {
const packet = Packet.read(it.reader) catch |err| switch (err) {
error.EndOfStream => return error.UnsupportedProtocol, // 'version 2' packet not found
else => |other| return other,
else => |e| return e,
};
switch (packet) {
.flush => state = .response_start,
.data => |data| switch (state) {
.response_start => if (mem.eql(u8, Packet.normalizeText(data), "version 2")) {
return .{ .request = request };
return;
} else {
state = .response_content;
},
@ -816,7 +820,7 @@ pub const Session = struct {

const CapabilityIterator = struct {
request: std.http.Client.Request,
buf: [Packet.max_data_length]u8 = undefined,
reader: *std.Io.Reader,

const Capability = struct {
key: []const u8,
@ -830,13 +834,13 @@ pub const Session = struct {
}
};

fn deinit(iterator: *CapabilityIterator) void {
iterator.request.deinit();
iterator.* = undefined;
fn deinit(it: *CapabilityIterator) void {
it.request.deinit();
it.* = undefined;
}

fn next(iterator: *CapabilityIterator) !?Capability {
switch (try Packet.read(iterator.request.reader(), &iterator.buf)) {
fn next(it: *CapabilityIterator) !?Capability {
switch (try Packet.read(it.reader)) {
.flush => return null,
.data => |data| return Capability.parse(Packet.normalizeText(data)),
else => return error.UnexpectedPacket,
@ -854,11 +858,13 @@ pub const Session = struct {
include_symrefs: bool = false,
/// Whether to include the peeled object ID for returned tag refs.
include_peeled: bool = false,
server_header_buffer: []u8,
/// Asserted to be at least `Packet.max_data_length`.
buffer: []u8,
};

/// Returns an iterator over refs known to the server.
pub fn listRefs(session: Session, options: ListRefsOptions) !RefIterator {
pub fn listRefs(session: Session, it: *RefIterator, options: ListRefsOptions) !void {
assert(options.buffer.len >= Packet.max_data_length);
var upload_pack_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
@ -871,59 +877,56 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;

var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(session.allocator);
const body_writer = body.writer(session.allocator);
try Packet.write(.{ .data = "command=ls-refs\n" }, body_writer);
var body: std.Io.Writer = .fixed(options.buffer);
try Packet.write(.{ .data = "command=ls-refs\n" }, &body);
if (session.supports_agent) {
try Packet.write(.{ .data = agent_capability }, body_writer);
try Packet.write(.{ .data = agent_capability }, &body);
}
{
const object_format_packet = try std.fmt.allocPrint(session.allocator, "object-format={s}\n", .{@tagName(session.object_format)});
const object_format_packet = try std.fmt.allocPrint(session.allocator, "object-format={t}\n", .{
session.object_format,
});
defer session.allocator.free(object_format_packet);
try Packet.write(.{ .data = object_format_packet }, body_writer);
try Packet.write(.{ .data = object_format_packet }, &body);
}
try Packet.write(.delimiter, body_writer);
try Packet.write(.delimiter, &body);
for (options.ref_prefixes) |ref_prefix| {
const ref_prefix_packet = try std.fmt.allocPrint(session.allocator, "ref-prefix {s}\n", .{ref_prefix});
defer session.allocator.free(ref_prefix_packet);
try Packet.write(.{ .data = ref_prefix_packet }, body_writer);
try Packet.write(.{ .data = ref_prefix_packet }, &body);
}
if (options.include_symrefs) {
try Packet.write(.{ .data = "symrefs\n" }, body_writer);
try Packet.write(.{ .data = "symrefs\n" }, &body);
}
if (options.include_peeled) {
try Packet.write(.{ .data = "peel\n" }, body_writer);
try Packet.write(.{ .data = "peel\n" }, &body);
}
try Packet.write(.flush, body_writer);
try Packet.write(.flush, &body);

var request = try session.transport.open(.POST, upload_pack_uri, .{
.redirect_behavior = .unhandled,
.server_header_buffer = options.server_header_buffer,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };
try request.send();
try request.writeAll(body.items);
try request.finish();

try request.wait();
if (request.response.status != .ok) return error.ProtocolError;

return .{
it.* = .{
.request = try session.transport.request(.POST, upload_pack_uri, .{
.redirect_behavior = .unhandled,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
}),
.reader = undefined,
.format = session.object_format,
.request = request,
};
const request = &it.request;
errdefer request.deinit();
try request.sendBodyComplete(body.buffered());

var response = try request.receiveHead(options.buffer);
if (response.head.status != .ok) return error.ProtocolError;
it.reader = response.reader(options.buffer);
}

pub const RefIterator = struct {
format: Oid.Format,
request: std.http.Client.Request,
buf: [Packet.max_data_length]u8 = undefined,
reader: *std.Io.Reader,

pub const Ref = struct {
oid: Oid,
@ -937,13 +940,13 @@ pub const Session = struct {
iterator.* = undefined;
}

pub fn next(iterator: *RefIterator) !?Ref {
switch (try Packet.read(iterator.request.reader(), &iterator.buf)) {
pub fn next(it: *RefIterator) !?Ref {
switch (try Packet.read(it.reader)) {
.flush => return null,
.data => |data| {
const ref_data = Packet.normalizeText(data);
const oid_sep_pos = mem.indexOfScalar(u8, ref_data, ' ') orelse return error.InvalidRefPacket;
const oid = Oid.parse(iterator.format, data[0..oid_sep_pos]) catch return error.InvalidRefPacket;
const oid = Oid.parse(it.format, data[0..oid_sep_pos]) catch return error.InvalidRefPacket;

const name_sep_pos = mem.indexOfScalarPos(u8, ref_data, oid_sep_pos + 1, ' ') orelse ref_data.len;
const name = ref_data[oid_sep_pos + 1 .. name_sep_pos];
@ -957,7 +960,7 @@ pub const Session = struct {
if (mem.startsWith(u8, attribute, "symref-target:")) {
symref_target = attribute["symref-target:".len..];
} else if (mem.startsWith(u8, attribute, "peeled:")) {
peeled = Oid.parse(iterator.format, attribute["peeled:".len..]) catch return error.InvalidRefPacket;
peeled = Oid.parse(it.format, attribute["peeled:".len..]) catch return error.InvalidRefPacket;
}
last_sep_pos = next_sep_pos;
}
@ -973,9 +976,12 @@ pub const Session = struct {
/// performed if the server supports it.
pub fn fetch(
session: Session,
fs: *FetchStream,
wants: []const []const u8,
http_headers_buffer: []u8,
) !FetchStream {
/// Asserted to be at least `Packet.max_data_length`.
response_buffer: []u8,
) !void {
assert(response_buffer.len >= Packet.max_data_length);
var upload_pack_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
@ -988,63 +994,71 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;

var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(session.allocator);
const body_writer = body.writer(session.allocator);
try Packet.write(.{ .data = "command=fetch\n" }, body_writer);
var body: std.Io.Writer = .fixed(response_buffer);
try Packet.write(.{ .data = "command=fetch\n" }, &body);
if (session.supports_agent) {
try Packet.write(.{ .data = agent_capability }, body_writer);
try Packet.write(.{ .data = agent_capability }, &body);
}
{
const object_format_packet = try std.fmt.allocPrint(session.allocator, "object-format={s}\n", .{@tagName(session.object_format)});
defer session.allocator.free(object_format_packet);
try Packet.write(.{ .data = object_format_packet }, body_writer);
try Packet.write(.{ .data = object_format_packet }, &body);
}
try Packet.write(.delimiter, body_writer);
try Packet.write(.delimiter, &body);
// Our packfile parser supports the OFS_DELTA object type
try Packet.write(.{ .data = "ofs-delta\n" }, body_writer);
try Packet.write(.{ .data = "ofs-delta\n" }, &body);
// We do not currently convey server progress information to the user
try Packet.write(.{ .data = "no-progress\n" }, body_writer);
try Packet.write(.{ .data = "no-progress\n" }, &body);
if (session.supports_shallow) {
try Packet.write(.{ .data = "deepen 1\n" }, body_writer);
try Packet.write(.{ .data = "deepen 1\n" }, &body);
}
for (wants) |want| {
var buf: [Packet.max_data_length]u8 = undefined;
const arg = std.fmt.bufPrint(&buf, "want {s}\n", .{want}) catch unreachable;
try Packet.write(.{ .data = arg }, body_writer);
try Packet.write(.{ .data = arg }, &body);
}
try Packet.write(.{ .data = "done\n" }, body_writer);
try Packet.write(.flush, body_writer);
try Packet.write(.{ .data = "done\n" }, &body);
try Packet.write(.flush, &body);

var request = try session.transport.open(.POST, upload_pack_uri, .{
.redirect_behavior = .not_allowed,
.server_header_buffer = http_headers_buffer,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
fs.* = .{
.request = try session.transport.request(.POST, upload_pack_uri, .{
.redirect_behavior = .not_allowed,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
}),
.input = undefined,
.reader = undefined,
.remaining_len = undefined,
};
const request = &fs.request;
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };
try request.send();
try request.writeAll(body.items);
try request.finish();

try request.wait();
if (request.response.status != .ok) return error.ProtocolError;
try request.sendBodyComplete(body.buffered());

const reader = request.reader();
var response = try request.receiveHead(&.{});
if (response.head.status != .ok) return error.ProtocolError;

const reader = response.reader(response_buffer);
// We are not interested in any of the sections of the returned fetch
// data other than the packfile section, since we aren't doing anything
// complex like ref negotiation (this is a fresh clone).
var state: enum { section_start, section_content } = .section_start;
while (true) {
var buf: [Packet.max_data_length]u8 = undefined;
const packet = try Packet.read(reader, &buf);
const packet = try Packet.read(reader);
switch (state) {
.section_start => switch (packet) {
.data => |data| if (mem.eql(u8, Packet.normalizeText(data), "packfile")) {
return .{ .request = request };
fs.input = reader;
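// Unbuffered std.Io.Reader whose stream callback feeds packfile bytes out of the pkt-line framing.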
fs.reader = .{
.buffer = &.{},
.vtable = &.{ .stream = FetchStream.stream },
.seek = 0,
.end = 0,
};
fs.remaining_len = 0;
return;
} else {
state = .section_content;
},
@ -1061,20 +1075,23 @@ pub const Session = struct {

pub const FetchStream = struct {
request: std.http.Client.Request,
buf: [Packet.max_data_length]u8 = undefined,
pos: usize = 0,
len: usize = 0,
input: *std.Io.Reader,
reader: std.Io.Reader,
err: ?Error = null,
remaining_len: usize,

pub fn deinit(stream: *FetchStream) void {
stream.request.deinit();
pub fn deinit(fs: *FetchStream) void {
fs.request.deinit();
}

pub const ReadError = std.http.Client.Request.ReadError || error{
pub const Error = error{
InvalidPacket,
ProtocolError,
UnexpectedPacket,
WriteFailed,
ReadFailed,
EndOfStream,
};
pub const Reader = std.io.GenericReader(*FetchStream, ReadError, read);

const StreamCode = enum(u8) {
pack_data = 1,
@ -1083,33 +1100,41 @@ pub const Session = struct {
_,
};

pub fn reader(stream: *FetchStream) Reader {
return .{ .context = stream };
}

pub fn read(stream: *FetchStream, buf: []u8) !usize {
if (stream.pos == stream.len) {
pub fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
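// Recover the FetchStream that embeds this std.Io.Reader.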
const fs: *FetchStream = @alignCast(@fieldParentPtr("reader", r));
const input = fs.input;
if (fs.remaining_len == 0) {
while (true) {
switch (try Packet.read(stream.request.reader(), &stream.buf)) {
.flush => return 0,
switch (Packet.read(input) catch |err| {
fs.err = err;
return error.ReadFailed;
}) {
.flush => return error.EndOfStream,
.data => |data| if (data.len > 1) switch (@as(StreamCode, @enumFromInt(data[0]))) {
.pack_data => {
stream.pos = 1;
stream.len = data.len;
input.toss(1);
fs.remaining_len = data.len;
break;
},
.fatal_error => return error.ProtocolError,
.fatal_error => {
fs.err = error.ProtocolError;
return error.ReadFailed;
},
else => {},
},
else => return error.UnexpectedPacket,
else => {
fs.err = error.UnexpectedPacket;
return error.ReadFailed;
},
}
}
}

const size = @min(buf.len, stream.len - stream.pos);
@memcpy(buf[0..size], stream.buf[stream.pos .. stream.pos + size]);
stream.pos += size;
return size;
const buf = limit.slice(try w.writableSliceGreedy(1));
const n = @min(buf.len, fs.remaining_len);
@memcpy(buf[0..n], input.buffered()[0..n]);
input.toss(n);
fs.remaining_len -= n;
return n;
}
};
};