zig/lib/std/http/Server.zig
Andrew Kelley 6395ba852a std.http.Server: rework the API entirely
Mainly, this removes the poorly named `wait`, `send`, and `finish`
functions, which all operated on the same "Response" object that was
actually being used as the request.

Now, it looks like this:
1. std.net.Server.accept() gives you a std.net.Server.Connection
2. std.http.Server.init() with the connection
3. Server.receiveHead() gives you a Request
4. Request.reader() gives you a body reader
5. Request.respond() is a one-shot, or Request.respondStreaming() creates
   a Response
6. Response.writer() gives you a body writer
7. Response.end() finishes the response; Response.endChunked() allows
   passing response trailers.

In other words, the type system now guides the API user down the correct
path.
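
For example, a minimal accept loop looks like this (an illustrative
sketch, not code from this commit; assumes `net_server` is a
`std.net.Server` obtained from `std.net.Address.listen`):

    var read_buffer: [8000]u8 = undefined;
    while (true) {
        const connection = try net_server.accept();
        defer connection.stream.close();
        var server = std.http.Server.init(connection, &read_buffer);
        while (server.state == .ready) {
            var request = server.receiveHead() catch break;
            try request.respond("hello\n", .{
                .extra_headers = &.{
                    .{ .name = "content-type", .value = "text/plain" },
                },
            });
        }
    }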

receiveHead allows extra bytes to be read into the read buffer, and then
will reuse those bytes for the body or the next request upon connection
reuse.

respond(), the one-shot function, will send the entire response in one
syscall.

Streaming a response body no longer wastefully wraps every call to write
in a chunk header and trailer; instead, the HTTP chunk wrapper is only
emitted when flushing. The user still controls when that happens, and no
unnecessary chunks are added.
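
For example, a handler streaming a chunked body might look like this
(sketch; the payload and buffer size are illustrative):

    var send_buffer: [4000]u8 = undefined;
    var response = request.respondStreaming(.{
        .send_buffer = &send_buffer,
    });
    const w = response.writer();
    try w.writeAll("part one");
    try w.writeAll("part two");
    // Both writes above are wrapped into a single HTTP chunk here:
    try response.flush();
    try w.writeAll("part three");
    try response.end();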

Empirically, in my example project that uses this API, the usage code is
significantly less noisy; it has less error handling code while handling
errors more correctly; it is more obvious what is happening; and it is
syscall-optimal.

Additionally:
* Uncouple std.http.HeadParser from protocol.zig
* Delete std.http.Server.Connection; use std.net.Server.Connection instead.
  - The API user supplies the read buffer when initializing the
    http.Server, and it is used for the HTTP head as well as a buffer
    for reading the body into.
* Replace and document the State enum. No longer is there both "start"
  and "first".
2024-02-23 02:37:11 -07:00

//! Blocking HTTP server implementation.
connection: net.Server.Connection,
/// Keeps track of whether the Server is ready to accept a new request on the
/// same connection, and makes invalid API usage cause assertion failures
/// rather than HTTP protocol violations.
state: State,
/// User-provided buffer that must outlive this Server.
/// Used to store the client's entire HTTP header.
read_buffer: []u8,
/// Amount of available data inside read_buffer.
read_buffer_len: usize,
/// Index into `read_buffer` of the first byte of the next HTTP request.
next_request_start: usize,
pub const State = enum {
/// The connection is available to be used for the first time, or reused.
ready,
/// An error occurred in `receiveHead`.
receiving_head,
/// A Request object has been obtained and from there a Response can be
/// opened.
received_head,
/// The client is uploading something to this Server.
receiving_body,
/// The connection is eligible for another HTTP request, however the client
/// and server did not negotiate connection: keep-alive.
closing,
};
/// Initialize an HTTP server that can respond to multiple requests on the same
/// connection.
/// The returned `Server` is ready for `receiveHead` to be called.
pub fn init(connection: net.Server.Connection, read_buffer: []u8) Server {
return .{
.connection = connection,
.state = .ready,
.read_buffer = read_buffer,
.read_buffer_len = 0,
.next_request_start = 0,
};
}
pub const ReceiveHeadError = error{
/// Client sent too many bytes of HTTP headers.
/// The HTTP specification suggests responding with a 431 status code
/// before closing the connection.
HttpHeadersOversize,
/// Client sent headers that did not conform to the HTTP protocol.
HttpHeadersInvalid,
/// A low level I/O error occurred trying to read the headers.
HttpHeadersUnreadable,
};
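// How a caller might react to these errors (an illustrative sketch, not
// part of this API):
//
//     const request = server.receiveHead() catch |err| switch (err) {
//         error.HttpHeadersOversize => {
//             // Best-effort 431 before giving up on the connection.
//             server.connection.stream.writeAll("HTTP/1.1 431 Request Header Fields Too Large\r\n" ++
//                 "connection: close\r\ncontent-length: 0\r\n\r\n") catch {};
//             return;
//         },
//         else => return,
//     };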
/// The header bytes reference the read buffer that Server was initialized with
/// and remain alive until the next call to receiveHead.
pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
assert(s.state == .ready);
s.state = .received_head;
errdefer s.state = .receiving_head;
// In case of a reused connection, move the next request's bytes to the
// beginning of the buffer.
if (s.next_request_start > 0) {
if (s.read_buffer_len > s.next_request_start) {
const leftover = s.read_buffer[s.next_request_start..s.read_buffer_len];
const dest = s.read_buffer[0..leftover.len];
if (leftover.len <= s.next_request_start) {
@memcpy(dest, leftover);
} else {
// The regions overlap and dest starts before leftover, so the
// copy must proceed forwards.
mem.copyForwards(u8, dest, leftover);
}
s.read_buffer_len = leftover.len;
} else {
// No bytes to preserve; drop stale data from the previous request.
s.read_buffer_len = 0;
}
s.next_request_start = 0;
}
var hp: http.HeadParser = .{};
var head_end: usize = 0;
// Bytes already buffered from the previous request on this connection, if
// any, must be fed to the parser before reading more from the stream.
if (s.read_buffer_len > 0)
head_end += hp.feed(s.read_buffer[0..s.read_buffer_len]);
while (hp.state != .finished) {
const buf = s.read_buffer[s.read_buffer_len..];
if (buf.len == 0)
return error.HttpHeadersOversize;
const read_n = s.connection.stream.read(buf) catch
return error.HttpHeadersUnreadable;
// A read of zero bytes means the connection closed before a complete
// head was received.
if (read_n == 0)
return error.HttpHeadersUnreadable;
s.read_buffer_len += read_n;
// `feed` returns the number of bytes consumed from this slice, so the
// running total is the absolute index of the end of the head.
head_end += hp.feed(buf[0..read_n]);
}
return .{
.server = s,
.head_end = head_end,
.head = Request.Head.parse(s.read_buffer[0..head_end]) catch
return error.HttpHeadersInvalid,
.reader_state = undefined,
};
}
pub const Request = struct {
server: *Server,
/// Index into Server's read_buffer.
head_end: usize,
head: Head,
reader_state: union {
remaining_content_length: u64,
},
pub const Compression = union(enum) {
pub const DeflateDecompressor = std.compress.zlib.Decompressor(std.io.AnyReader);
pub const GzipDecompressor = std.compress.gzip.Decompressor(std.io.AnyReader);
pub const ZstdDecompressor = std.compress.zstd.Decompressor(std.io.AnyReader);
deflate: DeflateDecompressor,
gzip: GzipDecompressor,
zstd: ZstdDecompressor,
none: void,
};
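// Note: nothing in this file constructs these decompressors; `Head.parse`
// below always leaves `compression` as `.none`. The types are provided so
// that an API user can wrap the body reader according to
// `transfer_compression`.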
pub const Head = struct {
method: http.Method,
target: []const u8,
version: http.Version,
expect: ?[]const u8,
content_type: ?[]const u8,
content_length: ?u64,
transfer_encoding: http.TransferEncoding,
transfer_compression: http.ContentEncoding,
keep_alive: bool,
compression: Compression,
pub const ParseError = error{
UnknownHttpMethod,
HttpHeadersInvalid,
HttpHeaderContinuationsUnsupported,
HttpTransferEncodingUnsupported,
HttpConnectionHeaderUnsupported,
InvalidContentLength,
CompressionUnsupported,
MissingFinalNewline,
};
pub fn parse(bytes: []const u8) ParseError!Head {
var it = mem.splitSequence(u8, bytes, "\r\n");
const first_line = it.next().?;
if (first_line.len < 10)
return error.HttpHeadersInvalid;
const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid;
if (method_end > 24) return error.HttpHeadersInvalid;
const method_str = first_line[0..method_end];
const method: http.Method = @enumFromInt(http.Method.parse(method_str));
const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid;
if (version_start == method_end) return error.HttpHeadersInvalid;
const version_str = first_line[version_start + 1 ..];
if (version_str.len != 8) return error.HttpHeadersInvalid;
const version: http.Version = switch (int64(version_str[0..8])) {
int64("HTTP/1.0") => .@"HTTP/1.0",
int64("HTTP/1.1") => .@"HTTP/1.1",
else => return error.HttpHeadersInvalid,
};
const target = first_line[method_end + 1 .. version_start];
var head: Head = .{
.method = method,
.target = target,
.version = version,
.expect = null,
.content_type = null,
.content_length = null,
.transfer_encoding = .none,
.transfer_compression = .identity,
.keep_alive = false,
.compression = .none,
};
while (it.next()) |line| {
if (line.len == 0) return head;
switch (line[0]) {
' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
else => {},
}
var line_it = mem.splitSequence(u8, line, ": ");
const header_name = line_it.next().?;
const header_value = line_it.rest();
if (header_value.len == 0) return error.HttpHeadersInvalid;
if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
head.keep_alive = !std.ascii.eqlIgnoreCase(header_value, "close");
} else if (std.ascii.eqlIgnoreCase(header_name, "expect")) {
head.expect = header_value;
} else if (std.ascii.eqlIgnoreCase(header_name, "content-type")) {
head.content_type = header_value;
} else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
if (head.content_length != null) return error.HttpHeadersInvalid;
head.content_length = std.fmt.parseInt(u64, header_value, 10) catch
return error.InvalidContentLength;
} else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
if (head.transfer_compression != .identity) return error.HttpHeadersInvalid;
const trimmed = mem.trim(u8, header_value, " ");
if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
head.transfer_compression = ce;
} else {
return error.HttpTransferEncodingUnsupported;
}
} else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
// Transfer-Encoding: second, first
// Transfer-Encoding: deflate, chunked
var iter = mem.splitBackwardsScalar(u8, header_value, ',');
const first = iter.first();
const trimmed_first = mem.trim(u8, first, " ");
var next: ?[]const u8 = first;
if (std.meta.stringToEnum(http.TransferEncoding, trimmed_first)) |transfer| {
if (head.transfer_encoding != .none)
return error.HttpHeadersInvalid; // we already have a transfer encoding
head.transfer_encoding = transfer;
next = iter.next();
}
if (next) |second| {
const trimmed_second = mem.trim(u8, second, " ");
if (std.meta.stringToEnum(http.ContentEncoding, trimmed_second)) |transfer| {
if (head.transfer_compression != .identity)
return error.HttpHeadersInvalid; // double compression is not supported
head.transfer_compression = transfer;
} else {
return error.HttpTransferEncodingUnsupported;
}
}
if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
}
}
return error.MissingFinalNewline;
}
inline fn int64(array: *const [8]u8) u64 {
return @bitCast(array.*);
}
};
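// An illustrative sanity test for `Head.parse`; the request bytes below are
// made up for the test.
test "Head.parse smoke test" {
const head = try Head.parse("GET /hello HTTP/1.1\r\n" ++
"connection: keep-alive\r\n" ++
"content-length: 5\r\n\r\n");
try std.testing.expectEqual(http.Method.GET, head.method);
try std.testing.expectEqualStrings("/hello", head.target);
try std.testing.expectEqual(http.Version.@"HTTP/1.1", head.version);
try std.testing.expect(head.keep_alive);
try std.testing.expectEqual(@as(?u64, 5), head.content_length);
}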
pub const RespondOptions = struct {
version: http.Version = .@"HTTP/1.1",
status: http.Status = .ok,
reason: ?[]const u8 = null,
keep_alive: bool = true,
extra_headers: []const http.Header = &.{},
};
/// Send an entire HTTP response to the client, including headers and body.
///
/// Automatically handles HEAD requests by omitting the body.
/// Uses the "content-length" header unless `content` is empty in which
/// case it omits the content-length header.
///
/// If the request contains a body and the connection is to be reused,
/// discards the request body, leaving the Server in the `ready` state. If
/// this discarding fails, the connection is marked as not to be reused and
/// no error is surfaced.
///
/// Asserts status is not `continue`.
/// Asserts there are at most 25 extra_headers.
pub fn respond(
request: *Request,
content: []const u8,
options: RespondOptions,
) Response.WriteError!void {
const max_extra_headers = 25;
assert(options.status != .@"continue");
assert(options.extra_headers.len <= max_extra_headers);
const keep_alive = request.discardBody(options.keep_alive);
const phrase = options.reason orelse options.status.phrase() orelse "";
var first_buffer: [500]u8 = undefined;
var h = std.ArrayListUnmanaged(u8).initBuffer(&first_buffer);
h.writerAssumeCapacity().print("{s} {d} {s}\r\n", .{
@tagName(options.version), @intFromEnum(options.status), phrase,
}) catch |err| switch (err) {};
if (keep_alive)
h.appendSliceAssumeCapacity("connection: keep-alive\r\n");
if (content.len > 0)
h.writerAssumeCapacity().print("content-length: {d}\r\n", .{content.len}) catch |err|
switch (err) {};
var iovecs: [max_extra_headers * 4 + 3]std.posix.iovec_const = undefined;
var iovecs_len: usize = 0;
iovecs[iovecs_len] = .{
.iov_base = h.items.ptr,
.iov_len = h.items.len,
};
iovecs_len += 1;
for (options.extra_headers) |header| {
iovecs[iovecs_len] = .{
.iov_base = header.name.ptr,
.iov_len = header.name.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = ": ",
.iov_len = 2,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = header.value.ptr,
.iov_len = header.value.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = "\r\n",
.iov_len = 2,
};
iovecs_len += 1;
}
iovecs[iovecs_len] = .{
.iov_base = "\r\n",
.iov_len = 2,
};
iovecs_len += 1;
if (request.head.method != .HEAD and content.len > 0) {
iovecs[iovecs_len] = .{
.iov_base = content.ptr,
.iov_len = content.len,
};
iovecs_len += 1;
}
try request.server.connection.stream.writevAll(iovecs[0..iovecs_len]);
}
pub const RespondStreamingOptions = struct {
/// An externally managed slice of memory used to batch bytes before
/// sending. `respondStreaming` asserts this is large enough to store
/// the full HTTP response head.
///
/// Must outlive the returned Response.
send_buffer: []u8,
/// If provided, the response will use the content-length header;
/// otherwise it will use transfer-encoding: chunked.
content_length: ?u64 = null,
/// Options that are shared with the `respond` method.
respond_options: RespondOptions = .{},
};
/// The header is buffered but not sent until Response.flush is called.
///
/// If the request contains a body and the connection is to be reused,
/// discards the request body, leaving the Server in the `ready` state. If
/// this discarding fails, the connection is marked as not to be reused and
/// no error is surfaced.
///
/// HEAD requests are handled transparently by setting a flag on the
/// returned Response to omit the body. However it may be worth noticing
/// that flag and skipping any expensive work that would otherwise need to
/// be done to satisfy the request.
///
/// Asserts `send_buffer` is large enough to store the entire response header.
/// Asserts status is not `continue`.
pub fn respondStreaming(request: *Request, options: RespondStreamingOptions) Response {
const o = options.respond_options;
assert(o.status != .@"continue");
const keep_alive = request.discardBody(o.keep_alive);
const phrase = o.reason orelse o.status.phrase() orelse "";
var h = std.ArrayListUnmanaged(u8).initBuffer(options.send_buffer);
h.writerAssumeCapacity().print("{s} {d} {s}\r\n", .{
@tagName(o.version), @intFromEnum(o.status), phrase,
}) catch |err| switch (err) {};
if (keep_alive) h.appendSliceAssumeCapacity("connection: keep-alive\r\n");
if (options.content_length) |len| {
h.writerAssumeCapacity().print("content-length: {d}\r\n", .{len}) catch |err| switch (err) {};
} else {
h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n");
}
for (o.extra_headers) |header| {
h.appendSliceAssumeCapacity(header.name);
h.appendSliceAssumeCapacity(": ");
h.appendSliceAssumeCapacity(header.value);
h.appendSliceAssumeCapacity("\r\n");
}
h.appendSliceAssumeCapacity("\r\n");
return .{
.stream = request.server.connection.stream,
.send_buffer = options.send_buffer,
.send_buffer_start = 0,
.send_buffer_end = h.items.len,
.content_length = options.content_length,
.elide_body = request.head.method == .HEAD,
.chunk_len = 0,
};
}
pub const ReadError = net.Stream.ReadError;
fn read_cl(context: *const anyopaque, buffer: []u8) ReadError!usize {
const request: *Request = @constCast(@alignCast(@ptrCast(context)));
const s = request.server;
assert(s.state == .receiving_body);
const remaining_content_length = &request.reader_state.remaining_content_length;
if (remaining_content_length.* == 0) {
s.state = .ready;
return 0;
}
// `next_request_start` doubles as the cursor for body bytes buffered in
// `read_buffer`; it was initialized to `head_end` by `reader`.
if (s.next_request_start == s.read_buffer_len) {
// Buffered bytes are exhausted; refill from the stream, reusing the
// space after the head so the head bytes stay alive.
const read_n = try s.connection.stream.read(s.read_buffer[request.head_end..]);
s.read_buffer_len = request.head_end + read_n;
s.next_request_start = request.head_end;
}
const available_buf = s.read_buffer[s.next_request_start..s.read_buffer_len];
const len = @min(remaining_content_length.*, available_buf.len, buffer.len);
@memcpy(buffer[0..len], available_buf[0..len]);
s.next_request_start += len;
remaining_content_length.* -= len;
if (remaining_content_length.* == 0)
s.state = .ready;
return len;
}
fn read_chunked(context: *const anyopaque, buffer: []u8) ReadError!usize {
const request: *Request = @constCast(@alignCast(@ptrCast(context)));
const s = request.server;
assert(s.state == .receiving_body);
_ = buffer;
@panic("TODO");
}
pub const ReadAllError = ReadError || error{HttpBodyOversize};
pub fn reader(request: *Request) std.io.AnyReader {
const s = request.server;
assert(s.state == .received_head);
s.state = .receiving_body;
// Position the buffered-body cursor at the end of the head; `read_cl`
// advances it as body bytes are consumed.
s.next_request_start = request.head_end;
switch (request.head.transfer_encoding) {
.chunked => return .{
.readFn = read_chunked,
.context = request,
},
.none => {
request.reader_state = .{
.remaining_content_length = request.head.content_length orelse 0,
};
return .{
.readFn = read_cl,
.context = request,
};
},
}
}
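// Typical consumption of the body reader (illustrative):
//
//     var buf: [1024]u8 = undefined;
//     const body_reader = request.reader();
//     while (true) {
//         const n = try body_reader.read(&buf);
//         if (n == 0) break;
//         // ... use buf[0..n] ...
//     }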
/// Returns whether the connection: keep-alive header should be sent to the
/// client.
/// If discarding the request body fails, or keep-alive was not negotiated,
/// false is returned and the connection will not be reused.
fn discardBody(request: *Request, keep_alive: bool) bool {
// Prepare to receive another request on the same connection.
// There are two factors to consider:
// * Any body the client sent must be discarded.
// * The Server's read_buffer may already have some bytes in it from
// whatever came after the head, which may be the next HTTP request
// or the request body.
// If the connection won't be kept alive, then none of this matters
// because the connection will be severed after the response is sent.
const s = request.server;
if (keep_alive and request.head.keep_alive) switch (s.state) {
.received_head => {
s.state = .receiving_body;
switch (request.head.transfer_encoding) {
.none => t: {
const len = request.head.content_length orelse break :t;
const head_end = request.head_end;
var total_body_discarded: usize = 0;
while (true) {
const available_bytes = s.read_buffer_len - head_end;
const remaining_len = len - total_body_discarded;
if (available_bytes >= remaining_len) {
s.next_request_start = head_end + remaining_len;
break :t;
}
total_body_discarded += available_bytes;
// Preserve request header memory until receiveHead is called.
const buf = s.read_buffer[head_end..];
const read_n = s.connection.stream.read(buf) catch return false;
s.read_buffer_len = head_end + read_n;
}
},
.chunked => {
@panic("TODO");
},
}
s.state = .ready;
return true;
},
.receiving_body, .ready => return true,
else => unreachable,
} else {
s.state = .closing;
return false;
}
}
};
pub const Response = struct {
stream: net.Stream,
send_buffer: []u8,
/// Index of the first byte in `send_buffer`.
/// This is 0 unless a short write happens in `write`.
send_buffer_start: usize,
/// Index of the last byte + 1 in `send_buffer`.
send_buffer_end: usize,
/// `null` means transfer-encoding: chunked.
/// As a debugging utility, counts down to zero as bytes are written.
content_length: ?u64,
elide_body: bool,
/// Indicates how much of the end of the `send_buffer` corresponds to a
/// chunk. This amount of data will be wrapped by an HTTP chunk header.
chunk_len: usize,
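// While streaming, `send_buffer` is laid out as follows (illustrative):
//   [0 .. send_buffer_start)                            already sent
//   [send_buffer_start .. send_buffer_end - chunk_len)  pending unframed
//                                                       bytes (e.g. the head)
//   [send_buffer_end - chunk_len .. send_buffer_end)    current chunk body,
//                                                       wrapped when flushed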
pub const WriteError = net.Stream.WriteError;
/// When using content-length, asserts that the amount of data sent matches
/// the value sent in the header, then calls `flush`.
/// Otherwise, transfer-encoding: chunked is being used, and it writes the
/// end-of-stream message, then flushes the stream to the system.
/// When the request method is HEAD, only the buffered response head is
/// sent; no body or chunked framing is written.
pub fn end(r: *Response) WriteError!void {
if (r.content_length) |len| {
assert(len == 0); // Trips when end() is called before all bytes are written.
try flush_cl(r);
} else if (r.elide_body) {
// The body was elided, so flush the head without chunked framing.
try flush_chunked(r, null);
} else {
try flush_chunked(r, &.{});
}
r.* = undefined;
}
pub const EndChunkedOptions = struct {
trailers: []const http.Header = &.{},
};
/// Asserts that the Response is using transfer-encoding: chunked.
/// Writes the end-of-stream message and any optional trailers, then
/// flushes the stream to the system.
/// When the request method is HEAD, only the buffered response head is
/// sent; no body framing or trailers are written.
/// Asserts there are at most 25 trailers.
pub fn endChunked(r: *Response, options: EndChunkedOptions) WriteError!void {
assert(r.content_length == null);
try flush_chunked(r, if (r.elide_body) null else options.trailers);
r.* = undefined;
}
/// If using content-length, asserts that writing these bytes to the client
/// would not exceed the content-length value sent in the HTTP header.
/// May return 0, which does not indicate end of stream. The caller decides
/// when the end of stream occurs by calling `end`.
pub fn write(r: *Response, bytes: []const u8) WriteError!usize {
if (r.content_length != null) {
return write_cl(r, bytes);
} else {
return write_chunked(r, bytes);
}
}
fn write_cl(context: *const anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
const len = &r.content_length.?;
if (r.elide_body) {
len.* -= bytes.len;
return bytes.len;
}
if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
var iovecs: [2]std.posix.iovec_const = .{
.{
.iov_base = r.send_buffer.ptr + r.send_buffer_start,
.iov_len = send_buffer_len,
},
.{
.iov_base = bytes.ptr,
.iov_len = bytes.len,
},
};
const n = try r.stream.writev(&iovecs);
if (n >= send_buffer_len) {
// It was enough to reset the buffer.
r.send_buffer_start = 0;
r.send_buffer_end = 0;
const bytes_n = n - send_buffer_len;
len.* -= bytes_n;
return bytes_n;
}
// It didn't even make it through the existing buffer, let
// alone the new bytes provided.
r.send_buffer_start += n;
return 0;
}
// All bytes can be stored in the remaining space of the buffer.
@memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
r.send_buffer_end += bytes.len;
len.* -= bytes.len;
return bytes.len;
}
fn write_chunked(context: *const anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
assert(r.content_length == null);
if (r.elide_body)
return bytes.len;
if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
const chunk_len = r.chunk_len + bytes.len;
var header_buf: [18]u8 = undefined;
const chunk_header = std.fmt.bufPrint(&header_buf, "{x}\r\n", .{chunk_len}) catch unreachable;
var iovecs: [5]std.posix.iovec_const = .{
.{
.iov_base = r.send_buffer.ptr + r.send_buffer_start,
.iov_len = send_buffer_len - r.chunk_len,
},
.{
.iov_base = chunk_header.ptr,
.iov_len = chunk_header.len,
},
.{
.iov_base = r.send_buffer.ptr + r.send_buffer_end - r.chunk_len,
.iov_len = r.chunk_len,
},
.{
.iov_base = bytes.ptr,
.iov_len = bytes.len,
},
.{
.iov_base = "\r\n",
.iov_len = 2,
},
};
// TODO make this writev instead of writevAll, which involves
// complicating the logic of this function.
try r.stream.writevAll(&iovecs);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
return bytes.len;
}
// All bytes can be stored in the remaining space of the buffer.
@memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
r.send_buffer_end += bytes.len;
r.chunk_len += bytes.len;
return bytes.len;
}
/// If using content-length, asserts that writing these bytes to the client
/// would not exceed the content-length value sent in the HTTP header.
pub fn writeAll(r: *Response, bytes: []const u8) WriteError!void {
var index: usize = 0;
while (index < bytes.len) {
index += try write(r, bytes[index..]);
}
}
/// Sends all buffered data to the client.
/// `end` and `endChunked` already flush; do not call this after them, as
/// the Response is undefined once ended.
pub fn flush(r: *Response) WriteError!void {
if (r.content_length != null) {
return flush_cl(r);
} else {
return flush_chunked(r, null);
}
}
fn flush_cl(r: *Response) WriteError!void {
assert(r.content_length != null);
try r.stream.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
}
fn flush_chunked(r: *Response, end_trailers: ?[]const http.Header) WriteError!void {
const max_trailers = 25;
if (end_trailers) |trailers| assert(trailers.len <= max_trailers);
assert(r.content_length == null);
const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
var header_buf: [18]u8 = undefined;
const chunk_header = std.fmt.bufPrint(&header_buf, "{x}\r\n", .{r.chunk_len}) catch unreachable;
var iovecs: [max_trailers * 4 + 6]std.posix.iovec_const = undefined;
var iovecs_len: usize = 0;
// Everything before the current chunk (typically the response head) is
// sent without chunk framing.
iovecs[iovecs_len] = .{
.iov_base = r.send_buffer.ptr + r.send_buffer_start,
.iov_len = send_buffer_len - r.chunk_len,
};
iovecs_len += 1;
if (r.chunk_len > 0) {
iovecs[iovecs_len] = .{
.iov_base = chunk_header.ptr,
.iov_len = chunk_header.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = r.send_buffer.ptr + r.send_buffer_end - r.chunk_len,
.iov_len = r.chunk_len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = "\r\n",
.iov_len = 2,
};
iovecs_len += 1;
}
if (end_trailers) |trailers| {
// Terminating zero-length chunk, optional trailers, then the final CRLF.
iovecs[iovecs_len] = .{
.iov_base = "0\r\n",
.iov_len = 3,
};
iovecs_len += 1;
for (trailers) |trailer| {
iovecs[iovecs_len] = .{
.iov_base = trailer.name.ptr,
.iov_len = trailer.name.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = ": ",
.iov_len = 2,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = trailer.value.ptr,
.iov_len = trailer.value.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.iov_base = "\r\n",
.iov_len = 2,
};
iovecs_len += 1;
}
iovecs[iovecs_len] = .{
.iov_base = "\r\n",
.iov_len = 2,
};
iovecs_len += 1;
}
try r.stream.writevAll(iovecs[0..iovecs_len]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
}
pub fn writer(r: *Response) std.io.AnyWriter {
return .{
.writeFn = if (r.content_length != null) write_cl else write_chunked,
.context = r,
};
}
};
const std = @import("../std.zig");
const http = std.http;
const mem = std.mem;
const net = std.net;
const Uri = std.Uri;
const assert = std.debug.assert;
const Server = @This();