1
0
Fork 0
mirror of https://github.com/zigzap/zap.git synced 2025-10-20 15:14:08 +00:00

Compare commits

...

26 commits

Author SHA1 Message Date
renerocksai
729a614c30
fix tests on gh 2025-08-28 22:56:37 +02:00
renerocksai
ecb03cb90e
0.15.1-fix tests 2025-08-28 22:47:50 +02:00
renerocksai
4efcf18999
0.15.1-fix accept example 2025-08-28 22:38:51 +02:00
renerocksai
e5c5e7d3cf
0.15.1-fix bindataformpost example 2025-08-28 22:36:00 +02:00
renerocksai
8540828bf5
0.15.1-fix websockets example 2025-08-28 22:31:12 +02:00
renerocksai
a45baf78ea
0.15.1-fix cookies example 2025-08-28 22:27:51 +02:00
renerocksai
7672a5e099
0.15.1-fix http_params example 2025-08-28 22:22:59 +02:00
renerocksai
b5227bccbb
0.15.1-fix endpoint example 2025-08-28 22:03:43 +02:00
renerocksai
23162dbe2e
fix hello_json 2025-08-28 20:29:11 +02:00
renerocksai
5740b3b1a1
fix app_basic 0.15.1 2025-08-28 20:24:35 +02:00
renerocksai
f4d1ac0a30
fix #178 status code mismatch 2025-08-28 20:21:23 +02:00
renerocksai
0a582433a5
upgrade announceybot to 0.15.1 2025-08-28 19:45:13 +02:00
renerocksai
5c1fa416aa
fix build.zig: new 0.15.1 stdlib 2025-08-28 19:44:30 +02:00
Tesseract22
3c22e9dac5
chore: update to latest zig version
zig 0.15 introduced tons of breaking changes in the standard library,
fix them.
2025-08-28 17:47:44 +02:00
Rene Schallner
302f1e1e44
Merge pull request #174 from pollend/feature/tweak-support-cookies
feat: tweak support for additional cookie fields
2025-08-28 10:45:30 +02:00
Michael Pollind
512c630781 feat: tweak support for additional cookie fields
Signed-off-by: Michael Pollind <mpollind@gmail.com>
2025-08-10 18:59:27 -07:00
Rene Schallner
a953fc5969
Merge pull request #172 from pollend/feature/cookie-add-partition
feat: add parition support for cookies in facil.io
2025-08-07 10:31:01 +02:00
Michael Pollind
6174c3e5e0 feat: add parition support for cookies in facil.io
Signed-off-by: Michael Pollind <mpollind@gmail.com>
2025-08-06 21:12:08 -07:00
GitHub Action
a385bea1ed Update README 2025-07-25 00:00:57 +00:00
renerocksai
e1c973d5e2
bumped zon version 2025-07-25 01:59:02 +02:00
renerocksai
d9739599ec
added fio-typecheck of mimetype field when parsing binfiles 2025-07-25 01:59:02 +02:00
GitHub Action
66a7967aca Update README 2025-07-23 18:32:58 +00:00
renerocksai
e67b672977
fix zig version in README, bump zon version 2025-07-23 20:25:23 +02:00
renerocksai
dabd0637f9
endpoints, auth endpoints, middleware endpoints: eliminate need for empty stubs 2025-07-23 20:23:49 +02:00
renerocksai
baaa71d0e0
tests: fix non-overlapping ports during tests 2025-07-23 20:08:17 +02:00
renerocksai
6bdd5a41e4
exapmles/app/errors.zig: remove unnecessary empty stubs 2025-07-23 19:00:06 +02:00
35 changed files with 451 additions and 488 deletions

View file

@ -18,21 +18,22 @@ jobs:
- uses: actions/checkout@v3
- uses: goto-bus-stop/setup-zig@v2
with:
version: 0.14.0
version: 0.15.1
- name: Check zig version
run: zig version
- name: Build all examples
run: zig build all
# Run tests separately so we can see more clearly which one fails
# Also, the test runner tries to run tests concurrently, which causes
# conflicts when port numbers are re-used in the tests
# conflicts when port numbers are re-used in the tests (which they aren't
# anymore)
- name: Run mustache tests
run: zig build test-mustache
- name: Run httpparams tests
run: zig build test-httpparams
run: zig build test-http_params
- name: Run sendfile tests
run: zig build test-sendfile
- name: Run authentication tests
run: zig build test-authentication
run: zig build test-auth
- name: Report end of tests
run: echo "tests finished"

View file

@ -31,11 +31,10 @@ jobs:
- name: Run mustache tests
run: zig build test-mustache
- name: Run httpparams tests
run: zig build test-httpparams
run: zig build test-http_params
- name: Run sendfile tests
run: zig build test-sendfile
- name: Run authentication tests
run: zig build test-authentication
run: zig build test-auth
- name: Report end of tests
run: echo "tests finished"

View file

@ -25,7 +25,7 @@ proved to be:
## FAQ:
- Q: **What version of Zig does Zap support?**
- Zap uses the latest stable zig release (0.14.0), so you don't have to keep
- Zap uses the latest stable zig release (0.14.1), so you don't have to keep
up with frequent breaking changes. It's an "LTS feature".
- Q: **Can Zap build with Zig's master branch?**
- See the `zig-master` branch. Please note that the zig-master branch is not
@ -266,7 +266,7 @@ code leaks memory.
## Getting started
Make sure you have **zig 0.14.0** installed. Fetch it from
Make sure you have **zig 0.14.1** installed. Fetch it from
[here](https://ziglang.org/download).
```shell
@ -279,7 +279,7 @@ $ # open http://localhost:3000 in your browser
## Using ⚡zap⚡ in your own projects
Make sure you have **the latest zig release (0.14.0)** installed. Fetch it from
Make sure you have **the latest zig release (0.14.1)** installed. Fetch it from
[here](https://ziglang.org/download).
If you don't have an existing zig project, create one like this:
@ -298,7 +298,7 @@ In your zig project folder (where `build.zig` is located), run:
<!-- INSERT_DEP_BEGIN -->
```
zig fetch --save "git+https://github.com/zigzap/zap#v0.10.4"
zig fetch --save "git+https://github.com/zigzap/zap#v0.10.6"
```
<!-- INSERT_DEP_END -->
@ -412,3 +412,5 @@ pub fn main() !void {

175
build.zig
View file

@ -1,6 +1,41 @@
const std = @import("std");
const build_facilio = @import("facil.io/build.zig").build_facilio;
// Basically a wrapper around some common params that you would pass around to create tests (zig made them very verbose lately, unfortunately),
// save these to a struct so you don't have to pass the same params all the time.
const TestSystem = struct {
b: *std.Build,
zap_module: *std.Build.Module,
target: std.Build.ResolvedTarget,
optimize: std.builtin.OptimizeMode,
combine_test_step: *std.Build.Step,
pub fn addTest(self: TestSystem, root_src: []const u8, test_name: []const u8) void {
const tests_module = self.b.addModule(test_name, .{
.root_source_file = self.b.path(root_src),
.target = self.target,
.optimize = self.optimize,
});
const tests = self.b.addTest(.{
.name = self.b.fmt("{s}_tests", .{test_name}),
.root_module = tests_module,
});
tests.root_module.addImport("zap", self.zap_module);
const step = self.b.step(self.b.fmt("test-{s}", .{test_name}), self.b.fmt("Run {s} unit tests [REMOVE zig-cache!]", .{test_name}));
self.addRunInstallToStep(tests, step);
}
fn addRunInstallToStep(self: TestSystem, tests: *std.Build.Step.Compile, step: *std.Build.Step) void {
const run_tests = self.b.addRunArtifact(tests);
const install_tests = self.b.addInstallArtifact(tests, .{});
step.dependOn(&run_tests.step);
step.dependOn(&install_tests.step);
self.combine_test_step.dependOn(step);
}
};
pub fn build(b: *std.Build) !void {
const target = b.standardTargetOptions(.{});
if (target.result.os.tag == .windows) {
@ -34,9 +69,7 @@ pub fn build(b: *std.Build) !void {
// -- Docs
const docs_obj = b.addObject(.{
.name = "zap", // name doesn't seem to matter
.root_source_file = b.path("src/zap.zig"),
.target = target,
.optimize = .Debug,
.root_module = zap_module,
});
const install_docs = b.addInstallDirectory(.{
.install_dir = .prefix,
@ -95,13 +128,17 @@ pub fn build(b: *std.Build) !void {
const example_run_step = b.step(ex_run_stepname, ex_run_stepdesc);
const example_step = b.step(ex_name, ex_build_desc);
var example = b.addExecutable(.{
.name = ex_name,
const exe_mod = b.addModule(ex_name, .{
.root_source_file = b.path(ex_src),
.target = target,
.optimize = optimize,
});
var example = b.addExecutable(.{
.name = ex_name,
.root_module = exe_mod,
});
example.root_module.addImport("zap", zap_module);
// const example_run = example.run();
@ -132,127 +169,34 @@ pub fn build(b: *std.Build) !void {
// So, for now, we just force the exe to be built, so in order that
// we can call it again when needed.
const test_step = b.step("test", "Run unit tests");
const test_system = TestSystem{ .b = b, .zap_module = zap_module, .target = target, .optimize = optimize, .combine_test_step = test_step };
// authentication tests
//
const auth_tests = b.addTest(.{
.name = "auth_tests",
.root_source_file = b.path("src/tests/test_auth.zig"),
.target = target,
.optimize = optimize,
});
auth_tests.root_module.addImport("zap", zap_module);
const run_auth_tests = b.addRunArtifact(auth_tests);
const install_auth_tests = b.addInstallArtifact(auth_tests, .{});
test_system.addTest("src/tests/test_auth.zig", "auth");
// mustache tests
const mustache_tests = b.addTest(.{
.name = "mustache_tests",
.root_source_file = b.path("src/tests/test_mustache.zig"),
.target = target,
.optimize = optimize,
});
mustache_tests.root_module.addImport("zap", zap_module);
const run_mustache_tests = b.addRunArtifact(mustache_tests);
const install_mustache_tests = b.addInstallArtifact(mustache_tests, .{});
test_system.addTest("src/tests/test_mustache.zig", "mustache");
// http parameters (query, body) tests
const httpparams_tests = b.addTest(.{
.name = "http_params_tests",
.root_source_file = b.path("src/tests/test_http_params.zig"),
.target = target,
.optimize = optimize,
});
httpparams_tests.root_module.addImport("zap", zap_module);
const run_httpparams_tests = b.addRunArtifact(httpparams_tests);
test_system.addTest("src/tests/test_http_params.zig", "http_params");
// http parameters (query, body) tests
test_system.addTest("src/tests/test_sendfile.zig", "sendfile");
test_system.addTest("src/tests/test_recvfile.zig", "recv");
test_system.addTest("src/tests/test_recvfile_notype.zig", "recv_notype");
// TODO: for some reason, tests aren't run more than once unless
// dependencies have changed.
// So, for now, we just force the exe to be built, so in order that
// we can call it again when needed.
const install_httpparams_tests = b.addInstallArtifact(httpparams_tests, .{});
// http parameters (query, body) tests
const sendfile_tests = b.addTest(.{
.name = "sendfile_tests",
.root_source_file = b.path("src/tests/test_sendfile.zig"),
.target = target,
.optimize = optimize,
});
sendfile_tests.root_module.addImport("zap", zap_module);
const run_sendfile_tests = b.addRunArtifact(sendfile_tests);
const install_sendfile_tests = b.addInstallArtifact(sendfile_tests, .{});
const recvfile_tests = b.addTest(.{
.name = "recv_tests",
.root_source_file = b.path("src/tests/test_recvfile.zig"),
.target = target,
.optimize = optimize,
});
recvfile_tests.root_module.addImport("zap", zap_module);
const run_recvfile_tests = b.addRunArtifact(recvfile_tests);
const install_recvfile_tests = b.addInstallArtifact(recvfile_tests, .{});
const recvfile_notype_tests = b.addTest(.{
.name = "recv_tests",
.root_source_file = b.path("src/tests/test_recvfile_notype.zig"),
.target = target,
.optimize = optimize,
});
recvfile_notype_tests.root_module.addImport("zap", zap_module);
const run_recvfile_notype_tests = b.addRunArtifact(recvfile_notype_tests);
const install_recvfile_notype_tests = b.addInstallArtifact(recvfile_notype_tests, .{});
// test commands
const run_auth_test_step = b.step("test-authentication", "Run auth unit tests [REMOVE zig-cache!]");
run_auth_test_step.dependOn(&run_auth_tests.step);
run_auth_test_step.dependOn(&install_auth_tests.step);
const run_mustache_test_step = b.step("test-mustache", "Run mustache unit tests [REMOVE zig-cache!]");
run_mustache_test_step.dependOn(&run_mustache_tests.step);
run_mustache_test_step.dependOn(&install_mustache_tests.step);
const run_httpparams_test_step = b.step("test-httpparams", "Run http param unit tests [REMOVE zig-cache!]");
run_httpparams_test_step.dependOn(&run_httpparams_tests.step);
run_httpparams_test_step.dependOn(&install_httpparams_tests.step);
const run_sendfile_test_step = b.step("test-sendfile", "Run http param unit tests [REMOVE zig-cache!]");
run_sendfile_test_step.dependOn(&run_sendfile_tests.step);
run_sendfile_test_step.dependOn(&install_sendfile_tests.step);
const run_recvfile_test_step = b.step("test-recvfile", "Run http param unit tests [REMOVE zig-cache!]");
run_recvfile_test_step.dependOn(&run_recvfile_tests.step);
run_recvfile_test_step.dependOn(&install_recvfile_tests.step);
const run_recvfile_notype_test_step = b.step("test-recvfile_notype", "Run http param unit tests [REMOVE zig-cache!]");
run_recvfile_notype_test_step.dependOn(&run_recvfile_notype_tests.step);
run_recvfile_notype_test_step.dependOn(&install_recvfile_notype_tests.step);
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the participant to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_auth_tests.step);
test_step.dependOn(&run_mustache_tests.step);
test_step.dependOn(&run_httpparams_tests.step);
test_step.dependOn(&run_sendfile_tests.step);
test_step.dependOn(&run_recvfile_tests.step);
test_step.dependOn(&run_recvfile_notype_tests.step);
//
// docserver
//
const docserver_exe = b.addExecutable(.{
.name = "docserver",
const docserver_mod = b.addModule("docserver", .{
.root_source_file = b.path("./tools/docserver.zig"),
.target = target,
.optimize = optimize,
});
const docserver_exe = b.addExecutable(.{
.name = "docserver",
.root_module = docserver_mod,
});
docserver_exe.root_module.addImport("zap", zap_module);
var docserver_step = b.step("docserver", "Build docserver");
const docserver_build_step = b.addInstallArtifact(docserver_exe, .{});
@ -271,12 +215,15 @@ pub fn build(b: *std.Build) !void {
//
// announceybot
//
const announceybot_exe = b.addExecutable(.{
.name = "announceybot",
const announceybot_mod = b.addModule("announceybot", .{
.root_source_file = b.path("./tools/announceybot.zig"),
.target = target,
.optimize = optimize,
});
const announceybot_exe = b.addExecutable(.{
.name = "announceybot",
.root_module = announceybot_mod,
});
var announceybot_step = b.step("announceybot", "Build announceybot");
const announceybot_build_step = b.addInstallArtifact(announceybot_exe, .{});
announceybot_step.dependOn(&announceybot_build_step.step);

View file

@ -1,6 +1,6 @@
.{
.name = .zap,
.version = "0.10.4",
.version = "0.10.6",
.paths = .{
"build.zig",
"build.zig.zon",

View file

@ -19,7 +19,7 @@ fn on_request_verbose(r: zap.Request) !void {
const content_type: zap.ContentType = content_type: {
var accept_list = r.parseAcceptHeaders(accept_allocator) catch break :content_type .HTML;
defer accept_list.deinit();
defer accept_list.deinit(accept_allocator);
for (accept_list.items) |accept| {
break :content_type accept.toContentType() orelse continue;
@ -72,7 +72,7 @@ fn on_request_verbose(r: zap.Request) !void {
\\ <body>
\\ <h1>Hello from ZAP!!!</h1>
\\ </body>
\\</html>
\\</html>
);
},
}

View file

@ -57,9 +57,9 @@ const SimpleEndpoint = struct {
.{ context.db_connection, e.some_data, arena.ptr, thread_id },
);
try r.sendBody(response_text);
std.time.sleep(std.time.ns_per_ms * 300);
std.Thread.sleep(std.time.ns_per_ms * 300);
}
};
};
const StopEndpoint = struct {
path: []const u8,
@ -69,7 +69,7 @@ const StopEndpoint = struct {
std.debug.print(
\\Before I stop, let me dump the app context:
\\db_connection='{s}'
\\
\\
\\
, .{context.*.db_connection});
zap.stop();

View file

@ -14,7 +14,7 @@ const MyContext = struct {
db_connection: []const u8,
// we don't use this
pub fn unhandledRequest(_: *MyContext, _: Allocator, _: zap.Request) anyerror!void {}
// pub fn unhandledRequest(_: *MyContext, _: Allocator, _: zap.Request) anyerror!void {}
pub fn unhandledError(_: *MyContext, _: zap.Request, err: anyerror) void {
std.debug.print("\n\n\nUNHANDLED ERROR: {} !!! \n\n\n", .{err});
@ -46,14 +46,6 @@ const ErrorEndpoint = struct {
// -> error will be raised and dispatched to MyContext.unhandledError
return error.@"Oh-No!";
}
// empty stubs for all other request methods
pub fn post(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn put(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn delete(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn patch(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn options(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn head(_: *ErrorEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
};
const StopEndpoint = struct {
@ -64,17 +56,11 @@ const StopEndpoint = struct {
std.debug.print(
\\Before I stop, let me dump the app context:
\\db_connection='{s}'
\\
\\
\\
, .{context.*.db_connection});
zap.stop();
}
pub fn post(_: *StopEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn put(_: *StopEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn delete(_: *StopEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn patch(_: *StopEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
pub fn options(_: *StopEndpoint, _: Allocator, _: *MyContext, _: zap.Request) !void {}
};
pub fn main() !void {

View file

@ -38,13 +38,13 @@ const Handler = struct {
//
// HERE WE HANDLE THE BINARY FILE
//
const params = try r.parametersToOwnedList(Handler.alloc);
var params = try r.parametersToOwnedList(Handler.alloc);
defer params.deinit();
for (params.items) |kv| {
if (kv.value) |v| {
for (params.items) |*kv| {
if (kv.value) |*v| {
std.debug.print("\n", .{});
std.log.info("Param `{s}` in owned list is {any}", .{ kv.key, v });
switch (v) {
switch (v.*) {
// single-file upload
zap.Request.HttpParam.Hash_Binfile => |*file| {
const filename = file.filename orelse "(no filename)";
@ -66,7 +66,7 @@ const Handler = struct {
std.log.debug(" mimetype: {s}", .{mimetype});
std.log.debug(" contents: {any}", .{data});
}
files.*.deinit();
files.deinit(alloc);
},
else => {
// let's just get it as its raw slice

View file

@ -17,22 +17,16 @@ pub const std_options: std.Options = .{
// We send ourselves a request with a cookie
fn makeRequest(a: std.mem.Allocator, url: []const u8) !void {
const uri = try std.Uri.parse(url);
var http_client: std.http.Client = .{ .allocator = a };
defer http_client.deinit();
var server_header_buffer: [2048]u8 = undefined;
var req = try http_client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
_ = try http_client.fetch(.{
.location = .{ .url = url },
.method = .GET,
.extra_headers = &.{
.{ .name = "cookie", .value = "ZIG_ZAP=awesome" },
},
});
defer req.deinit();
try req.send();
try req.wait();
}
fn makeRequestThread(a: std.mem.Allocator, url: []const u8) !std.Thread {
@ -71,7 +65,7 @@ pub fn main() !void {
std.debug.print("\n", .{});
// // iterate over all cookies
const cookies = r.cookiesToOwnedList(alloc) catch unreachable;
var cookies = r.cookiesToOwnedList(alloc) catch unreachable;
defer cookies.deinit();
for (cookies.items) |kv| {
std.log.info("cookie `{s}` is {any}", .{ kv.key, kv.value });

View file

@ -13,11 +13,3 @@ pub fn get(_: *ErrorEndpoint, _: zap.Request) !void {
// --> this error will be shown in the browser, with a nice error trace
return error.@"Oh-no!";
}
// unused:
pub fn post(_: *ErrorEndpoint, _: zap.Request) !void {}
pub fn put(_: *ErrorEndpoint, _: zap.Request) !void {}
pub fn delete(_: *ErrorEndpoint, _: zap.Request) !void {}
pub fn patch(_: *ErrorEndpoint, _: zap.Request) !void {}
pub fn options(_: *ErrorEndpoint, _: zap.Request) !void {}
pub fn head(_: *ErrorEndpoint, _: zap.Request) !void {}

View file

@ -17,10 +17,3 @@ pub fn init(path: []const u8) StopEndpoint {
pub fn get(_: *StopEndpoint, _: zap.Request) !void {
zap.stop();
}
pub fn post(_: *StopEndpoint, _: zap.Request) !void {}
pub fn put(_: *StopEndpoint, _: zap.Request) !void {}
pub fn delete(_: *StopEndpoint, _: zap.Request) !void {}
pub fn patch(_: *StopEndpoint, _: zap.Request) !void {}
pub fn options(_: *StopEndpoint, _: zap.Request) !void {}
pub fn head(_: *StopEndpoint, _: zap.Request) !void {}

View file

@ -121,17 +121,18 @@ pub fn toJSON(self: *Users) ![]const u8 {
// working directly with InternalUser elements of the users hashmap.
// might actually save some memory
// TODO: maybe do it directly with the user.items
var l: std.ArrayList(User) = std.ArrayList(User).init(self.alloc);
defer l.deinit();
var l: std.ArrayList(User) = std.ArrayList(User).empty;
defer l.deinit(self.alloc);
// the potential race condition is fixed by jsonifying with the mutex locked
var it = JsonUserIteratorWithRaceCondition.init(&self.users);
while (it.next()) |user| {
try l.append(user);
try l.append(self.alloc, user);
}
std.debug.assert(self.users.count() == l.items.len);
std.debug.assert(self.count == l.items.len);
return std.json.stringifyAlloc(self.alloc, l.items, .{});
return std.json.Stringify.valueAlloc(self.alloc, l.items, .{});
}
//

View file

@ -43,7 +43,8 @@ fn userIdFromPath(self: *UserWeb, path: []const u8) ?usize {
return null;
}
pub fn put(_: *UserWeb, _: zap.Request) !void {}
// not implemented
// pub fn put(_: *UserWeb, _: zap.Request) !void {}
pub fn get(self: *UserWeb, r: zap.Request) !void {
if (r.path) |path| {

View file

@ -31,14 +31,6 @@ const Endpoint = struct {
r.setStatus(.unauthorized);
r.sendBody("UNAUTHORIZED ACCESS") catch return;
}
// not implemented, don't care
pub fn post(_: *Endpoint, _: zap.Request) !void {}
pub fn put(_: *Endpoint, _: zap.Request) !void {}
pub fn delete(_: *Endpoint, _: zap.Request) !void {}
pub fn patch(_: *Endpoint, _: zap.Request) !void {}
pub fn options(_: *Endpoint, _: zap.Request) !void {}
pub fn head(_: *Endpoint, _: zap.Request) !void {}
};
pub fn main() !void {
@ -74,7 +66,7 @@ pub fn main() !void {
listener.listen() catch {};
std.debug.print(
\\ Run the following:
\\
\\
\\ curl http://localhost:3000/test -i -H "Authorization: Bearer ABCDEFG" -v
\\ curl http://localhost:3000/test -i -H "Authorization: Bearer invalid" -v
\\

View file

@ -17,19 +17,18 @@ pub const std_options: std.Options = .{
// We send ourselves a request
fn makeRequest(a: std.mem.Allocator, url: []const u8) !void {
const uri = try std.Uri.parse(url);
var http_client: std.http.Client = .{ .allocator = a };
defer http_client.deinit();
var server_header_buffer: [2048]u8 = undefined;
var req = try http_client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
const response = try http_client.fetch(.{
.location = .{ .url = url },
.method = .GET,
.payload = null,
.keep_alive = false,
});
defer req.deinit();
try req.send();
try req.wait();
if (response.status.class() != .success) {
std.debug.print("HTTP Error: {?s}", .{response.status.phrase()});
}
}
fn makeRequestThread(a: std.mem.Allocator, url: []const u8) !std.Thread {
@ -95,7 +94,7 @@ pub fn main() !void {
std.debug.print("\n", .{});
// iterate over all params
const params = try r.parametersToOwnedList(alloc);
var params = try r.parametersToOwnedList(alloc);
defer params.deinit();
for (params.items) |kv| {
std.log.info("Param `{s}` is {any}", .{ kv.key, kv.value });

View file

@ -152,13 +152,6 @@ const HtmlEndpoint = struct {
};
}
pub fn post(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn put(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn delete(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn patch(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn options(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn head(_: *HtmlEndpoint, _: zap.Request) !void {}
pub fn get(_: *HtmlEndpoint, r: zap.Request) !void {
var buf: [1024]u8 = undefined;
var userFound: bool = false;

View file

@ -35,7 +35,7 @@ const ContextManager = struct {
.allocator = allocator,
.channel = channelName,
.usernamePrefix = usernamePrefix,
.contexts = ContextList.init(allocator),
.contexts = ContextList.empty,
};
}
@ -43,7 +43,7 @@ const ContextManager = struct {
for (self.contexts.items) |ctx| {
self.allocator.free(ctx.userName);
}
self.contexts.deinit();
self.contexts.deinit(self.allocator);
}
pub fn newContext(self: *ContextManager) !*Context {
@ -73,7 +73,7 @@ const ContextManager = struct {
.context = ctx,
},
};
try self.contexts.append(ctx);
try self.contexts.append(self.allocator, ctx);
return ctx;
}
};

View file

@ -7,17 +7,24 @@ pub fn build_facilio(
optimize: std.builtin.OptimizeMode,
use_openssl: bool,
) !*std.Build.Step.Compile {
const lib = b.addStaticLibrary(.{
.name = "facil.io",
const mod = b.addModule("facil.io", .{
.target = target,
.optimize = optimize,
.link_libc = true,
});
const lib = b.addLibrary(.{
.name = "facil.io",
.root_module = mod,
.linkage = .dynamic,
});
// Generate flags
var flags = std.ArrayList([]const u8).init(std.heap.page_allocator);
if (optimize != .Debug) try flags.append("-Os");
try flags.append("-Wno-return-type-c-linkage");
try flags.append("-fno-sanitize=undefined");
var flags = std.ArrayList([]const u8).empty;
defer flags.deinit(b.allocator);
if (optimize != .Debug) try flags.append(b.allocator, "-Os");
try flags.append(b.allocator, "-Wno-return-type-c-linkage");
try flags.append(b.allocator, "-fno-sanitize=undefined");
//
// let's not override malloc from within the lib
@ -25,24 +32,24 @@ pub fn build_facilio(
// try flags.append("-DFIO_OVERRIDE_MALLOC");
//
try flags.append("-DFIO_HTTP_EXACT_LOGGING");
try flags.append(b.allocator, "-DFIO_HTTP_EXACT_LOGGING");
if (target.result.abi == .musl)
try flags.append("-D_LARGEFILE64_SOURCE");
try flags.append(b.allocator, "-D_LARGEFILE64_SOURCE");
if (use_openssl)
try flags.append("-DHAVE_OPENSSL -DFIO_TLS_FOUND");
try flags.append(b.allocator, "-DHAVE_OPENSSL -DFIO_TLS_FOUND");
// Include paths
lib.addIncludePath(b.path(subdir ++ "/."));
lib.addIncludePath(b.path(subdir ++ "/lib/facil"));
lib.addIncludePath(b.path(subdir ++ "/lib/facil/fiobj"));
lib.addIncludePath(b.path(subdir ++ "/lib/facil/cli"));
lib.addIncludePath(b.path(subdir ++ "/lib/facil/http"));
lib.addIncludePath(b.path(subdir ++ "/lib/facil/http/parsers"));
mod.addIncludePath(b.path(subdir ++ "/."));
mod.addIncludePath(b.path(subdir ++ "/lib/facil"));
mod.addIncludePath(b.path(subdir ++ "/lib/facil/fiobj"));
mod.addIncludePath(b.path(subdir ++ "/lib/facil/cli"));
mod.addIncludePath(b.path(subdir ++ "/lib/facil/http"));
mod.addIncludePath(b.path(subdir ++ "/lib/facil/http/parsers"));
if (use_openssl)
lib.addIncludePath(b.path(subdir ++ "/lib/facil/tls"));
mod.addIncludePath(b.path(subdir ++ "/lib/facil/tls"));
// C source files
lib.addCSourceFiles(.{
mod.addCSourceFiles(.{
.files = &.{
subdir ++ "/lib/facil/fio.c",
subdir ++ "/lib/facil/fio_zig.c",
@ -65,7 +72,7 @@ pub fn build_facilio(
});
if (use_openssl) {
lib.addCSourceFiles(.{
mod.addCSourceFiles(.{
.files = &.{
subdir ++ "/lib/facil/tls/fio_tls_openssl.c",
subdir ++ "/lib/facil/tls/fio_tls_missing.c",
@ -74,13 +81,10 @@ pub fn build_facilio(
});
}
// link against libc
lib.linkLibC();
// link in libopenssl and libcrypto on demand
// link in libopenssl and libcrypto on demand
if (use_openssl) {
lib.linkSystemLibrary("ssl");
lib.linkSystemLibrary("crypto");
mod.linkSystemLibrary("ssl", .{});
mod.linkSystemLibrary("crypto", .{});
}
b.installArtifact(lib);

View file

@ -317,6 +317,17 @@ int http_set_cookie(http_s *h, http_cookie_args_s cookie) {
if (cookie.http_only) {
fiobj_str_write(c, "HttpOnly;", 9);
}
if(cookie.partitioned) {
fiobj_str_write(c, "Partitioned;", 12);
}
if(cookie.same_site == HTTP_COOKIE_SAME_SITE_LAX) {
fiobj_str_write(c, "SameSite=Lax;", 13);
} else if (cookie.same_site == HTTP_COOKIE_SAME_SITE_STRICT) {
fiobj_str_write(c, "SameSite=Strict;", 16);
} else if (cookie.same_site == HTTP_COOKIE_SAME_SITE_NONE) {
fiobj_str_write(c, "SameSite=None;", 14);
}
if (cookie.secure) {
fiobj_str_write(c, "secure;", 7);
}

View file

@ -58,6 +58,7 @@ Compile Time Settings
#define FIO_HTTP_EXACT_LOGGING 0
#endif
/** the `http_listen settings, see details in the struct definition. */
typedef struct http_settings_s http_settings_s;
@ -120,6 +121,16 @@ typedef struct {
void *udata;
} http_s;
/**
* This determines the SameSite attribute of a cookie.
*/
typedef enum {
HTTP_COOKIE_SAME_SITE_DEFAULT,
HTTP_COOKIE_SAME_SITE_LAX,
HTTP_COOKIE_SAME_SITE_STRICT,
HTTP_COOKIE_SAME_SITE_NONE
} http_cookie_same_site_e;
/**
* This is a helper for setting cookie data.
@ -153,6 +164,10 @@ typedef struct {
unsigned secure : 1;
/** Limit cookie to HTTP (intended to prevent javascript access/hijacking).*/
unsigned http_only : 1;
/** Partitioned storage, with a separate cookie jar per top-level site */
unsigned partitioned: 1;
/** The SameSite attribute, see `http_cookie_same_site_e` for details. */
unsigned same_site : 3;
} http_cookie_args_s;
/**

View file

@ -5,7 +5,11 @@
//! Pass an instance of an Endpoint struct to zap.Endpoint.Listener.register()
//! function to register with the listener.
//!
//! **NOTE**: Endpoints must implement the following "interface":
//! **NOTE**: Endpoints can implement the following "interface":
//!
//! Any method handler that's not implemented will be handled automatically:
//! - zap will log it
//! - a response with status code 405 (method not allowed) is sent to the client
//!
//! ```zig
//! /// The http request path / slug of the endpoint
@ -13,6 +17,7 @@
//! error_strategy: zap.Endpoint.ErrorStrategy,
//!
//! /// Handlers by request method:
//! /// implement any of the following
//! pub fn get(_: *Self, _: zap.Request) !void {}
//! pub fn post(_: *Self, _: zap.Request) !void {}
//! pub fn put(_: *Self, _: zap.Request) !void {}
@ -44,13 +49,6 @@
//! pub fn get(_: *StopEndpoint, _: zap.Request) !void {
//! zap.stop();
//! }
//!
//! pub fn post(_: *StopEndpoint, _: zap.Request) !void {}
//! pub fn put(_: *StopEndpoint, _: zap.Request) !void {}
//! pub fn delete(_: *StopEndpoint, _: zap.Request) !void {}
//! pub fn patch(_: *StopEndpoint, _: zap.Request) !void {}
//! pub fn options(_: *StopEndpoint, _: zap.Request) !void {}
//! pub fn head(_: *StopEndpoint, _: zap.Request) !void {}
//! };
//! ```
@ -154,10 +152,25 @@ pub fn checkEndpointType(T: type) void {
@compileError("Expected return type of method `" ++ @typeName(T) ++ "." ++ method ++ "` to be !void, got: !" ++ @typeName(ret_info.error_union.payload));
}
} else {
@compileError(@typeName(T) ++ " has no method named `" ++ method ++ "`");
// it is ok not to implement a method handler
// pass
}
}
}
// This can be resolved at comptime, so perhaps it does affect optimization
pub fn callHandlerIfExist(comptime fn_name: []const u8, e: anytype, r: Request) anyerror!void {
const EndPointType = @TypeOf(e.*);
if (@hasDecl(EndPointType, fn_name)) {
return @field(EndPointType, fn_name)(e, r);
}
zap.log.debug(
"Unhandled `{s}` {s} request ({s} not implemented in {s})",
.{ r.method orelse "<unknown>", r.path orelse "", fn_name, @typeName(EndPointType) },
);
r.setStatus(.method_not_allowed);
try r.sendBody("405 - method not allowed\r\n");
return;
}
pub const Binder = struct {
pub const Interface = struct {
@ -189,13 +202,13 @@ pub const Binder = struct {
pub fn onRequest(self: *Bound, r: zap.Request) !void {
const ret = switch (r.methodAsEnum()) {
.GET => self.endpoint.*.get(r),
.POST => self.endpoint.*.post(r),
.PUT => self.endpoint.*.put(r),
.DELETE => self.endpoint.*.delete(r),
.PATCH => self.endpoint.*.patch(r),
.OPTIONS => self.endpoint.*.options(r),
.HEAD => self.endpoint.*.head(r),
.GET => callHandlerIfExist("get", self.endpoint, r),
.POST => callHandlerIfExist("post", self.endpoint, r),
.PUT => callHandlerIfExist("put", self.endpoint, r),
.DELETE => callHandlerIfExist("delete", self.endpoint, r),
.PATCH => callHandlerIfExist("patch", self.endpoint, r),
.OPTIONS => callHandlerIfExist("options", self.endpoint, r),
.HEAD => callHandlerIfExist("head", self.endpoint, r),
else => error.UnsupportedHtmlRequestMethod,
};
if (ret) {
@ -249,8 +262,8 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates GET requests using the Authenticator.
pub fn get(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.get(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("get", self.ep, r),
.Handled => {},
};
}
@ -258,8 +271,8 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates POST requests using the Authenticator.
pub fn post(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.post(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("post", self.ep, r),
.Handled => {},
};
}
@ -267,8 +280,8 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates PUT requests using the Authenticator.
pub fn put(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.put(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("put", self.ep, r),
.Handled => {},
};
}
@ -276,8 +289,8 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates DELETE requests using the Authenticator.
pub fn delete(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.delete(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("delete", self.ep, r),
.Handled => {},
};
}
@ -285,8 +298,8 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates PATCH requests using the Authenticator.
pub fn patch(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.patch(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("patch", self.ep, r),
.Handled => {},
};
}
@ -294,17 +307,17 @@ pub fn Authenticating(EndpointType: type, Authenticator: type) type {
/// Authenticates OPTIONS requests using the Authenticator.
pub fn options(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.put(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("options", self.ep, r),
.Handled => {},
};
}
/// Authenticates HEAD requests using the Authenticator.
pub fn head(self: *AuthenticatingEndpoint, r: zap.Request) anyerror!void {
try switch (self.authenticator.authenticateRequest(&r)) {
.AuthFailed => return self.ep.*.unauthorized(r),
.AuthOK => self.ep.*.head(r),
.AuthFailed => callHandlerIfExist("unauthorized", self.ep, r),
.AuthOK => callHandlerIfExist("head", self.ep, r),
.Handled => {},
};
}

View file

@ -39,10 +39,10 @@ pub const struct_timespec = extern struct {
tv_nsec: __syscall_slong_t,
};
pub const struct_http_settings_s = extern struct {
on_request: ?*const fn ([*c]http_s) callconv(.C) void,
on_upgrade: ?*const fn ([*c]http_s, [*c]u8, usize) callconv(.C) void,
on_response: ?*const fn ([*c]http_s) callconv(.C) void,
on_finish: ?*const fn ([*c]struct_http_settings_s) callconv(.C) void,
on_request: ?*const fn ([*c]http_s) callconv(.c) void,
on_upgrade: ?*const fn ([*c]http_s, [*c]u8, usize) callconv(.c) void,
on_response: ?*const fn ([*c]http_s) callconv(.c) void,
on_finish: ?*const fn ([*c]struct_http_settings_s) callconv(.c) void,
udata: ?*anyopaque,
public_folder: [*c]const u8,
public_folder_length: usize,
@ -103,7 +103,7 @@ pub const http_s = extern struct {
// unsigned http_only : 1;
// } http_cookie_args_s;
pub const http_cookie_args_s = extern struct {
pub const http_cookie_args_s = extern struct {
name: [*c]u8,
value: [*c]u8,
domain: [*c]u8,
@ -114,8 +114,7 @@ pub const http_cookie_args_s = extern struct {
path_len: isize,
/// in seconds
max_age: c_int,
secure: c_uint,
http_only: c_uint,
flags: c_uint,
};
pub const struct_fio_str_info_s = extern struct {
@ -125,7 +124,7 @@ pub const struct_fio_str_info_s = extern struct {
};
pub const fio_str_info_s = struct_fio_str_info_s;
pub extern fn http_send_body(h: [*c]http_s, data: ?*anyopaque, length: usize) c_int;
pub fn fiobj_each1(arg_o: FIOBJ, arg_start_at: usize, arg_task: ?*const fn (FIOBJ, ?*anyopaque) callconv(.C) c_int, arg_arg: ?*anyopaque) callconv(.C) usize {
pub fn fiobj_each1(arg_o: FIOBJ, arg_start_at: usize, arg_task: ?*const fn (FIOBJ, ?*anyopaque) callconv(.c) c_int, arg_arg: ?*anyopaque) callconv(.c) usize {
const o = arg_o;
const start_at = arg_start_at;
const task = arg_task;
@ -161,7 +160,7 @@ pub extern fn fiobj_float_new(num: f64) FIOBJ;
pub extern fn fiobj_num_new_bignum(num: isize) FIOBJ;
pub extern fn fiobj_data_newstr() FIOBJ;
pub extern fn fiobj_data_newstr2(buffer: ?*anyopaque, length: usize, dealloc: ?*const fn (?*anyopaque) callconv(.C) void) FIOBJ;
pub extern fn fiobj_data_newstr2(buffer: ?*anyopaque, length: usize, dealloc: ?*const fn (?*anyopaque) callconv(.c) void) FIOBJ;
pub extern fn fiobj_data_newtmpfile() FIOBJ;
pub extern fn fiobj_data_newfd(fd: c_int) FIOBJ;
pub extern fn fiobj_data_slice(parent: FIOBJ, offset: isize, length: usize) FIOBJ;
@ -223,14 +222,14 @@ pub extern fn fio_tls_accept(uuid: *u32, tls: ?*anyopaque, udata: ?*anyopaque) v
/// were added using fio_tls_alpn_add).
pub extern fn fio_tls_connect(uuid: *u32, tls: ?*anyopaque, udata: ?*anyopaque) void;
pub extern fn fiobj_free_wrapped(o: FIOBJ) callconv(.C) void;
pub fn fiobj_null() callconv(.C) FIOBJ {
pub extern fn fiobj_free_wrapped(o: FIOBJ) callconv(.c) void;
pub fn fiobj_null() callconv(.c) FIOBJ {
return @as(FIOBJ, @bitCast(@as(c_long, FIOBJ_T_NULL)));
}
pub fn fiobj_true() callconv(.C) FIOBJ {
pub fn fiobj_true() callconv(.c) FIOBJ {
return @as(FIOBJ, @bitCast(@as(c_long, FIOBJ_T_TRUE)));
}
pub fn fiobj_false() callconv(.C) FIOBJ {
pub fn fiobj_false() callconv(.c) FIOBJ {
return @as(FIOBJ, @bitCast(@as(c_long, FIOBJ_T_FALSE)));
}
pub extern fn fiobj_str_new(str: [*c]const u8, len: usize) FIOBJ;
@ -282,20 +281,20 @@ pub const FIOBJ_T_UNKNOWN: c_int = 44;
pub const fiobj_type_enum = u8;
pub const fiobj_object_vtable_s = extern struct {
class_name: [*c]const u8,
dealloc: ?*const fn (FIOBJ, ?*const fn (FIOBJ, ?*anyopaque) callconv(.C) void, ?*anyopaque) callconv(.C) void,
count: ?*const fn (FIOBJ) callconv(.C) usize,
is_true: ?*const fn (FIOBJ) callconv(.C) usize,
is_eq: ?*const fn (FIOBJ, FIOBJ) callconv(.C) usize,
each: ?*const fn (FIOBJ, usize, ?*const fn (FIOBJ, ?*anyopaque) callconv(.C) c_int, ?*anyopaque) callconv(.C) usize,
to_str: ?*const fn (FIOBJ) callconv(.C) fio_str_info_s,
to_i: ?*const fn (FIOBJ) callconv(.C) isize,
to_f: ?*const fn (FIOBJ) callconv(.C) f64,
dealloc: ?*const fn (FIOBJ, ?*const fn (FIOBJ, ?*anyopaque) callconv(.c) void, ?*anyopaque) callconv(.c) void,
count: ?*const fn (FIOBJ) callconv(.c) usize,
is_true: ?*const fn (FIOBJ) callconv(.c) usize,
is_eq: ?*const fn (FIOBJ, FIOBJ) callconv(.c) usize,
each: ?*const fn (FIOBJ, usize, ?*const fn (FIOBJ, ?*anyopaque) callconv(.c) c_int, ?*anyopaque) callconv(.c) usize,
to_str: ?*const fn (FIOBJ) callconv(.c) fio_str_info_s,
to_i: ?*const fn (FIOBJ) callconv(.c) isize,
to_f: ?*const fn (FIOBJ) callconv(.c) f64,
};
pub const fiobj_object_header_s = extern struct {
type: fiobj_type_enum,
ref: u32,
};
pub fn fiobj_type_is(arg_o: FIOBJ, arg_type: fiobj_type_enum) callconv(.C) usize {
pub fn fiobj_type_is(arg_o: FIOBJ, arg_type: fiobj_type_enum) callconv(.c) usize {
const o = arg_o;
const @"type" = arg_type;
while (true) {
@ -318,7 +317,7 @@ pub fn fiobj_type_is(arg_o: FIOBJ, arg_type: fiobj_type_enum) callconv(.C) usize
}
return @as(usize, @bitCast(@as(c_long, @intFromBool((((o != 0) and ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 1))))) == @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 0)))))) and ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 6))))) != @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 6)))))) and (@as(c_int, @bitCast(@as(c_uint, @as([*c]fiobj_type_enum, @ptrCast(@alignCast(@as(?*anyopaque, @ptrFromInt(o & ~@as(usize, @bitCast(@as(c_long, @as(c_int, 7)))))))))[@as(c_uint, @intCast(@as(c_int, 0)))]))) == @as(c_int, @bitCast(@as(c_uint, @"type"))))))));
}
pub fn fiobj_type(arg_o: FIOBJ) callconv(.C) fiobj_type_enum {
pub fn fiobj_type(arg_o: FIOBJ) callconv(.c) fiobj_type_enum {
const o = arg_o;
if (!(o != 0)) return @as(u8, @bitCast(@as(i8, @truncate(FIOBJ_T_NULL))));
if ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 1))))) != 0) return @as(u8, @bitCast(@as(i8, @truncate(FIOBJ_T_NUMBER))));
@ -333,7 +332,7 @@ pub extern const FIOBJECT_VTABLE_STRING: fiobj_object_vtable_s;
pub extern const FIOBJECT_VTABLE_ARRAY: fiobj_object_vtable_s;
pub extern const FIOBJECT_VTABLE_HASH: fiobj_object_vtable_s;
pub extern const FIOBJECT_VTABLE_DATA: fiobj_object_vtable_s;
pub fn fiobj_type_vtable(arg_o: FIOBJ) callconv(.C) [*c]const fiobj_object_vtable_s {
pub fn fiobj_type_vtable(arg_o: FIOBJ) callconv(.c) [*c]const fiobj_object_vtable_s {
const o = arg_o;
while (true) {
switch (@as(c_int, @bitCast(@as(c_uint, fiobj_type(o))))) {
@ -351,7 +350,7 @@ pub fn fiobj_type_vtable(arg_o: FIOBJ) callconv(.C) [*c]const fiobj_object_vtabl
return null;
}
pub fn fiobj_obj2num(o: FIOBJ) callconv(.C) isize {
pub fn fiobj_obj2num(o: FIOBJ) callconv(.c) isize {
if ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 1))))) != 0) {
const sign: usize = if ((o & ~(~@as(usize, @bitCast(@as(c_long, @as(c_int, 0)))) >> @as(@import("std").math.Log2Int(usize), @intCast(1)))) != 0) ~(~@as(usize, @bitCast(@as(c_long, @as(c_int, 0)))) >> @as(@import("std").math.Log2Int(usize), @intCast(1))) | (~(~@as(usize, @bitCast(@as(c_long, @as(c_int, 0)))) >> @as(@import("std").math.Log2Int(usize), @intCast(1))) >> @as(@import("std").math.Log2Int(usize), @intCast(1))) else @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 0))));
return @as(isize, @bitCast(((o & (~@as(usize, @bitCast(@as(c_long, @as(c_int, 0)))) >> @as(@import("std").math.Log2Int(usize), @intCast(1)))) >> @as(@import("std").math.Log2Int(c_ulong), @intCast(1))) | sign));
@ -359,7 +358,7 @@ pub fn fiobj_obj2num(o: FIOBJ) callconv(.C) isize {
if (!(o != 0) or !(((o != 0) and ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 1))))) == @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 0)))))) and ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 6))))) != @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 6))))))) return @as(isize, @bitCast(@as(c_long, @intFromBool(o == @as(c_ulong, @bitCast(@as(c_long, FIOBJ_T_TRUE)))))));
return fiobj_type_vtable(o).*.to_i.?(o);
}
pub fn fiobj_obj2float(o: FIOBJ) callconv(.C) f64 {
pub fn fiobj_obj2float(o: FIOBJ) callconv(.c) f64 {
if ((o & @as(c_ulong, @bitCast(@as(c_long, @as(c_int, 1))))) != 0) return @as(f64, @floatFromInt(fiobj_obj2num(o)));
// the below doesn't parse and we don't support ints here anyway
// if (!(o != 0) or ((o & @bitCast(c_ulong, @as(c_long, @as(c_int, 6)))) == @bitCast(c_ulong, @as(c_long, @as(c_int, 6))))) return @intToFloat(f64, o == @bitCast(c_ulong, @as(c_long, FIOBJ_T_TRUE)));
@ -367,7 +366,7 @@ pub fn fiobj_obj2float(o: FIOBJ) callconv(.C) f64 {
}
pub extern fn fio_ltocstr(c_long) fio_str_info_s;
pub fn fiobj_obj2cstr(o: FIOBJ) callconv(.C) fio_str_info_s {
pub fn fiobj_obj2cstr(o: FIOBJ) callconv(.c) fio_str_info_s {
if (!(o != 0)) {
const ret: fio_str_info_s = fio_str_info_s{
.capa = @as(usize, @bitCast(@as(c_long, @as(c_int, 0)))),
@ -432,8 +431,8 @@ pub extern fn http_push_data(h: [*c]http_s, data: ?*anyopaque, length: usize, mi
pub extern fn http_push_file(h: [*c]http_s, filename: FIOBJ, mime_type: FIOBJ) c_int;
pub const struct_http_pause_handle_s = opaque {};
pub const http_pause_handle_s = struct_http_pause_handle_s;
pub extern fn http_pause(h: [*c]http_s, task: ?*const fn (?*http_pause_handle_s) callconv(.C) void) void;
pub extern fn http_resume(http: ?*http_pause_handle_s, task: ?*const fn ([*c]http_s) callconv(.C) void, fallback: ?*const fn (?*anyopaque) callconv(.C) void) void;
pub extern fn http_pause(h: [*c]http_s, task: ?*const fn (?*http_pause_handle_s) callconv(.c) void) void;
pub extern fn http_resume(http: ?*http_pause_handle_s, task: ?*const fn ([*c]http_s) callconv(.c) void, fallback: ?*const fn (?*anyopaque) callconv(.c) void) void;
pub extern fn http_paused_udata_get(http: ?*http_pause_handle_s) ?*anyopaque;
pub extern fn http_paused_udata_set(http: ?*http_pause_handle_s, udata: ?*anyopaque) ?*anyopaque;
pub extern fn http_listen(port: [*c]const u8, binding: [*c]const u8, struct_http_settings_s) isize;
@ -444,11 +443,11 @@ pub extern fn http_hijack(h: [*c]http_s, leftover: [*c]fio_str_info_s) isize;
pub const struct_ws_s = opaque {};
pub const ws_s = struct_ws_s;
pub const websocket_settings_s = extern struct {
on_message: ?*const fn (?*ws_s, fio_str_info_s, u8) callconv(.C) void,
on_open: ?*const fn (?*ws_s) callconv(.C) void,
on_ready: ?*const fn (?*ws_s) callconv(.C) void,
on_shutdown: ?*const fn (?*ws_s) callconv(.C) void,
on_close: ?*const fn (isize, ?*anyopaque) callconv(.C) void,
on_message: ?*const fn (?*ws_s, fio_str_info_s, u8) callconv(.c) void,
on_open: ?*const fn (?*ws_s) callconv(.c) void,
on_ready: ?*const fn (?*ws_s) callconv(.c) void,
on_shutdown: ?*const fn (?*ws_s) callconv(.c) void,
on_close: ?*const fn (isize, ?*anyopaque) callconv(.c) void,
udata: ?*anyopaque,
};
@ -466,8 +465,8 @@ pub const websocket_settings_s = extern struct {
pub const websocket_subscribe_s_zigcompat = extern struct {
ws: ?*ws_s,
channel: fio_str_info_s,
on_message: ?*const fn (?*ws_s, fio_str_info_s, fio_str_info_s, ?*anyopaque) callconv(.C) void,
on_unsubscribe: ?*const fn (?*anyopaque) callconv(.C) void,
on_message: ?*const fn (?*ws_s, fio_str_info_s, fio_str_info_s, ?*anyopaque) callconv(.c) void,
on_unsubscribe: ?*const fn (?*anyopaque) callconv(.c) void,
udata: ?*anyopaque,
match: fio_match_fn,
force_binary: u8,
@ -475,7 +474,7 @@ pub const websocket_subscribe_s_zigcompat = extern struct {
};
/// 0 on failure
pub extern fn websocket_subscribe_zigcompat(websocket_subscribe_s_zigcompat) callconv(.C) usize;
pub extern fn websocket_subscribe_zigcompat(websocket_subscribe_s_zigcompat) callconv(.c) usize;
pub extern fn http_upgrade2ws(http: [*c]http_s, websocket_settings_s) c_int;
pub extern fn websocket_connect(url: [*c]const u8, settings: websocket_settings_s) c_int;
@ -505,19 +504,19 @@ pub const struct_fio_publish_args_s = extern struct {
pub const http_sse_s = struct_http_sse_s;
pub const struct_http_sse_s = extern struct {
on_open: ?*const fn ([*c]http_sse_s) callconv(.C) void,
on_ready: ?*const fn ([*c]http_sse_s) callconv(.C) void,
on_shutdown: ?*const fn ([*c]http_sse_s) callconv(.C) void,
on_close: ?*const fn ([*c]http_sse_s) callconv(.C) void,
on_open: ?*const fn ([*c]http_sse_s) callconv(.c) void,
on_ready: ?*const fn ([*c]http_sse_s) callconv(.c) void,
on_shutdown: ?*const fn ([*c]http_sse_s) callconv(.c) void,
on_close: ?*const fn ([*c]http_sse_s) callconv(.c) void,
udata: ?*anyopaque,
};
pub extern fn http_upgrade2sse(h: [*c]http_s, http_sse_s) c_int;
pub extern fn http_sse_set_timout(sse: [*c]http_sse_s, timeout: u8) void;
pub const fio_match_fn = ?*const fn (fio_str_info_s, fio_str_info_s) callconv(.C) c_int;
pub const fio_match_fn = ?*const fn (fio_str_info_s, fio_str_info_s) callconv(.c) c_int;
pub const struct_http_sse_subscribe_args = extern struct {
channel: fio_str_info_s,
on_message: ?*const fn ([*c]http_sse_s, fio_str_info_s, fio_str_info_s, ?*anyopaque) callconv(.C) void,
on_unsubscribe: ?*const fn (?*anyopaque) callconv(.C) void,
on_message: ?*const fn ([*c]http_sse_s, fio_str_info_s, fio_str_info_s, ?*anyopaque) callconv(.c) void,
on_unsubscribe: ?*const fn (?*anyopaque) callconv(.c) void,
udata: ?*anyopaque,
match: fio_match_fn,
};
@ -565,7 +564,7 @@ pub extern fn http_gmtime(timer: time_t, tmbuf: [*c]struct_tm) [*c]struct_tm;
pub extern fn http_date2rfc7231(target: [*c]u8, tmbuf: [*c]struct_tm) usize;
pub extern fn http_date2rfc2109(target: [*c]u8, tmbuf: [*c]struct_tm) usize;
pub extern fn http_date2rfc2822(target: [*c]u8, tmbuf: [*c]struct_tm) usize;
pub fn http_date2str(arg_target: [*c]u8, arg_tmbuf: [*c]struct_tm) callconv(.C) usize {
pub fn http_date2str(arg_target: [*c]u8, arg_tmbuf: [*c]struct_tm) callconv(.c) usize {
const target = arg_target;
const tmbuf = arg_tmbuf;
return http_date2rfc7231(target, tmbuf);

View file

@ -47,7 +47,7 @@ pub const StatusCode = enum(u16) {
gone = 410,
length_required = 411,
precondition_failed = 412,
payload_too_large = 413,
content_too_large = 413,
uri_too_long = 414,
unsupported_media_type = 415,
range_not_satisfiable = 416,
@ -91,7 +91,7 @@ pub const StatusCode = enum(u16) {
.no_content => "No Content",
.reset_content => "Reset Content",
.partial_content => "Partial Content",
.multiple_choices => "Multiple Choices",
.multi_status => "Multi-Status",
.moved_permanently => "Moved Permanently",
.found => "Found",
.see_other => "See Other",
@ -111,11 +111,10 @@ pub const StatusCode = enum(u16) {
.gone => "Gone",
.length_required => "Length Required",
.precondition_failed => "Precondition Failed",
.request_entity_too_large => "Request Entity Too Large",
.request_uri_too_long => "Request-URI Too Long",
.unsupported_mediatype => "Unsupported Media Type",
.requested_range_not_satisfiable => "Requested Range Not Satisfiable",
.teapot => "I'm a Teapot",
.content_too_large => "Content Too Large",
.uri_too_long => "URI Too Long",
.unsupported_media_type => "Unsupported Media Type",
.range_not_satisfiable => "Range Not Satisfiable",
.upgrade_required => "Upgrade Required",
.request_header_fields_too_large => "Request Header Fields Too Large",
.expectation_failed => "Expectation Failed",
@ -125,7 +124,29 @@ pub const StatusCode = enum(u16) {
.service_unavailable => "Service Unavailable",
.gateway_timeout => "Gateway Timeout",
.http_version_not_supported => "HTTP Version Not Supported",
_ => "",
.early_hints => "Early Hints",
.already_reported => "Already Reported",
.im_used => "IM Used",
.unused => "306", // unused
.permanent_redirect => "Permanent Redirect",
.im_a_teapot => "I'm a teapot",
.misdirected_request => "Misdirected Request",
.unprocessable_content => "Unprocessable Content", // (WebDAV)
.locked => "Locked", // (WebDAV)
.failed_dependency => "Failed Dependency", // (WebDAV)
.too_early => "Too Early",
.precondition_required => "Precondition Required",
.too_many_requests => "Too Many Requests",
.unavailable_for_legal_reasons => "Unavailable For Legal Reasons", // gotta love those lawyers
.variant_also_negotiates => "Variant Also Negotiates",
.insufficient_storage => "Insufficient Storage", // (WebDAV)
.loop_detected => "Loop Detected", // (WebDAV)
.not_extended => "Not Extended",
.network_authentication_required => "Network Authentication Required",
.processing => "Processing",
            .multiple_choices => "Multiple Choices",
_ => "(Unknown)",
};
}
};

View file

@ -1,5 +1,6 @@
const std = @import("std");
const zap = @import("zap.zig");
const callHandlerIfExist = @import("endpoint.zig").callHandlerIfExist;
/// Your middleware components need to contain a handler.
///
@ -102,16 +103,16 @@ pub fn EndpointHandler(comptime HandlerType: anytype, comptime EndpointType: any
if (!self.options.checkPath or
std.mem.startsWith(u8, r.path orelse "", self.endpoint.path))
{
switch (r.methodAsEnum()) {
.GET => try self.endpoint.*.get(r),
.POST => try self.endpoint.*.post(r),
.PUT => try self.endpoint.*.put(r),
.DELETE => try self.endpoint.*.delete(r),
.PATCH => try self.endpoint.*.patch(r),
.OPTIONS => try self.endpoint.*.options(r),
.HEAD => try self.endpoint.*.head(r),
try switch (r.methodAsEnum()) {
.GET => callHandlerIfExist("get", self.endpoint, r),
.POST => callHandlerIfExist("post", self.endpoint, r),
.PUT => callHandlerIfExist("put", self.endpoint, r),
.DELETE => callHandlerIfExist("delete", self.endpoint, r),
.PATCH => callHandlerIfExist("patch", self.endpoint, r),
.OPTIONS => callHandlerIfExist("options", self.endpoint, r),
.HEAD => callHandlerIfExist("head", self.endpoint, r),
else => {},
}
};
}
// if the request was handled by the endpoint, we may break the chain here

View file

@ -43,10 +43,10 @@ pub const HttpParamStrKVList = struct {
pub const HttpParamKVList = struct {
items: []HttpParamKV,
allocator: Allocator,
pub fn deinit(self: *const HttpParamKVList) void {
for (self.items) |item| {
pub fn deinit(self: *HttpParamKVList) void {
for (self.items) |*item| {
self.allocator.free(item.key);
if (item.value) |v| {
if (item.value) |*v| {
v.free(self.allocator);
}
}
@ -80,11 +80,11 @@ pub const HttpParam = union(HttpParamValueType) {
/// value will always be null
Array_Binfile: std.ArrayList(HttpParamBinaryFile),
pub fn free(self: HttpParam, alloc: Allocator) void {
switch (self) {
pub fn free(self: *HttpParam, alloc: Allocator) void {
switch (self.*) {
.String => |s| alloc.free(s),
.Array_Binfile => |a| {
a.deinit();
.Array_Binfile => |*a| {
a.deinit(alloc);
},
else => {
// nothing to free
@ -132,8 +132,14 @@ fn parseBinfilesFrom(a: Allocator, o: fio.FIOBJ) !HttpParam {
var mimetype: []const u8 = undefined;
if (fio.fiobj_hash_haskey(o, key_type) == 1) {
const mt = fio.fiobj_obj2cstr(fio.fiobj_hash_get(o, key_type));
mimetype = mt.data[0..mt.len];
const mt_fiobj = fio.fiobj_hash_get(o, key_type);
// for some reason, mimetype can be an array
if (fio.fiobj_type_is(mt_fiobj, fio.FIOBJ_T_STRING) == 1) {
const mt = fio.fiobj_obj2cstr(mt_fiobj);
mimetype = mt.data[0..mt.len];
} else {
mimetype = &"application/octet-stream".*;
}
} else {
mimetype = &"application/octet-stream".*;
}
@ -184,7 +190,7 @@ fn parseBinfilesFrom(a: Allocator, o: fio.FIOBJ) !HttpParam {
if (fio.fiobj_ary_count(fn_ary) == len and fio.fiobj_ary_count(mt_ary) == len) {
var i: isize = 0;
var ret = std.ArrayList(HttpParamBinaryFile).init(a);
var ret = std.ArrayList(HttpParamBinaryFile).empty;
while (i < len) : (i += 1) {
const file_data_obj = fio.fiobj_ary_entry(data, i);
const file_name_obj = fio.fiobj_ary_entry(fn_ary, i);
@ -209,7 +215,7 @@ fn parseBinfilesFrom(a: Allocator, o: fio.FIOBJ) !HttpParam {
const file_data = fio.fiobj_obj2cstr(file_data_obj);
const file_name = fio.fiobj_obj2cstr(file_name_obj);
const file_mimetype = fio.fiobj_obj2cstr(file_mimetype_obj);
try ret.append(.{
try ret.append(a, .{
.data = file_data.data[0..file_data.len],
.mimetype = file_mimetype.data[0..file_mimetype.len],
.filename = file_name.data[0..file_name.len],
@ -257,6 +263,8 @@ pub fn fiobj2HttpParam(a: Allocator, o: fio.FIOBJ) !?HttpParam {
};
}
pub const CookieSameSite = enum(u8) { Default, Lax, Strict, None };
/// Args for setting a cookie
pub const CookieArgs = struct {
name: []const u8,
@ -267,6 +275,8 @@ pub const CookieArgs = struct {
max_age_s: c_int = 0,
secure: bool = true,
http_only: bool = true,
partitioned: bool = false,
same_site: CookieSameSite = .Default,
};
path: ?[]const u8,
@ -344,18 +354,16 @@ pub fn _internal_sendError(self: *const Request, err: anyerror, err_trace: ?std.
// TODO: let's hope 20k is enough. Maybe just really allocate here
self.h.*.status = errorcode_num;
var buf: [20 * 1024]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(&buf);
var string = std.ArrayList(u8).init(fba.allocator());
var writer = string.writer();
var writer = std.io.Writer.fixed(&buf);
try writer.print("ERROR: {any}\n\n", .{err});
if (err_trace) |trace| {
const debugInfo = try std.debug.getSelfDebugInfo();
const ttyConfig: std.io.tty.Config = .no_color;
try std.debug.writeStackTrace(trace, writer, debugInfo, ttyConfig);
try std.debug.writeStackTrace(trace, &writer, debugInfo, ttyConfig);
}
try self.sendBody(string.items);
try self.sendBody(writer.buffered());
}
/// Send body.
@ -526,7 +534,7 @@ pub fn setHeader(self: *const Request, name: []const u8, value: []const u8) Http
}
pub fn headersToOwnedList(self: *const Request, a: Allocator) !HttpParamStrKVList {
var headers = std.ArrayList(HttpParamStrKV).init(a);
var headers = std.ArrayList(HttpParamStrKV).empty;
var context: CallbackContext_StrKV = .{
.params = &headers,
.allocator = a,
@ -535,7 +543,7 @@ pub fn headersToOwnedList(self: *const Request, a: Allocator) !HttpParamStrKVLis
if (howmany != headers.items.len) {
return error.HttpIterHeaders;
}
return .{ .items = try headers.toOwnedSlice(), .allocator = a };
return .{ .items = try headers.toOwnedSlice(a), .allocator = a };
}
/// Set status by numeric value.
@ -665,19 +673,15 @@ pub fn parseAcceptHeaders(self: *const Request, allocator: Allocator) !AcceptHea
/// Set a response cookie
pub fn setCookie(self: *const Request, args: CookieArgs) HttpError!void {
const c: fio.http_cookie_args_s = .{
.name = util.toCharPtr(args.name),
.name_len = @as(isize, @intCast(args.name.len)),
.value = util.toCharPtr(args.value),
.value_len = @as(isize, @intCast(args.value.len)),
.domain = if (args.domain) |p| util.toCharPtr(p) else null,
.domain_len = if (args.domain) |p| @as(isize, @intCast(p.len)) else 0,
.path = if (args.path) |p| util.toCharPtr(p) else null,
.path_len = if (args.path) |p| @as(isize, @intCast(p.len)) else 0,
.max_age = args.max_age_s,
.secure = if (args.secure) 1 else 0,
.http_only = if (args.http_only) 1 else 0,
};
const c: fio.http_cookie_args_s = .{ .name = util.toCharPtr(args.name), .name_len = @as(isize, @intCast(args.name.len)), .value = util.toCharPtr(args.value), .value_len = @as(isize, @intCast(args.value.len)), .domain = if (args.domain) |p| util.toCharPtr(p) else null, .domain_len = if (args.domain) |p| @as(isize, @intCast(p.len)) else 0, .path = if (args.path) |p| util.toCharPtr(p) else null, .path_len = if (args.path) |p| @as(isize, @intCast(p.len)) else 0, .max_age = args.max_age_s, .flags = (((if (args.secure) @as(c_uint, 1) else 0) << 0) |
((if (args.http_only) @as(c_uint, 1) else 0) << 1) |
((if (args.partitioned) @as(c_uint, 1) else 0) << 2) |
((switch (args.same_site) {
.Default => @as(c_uint, 0),
.Lax => @as(c_uint, 1),
.Strict => @as(c_uint, 2),
.None => @as(c_uint, 3),
}) << 3)) };
// TODO WAT?
// if we:
@ -727,11 +731,11 @@ const CallbackContext_KV = struct {
params: *std.ArrayList(HttpParamKV),
last_error: ?anyerror = null,
pub fn callback(fiobj_value: fio.FIOBJ, context_: ?*anyopaque) callconv(.C) c_int {
pub fn callback(fiobj_value: fio.FIOBJ, context_: ?*anyopaque) callconv(.c) c_int {
const ctx: *CallbackContext_KV = @as(*CallbackContext_KV, @ptrCast(@alignCast(context_)));
// this is thread-safe, guaranteed by fio
const fiobj_key: fio.FIOBJ = fio.fiobj_hash_key_in_loop();
ctx.params.append(.{
ctx.params.append(ctx.allocator, .{
.key = util.fio2strAlloc(ctx.allocator, fiobj_key) catch |err| {
ctx.last_error = err;
return -1;
@ -756,11 +760,11 @@ const CallbackContext_StrKV = struct {
params: *std.ArrayList(HttpParamStrKV),
last_error: ?anyerror = null,
pub fn callback(fiobj_value: fio.FIOBJ, context_: ?*anyopaque) callconv(.C) c_int {
pub fn callback(fiobj_value: fio.FIOBJ, context_: ?*anyopaque) callconv(.c) c_int {
const ctx: *CallbackContext_StrKV = @as(*CallbackContext_StrKV, @ptrCast(@alignCast(context_)));
// this is thread-safe, guaranteed by fio
const fiobj_key: fio.FIOBJ = fio.fiobj_hash_key_in_loop();
ctx.params.append(.{
ctx.params.append(ctx.allocator, .{
.key = util.fio2strAlloc(ctx.allocator, fiobj_key) catch |err| {
ctx.last_error = err;
return -1;
@ -791,7 +795,7 @@ pub fn cookiesToOwnedStrList(self: *const Request, a: Allocator) anyerror!HttpPa
if (howmany != self.getCookiesCount()) {
return error.HttpIterParams;
}
return .{ .items = try params.toOwnedSlice(), .allocator = a };
return .{ .items = try params.toOwnedSlice(a), .allocator = a };
}
/// Same as parametersToOwnedList() but for cookies
@ -802,7 +806,7 @@ pub fn cookiesToOwnedList(self: *const Request, a: Allocator) !HttpParamKVList {
if (howmany != self.getCookiesCount()) {
return error.HttpIterParams;
}
return .{ .items = try params.toOwnedSlice(), .allocator = a };
return .{ .items = try params.toOwnedSlice(a), .allocator = a };
}
/// Returns the query / body parameters as key/value pairs, as strings.
@ -830,7 +834,7 @@ pub fn parametersToOwnedStrList(self: *const Request, a: Allocator) anyerror!Htt
if (howmany != self.getParamCount()) {
return error.HttpIterParams;
}
return .{ .items = try params.toOwnedSlice(), .allocator = a };
return .{ .items = try params.toOwnedSlice(a), .allocator = a };
}
/// Returns the query / body parameters as key/value pairs
@ -855,7 +859,7 @@ pub fn parametersToOwnedList(self: *const Request, a: Allocator) !HttpParamKVLis
if (howmany != self.getParamCount()) {
return error.HttpIterParams;
}
return .{ .items = try params.toOwnedSlice(), .allocator = a };
return .{ .items = try params.toOwnedSlice(a), .allocator = a };
}
/// get named parameter (parsed) as string

View file

@ -154,7 +154,7 @@ pub const Endpoint = struct {
pub fn get(_: *Endpoint, r: zap.Request) !void {
r.sendBody(HTTP_RESPONSE) catch return;
received_response = HTTP_RESPONSE;
std.time.sleep(1 * std.time.ns_per_s);
std.Thread.sleep(1 * std.time.ns_per_s);
zap.stop();
}
@ -162,15 +162,9 @@ pub const Endpoint = struct {
r.setStatus(.unauthorized);
r.sendBody("UNAUTHORIZED ACCESS") catch return;
received_response = "UNAUTHORIZED";
std.time.sleep(1 * std.time.ns_per_s);
std.Thread.sleep(1 * std.time.ns_per_s);
zap.stop();
}
pub fn post(_: *Endpoint, _: zap.Request) !void {}
pub fn put(_: *Endpoint, _: zap.Request) !void {}
pub fn delete(_: *Endpoint, _: zap.Request) !void {}
pub fn patch(_: *Endpoint, _: zap.Request) !void {}
pub fn options(_: *Endpoint, _: zap.Request) !void {}
pub fn head(_: *Endpoint, _: zap.Request) !void {}
};
//
// end of http client code
@ -184,7 +178,7 @@ test "BearerAuthSingle authenticateRequest OK" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3001,
.on_request = null,
.log = false,
.max_clients = 10,
@ -211,7 +205,7 @@ test "BearerAuthSingle authenticateRequest OK" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Bearer, .token = token });
const thread = try makeRequestThread(a, "http://127.0.0.1:3001/test", .{ .auth = .Bearer, .token = token });
defer thread.join();
// start worker threads
@ -231,7 +225,7 @@ test "BearerAuthSingle authenticateRequest test-unauthorized" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3002,
.on_request = null,
.log = false,
.max_clients = 10,
@ -264,7 +258,7 @@ test "BearerAuthSingle authenticateRequest test-unauthorized" {
try listener.listen();
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Bearer, .token = "invalid" });
const thread = try makeRequestThread(a, "http://127.0.0.1:3002/test", .{ .auth = .Bearer, .token = "invalid" });
defer thread.join();
// start worker threads
@ -284,7 +278,7 @@ test "BearerAuthMulti authenticateRequest OK" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3003,
.on_request = null,
.log = false,
.max_clients = 10,
@ -311,7 +305,7 @@ test "BearerAuthMulti authenticateRequest OK" {
try listener.listen();
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Bearer, .token = token });
const thread = try makeRequestThread(a, "http://127.0.0.1:3003/test", .{ .auth = .Bearer, .token = token });
defer thread.join();
// start worker threads
@ -331,7 +325,7 @@ test "BearerAuthMulti authenticateRequest test-unauthorized" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3004,
.on_request = null,
.log = false,
.max_clients = 10,
@ -358,7 +352,7 @@ test "BearerAuthMulti authenticateRequest test-unauthorized" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Bearer, .token = "invalid" });
const thread = try makeRequestThread(a, "http://127.0.0.1:3004/test", .{ .auth = .Bearer, .token = "invalid" });
defer thread.join();
// start worker threads
@ -378,7 +372,7 @@ test "BasicAuth Token68 authenticateRequest" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3005,
.on_request = null,
.log = false,
.max_clients = 10,
@ -410,7 +404,7 @@ test "BasicAuth Token68 authenticateRequest" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Basic, .token = token });
const thread = try makeRequestThread(a, "http://127.0.0.1:3005/test", .{ .auth = .Basic, .token = token });
defer thread.join();
// start worker threads
@ -430,7 +424,7 @@ test "BasicAuth Token68 authenticateRequest test-unauthorized" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3006,
.on_request = null,
.log = false,
.max_clients = 10,
@ -462,7 +456,7 @@ test "BasicAuth Token68 authenticateRequest test-unauthorized" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Basic, .token = "invalid" });
const thread = try makeRequestThread(a, "http://127.0.0.1:3006/test", .{ .auth = .Basic, .token = "invalid" });
defer thread.join();
// start worker threads
@ -481,7 +475,7 @@ test "BasicAuth UserPass authenticateRequest" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3007,
.on_request = null,
.log = false,
.max_clients = 10,
@ -524,7 +518,7 @@ test "BasicAuth UserPass authenticateRequest" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Basic, .token = encoded });
const thread = try makeRequestThread(a, "http://127.0.0.1:3007/test", .{ .auth = .Basic, .token = encoded });
defer thread.join();
// start worker threads
@ -543,7 +537,7 @@ test "BasicAuth UserPass authenticateRequest test-unauthorized" {
var listener = zap.Endpoint.Listener.init(
a,
.{
.port = 3000,
.port = 3008,
.on_request = null,
.log = false,
.max_clients = 10,
@ -589,7 +583,7 @@ test "BasicAuth UserPass authenticateRequest test-unauthorized" {
listener.listen() catch {};
const thread = try makeRequestThread(a, "http://127.0.0.1:3000/test", .{ .auth = .Basic, .token = "invalid" });
const thread = try makeRequestThread(a, "http://127.0.0.1:3008/test", .{ .auth = .Basic, .token = "invalid" });
defer thread.join();
// start worker threads

View file

@ -65,7 +65,7 @@ test "http parameters" {
// setup listener
var listener = zap.HttpListener.init(
.{
.port = 3001,
.port = 3010,
.on_request = Handler.on_request,
.log = false,
.max_clients = 10,
@ -74,7 +74,7 @@ test "http parameters" {
);
try listener.listen();
const thread = try makeRequestThread(allocator, "http://127.0.0.1:3001/?one=1&two=2&string=hello+world&float=6.28&bool=true");
const thread = try makeRequestThread(allocator, "http://127.0.0.1:3010/?one=1&two=2&string=hello+world&float=6.28&bool=true");
defer thread.join();
zap.start(.{
.threads = 1,

View file

@ -44,7 +44,6 @@ fn makeRequest(allocator: std.mem.Allocator, url: []const u8) !void {
defer allocator.free(request_content_type);
// Allocate a buffer for server headers
var buf: [4096]u8 = undefined;
_ = try http_client.fetch(.{
.method = .POST,
.location = .{ .url = url },
@ -52,7 +51,6 @@ fn makeRequest(allocator: std.mem.Allocator, url: []const u8) !void {
.content_type = .{ .override = request_content_type },
},
.payload = payload,
.server_header_buffer = &buf,
});
zap.stop();
@ -67,7 +65,7 @@ pub fn on_request(r: zap.Request) !void {
pub fn on_request_inner(r: zap.Request) !void {
try r.parseBody();
const params = try r.parametersToOwnedList(std.testing.allocator);
var params = try r.parametersToOwnedList(std.testing.allocator);
defer params.deinit();
std.testing.expect(params.items.len == 1) catch |err| {
@ -110,7 +108,7 @@ test "recv file" {
var listener = zap.HttpListener.init(
.{
.port = 3003,
.port = 3020,
.on_request = on_request,
.log = false,
.max_clients = 10,
@ -119,7 +117,7 @@ test "recv file" {
);
try listener.listen();
const t1 = try std.Thread.spawn(.{}, makeRequest, .{ allocator, "http://127.0.0.1:3003" });
const t1 = try std.Thread.spawn(.{}, makeRequest, .{ allocator, "http://127.0.0.1:3020" });
defer t1.join();
zap.start(.{

View file

@ -42,7 +42,6 @@ fn makeRequest(allocator: std.mem.Allocator, url: []const u8) !void {
defer allocator.free(request_content_type);
// Allocate a buffer for server headers
var buf: [4096]u8 = undefined;
_ = try http_client.fetch(.{
.method = .POST,
.location = .{ .url = url },
@ -50,7 +49,6 @@ fn makeRequest(allocator: std.mem.Allocator, url: []const u8) !void {
.content_type = .{ .override = request_content_type },
},
.payload = payload,
.server_header_buffer = &buf,
});
zap.stop();
@ -65,7 +63,7 @@ pub fn on_request(r: zap.Request) !void {
pub fn on_request_inner(r: zap.Request) !void {
try r.parseBody();
const params = try r.parametersToOwnedList(std.testing.allocator);
var params = try r.parametersToOwnedList(std.testing.allocator);
defer params.deinit();
std.testing.expect(params.items.len == 1) catch |err| {
@ -108,7 +106,7 @@ test "recv file" {
var listener = zap.HttpListener.init(
.{
.port = 3003,
.port = 3030,
.on_request = on_request,
.log = false,
.max_clients = 10,
@ -117,7 +115,7 @@ test "recv file" {
);
try listener.listen();
const t1 = try std.Thread.spawn(.{}, makeRequest, .{ allocator, "http://127.0.0.1:3003" });
const t1 = try std.Thread.spawn(.{}, makeRequest, .{ allocator, "http://127.0.0.1:3030" });
defer t1.join();
zap.start(.{

View file

@ -17,14 +17,18 @@ const testfile = @embedFile("testfile.txt");
fn makeRequest(a: std.mem.Allocator, url: []const u8) !void {
var http_client: std.http.Client = .{ .allocator = a };
defer http_client.deinit();
var response = std.ArrayList(u8).init(a);
defer response.deinit();
var response_writer = std.io.Writer.Allocating.init(a);
defer response_writer.deinit();
_ = try http_client.fetch(.{
.location = .{ .url = url },
.response_storage = .{ .dynamic = &response },
.response_writer = &response_writer.writer,
});
read_len = response.items.len;
@memcpy(buffer[0..read_len.?], response.items);
const response_text = response_writer.written();
read_len = response_text.len;
@memcpy(buffer[0..read_len.?], response_text);
zap.stop();
}
@ -42,7 +46,7 @@ test "send file" {
// setup listener
var listener = zap.HttpListener.init(
.{
.port = 3002,
.port = 3040,
.on_request = on_request,
.log = false,
.max_clients = 10,
@ -51,7 +55,7 @@ test "send file" {
);
try listener.listen();
const thread = try makeRequestThread(allocator, "http://127.0.0.1:3002/?file=src/tests/testfile.txt");
const thread = try makeRequestThread(allocator, "http://127.0.0.1:3040/?file=src/tests/testfile.txt");
defer thread.join();
zap.start(.{
.threads = 1,

View file

@ -17,6 +17,23 @@ pub fn fio2str(o: fio.FIOBJ) ?[]const u8 {
return x.data[0..x.len];
}
pub fn fiobj_type_as_string(o: zap.fio.FIOBJ) []const u8 {
const value_type = switch (zap.fio.fiobj_type(o)) {
zap.fio.FIOBJ_T_NULL => "null",
zap.fio.FIOBJ_T_TRUE => "true",
zap.fio.FIOBJ_T_FALSE => "false",
zap.fio.FIOBJ_T_NUMBER => "number",
zap.fio.FIOBJ_T_FLOAT => "float",
zap.fio.FIOBJ_T_STRING => "string",
zap.fio.FIOBJ_T_ARRAY => "array",
zap.fio.FIOBJ_T_HASH => "hash",
zap.fio.FIOBJ_T_DATA => "data",
zap.fio.FIOBJ_T_UNKNOWN => "unknown",
else => "unreachable",
};
return value_type;
}
/// Used internally: convert a FIO object into its string representation.
/// This always allocates, so choose your allocator wisely.
/// Let's never use that
@ -58,12 +75,11 @@ pub fn toCharPtr(s: []const u8) [*c]u8 {
pub fn stringifyBuf(
buffer: []u8,
value: anytype,
options: std.json.StringifyOptions,
options: std.json.Stringify.Options,
) ![]const u8 {
var fba = std.heap.FixedBufferAllocator.init(buffer);
var string = std.ArrayList(u8).init(fba.allocator());
if (std.json.stringify(value, options, string.writer())) {
return string.items;
var w: std.io.Writer = .fixed(buffer);
if (std.json.Stringify.value(value, options, &w)) {
return w.buffered();
} else |err| { // error
return err;
}

View file

@ -63,7 +63,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
fn internal_on_message(handle: WsHandle, msg: fio.fio_str_info_s, is_text: u8) callconv(.C) void {
fn internal_on_message(handle: WsHandle, msg: fio.fio_str_info_s, is_text: u8) callconv(.c) void {
const user_provided_settings: ?*WebSocketSettings = @as(?*WebSocketSettings, @ptrCast(@alignCast(fio.websocket_udata_get(handle))));
const message = msg.data[0..msg.len];
if (user_provided_settings) |settings| {
@ -75,7 +75,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
fn internal_on_open(handle: WsHandle) callconv(.C) void {
fn internal_on_open(handle: WsHandle) callconv(.c) void {
const user_provided_settings: ?*WebSocketSettings = @as(?*WebSocketSettings, @ptrCast(@alignCast(fio.websocket_udata_get(handle))));
if (user_provided_settings) |settings| {
if (settings.on_open) |on_open| {
@ -86,7 +86,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
fn internal_on_ready(handle: WsHandle) callconv(.C) void {
fn internal_on_ready(handle: WsHandle) callconv(.c) void {
const user_provided_settings: ?*WebSocketSettings = @as(?*WebSocketSettings, @ptrCast(@alignCast(fio.websocket_udata_get(handle))));
if (user_provided_settings) |settings| {
if (settings.on_ready) |on_ready| {
@ -97,7 +97,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
fn internal_on_shutdown(handle: WsHandle) callconv(.C) void {
fn internal_on_shutdown(handle: WsHandle) callconv(.c) void {
const user_provided_settings: ?*WebSocketSettings = @as(?*WebSocketSettings, @ptrCast(@alignCast(fio.websocket_udata_get(handle))));
if (user_provided_settings) |settings| {
if (settings.on_shutdown) |on_shutdown| {
@ -108,7 +108,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
fn internal_on_close(uuid: isize, udata: ?*anyopaque) callconv(.C) void {
fn internal_on_close(uuid: isize, udata: ?*anyopaque) callconv(.c) void {
const user_provided_settings: ?*WebSocketSettings = @as(?*WebSocketSettings, @ptrCast(@alignCast(udata)));
if (user_provided_settings) |settings| {
if (settings.on_close) |on_close| {
@ -219,7 +219,7 @@ pub fn Handler(comptime ContextType: type) type {
return ret;
}
pub fn internal_subscription_on_message(handle: WsHandle, channel: fio.fio_str_info_s, message: fio.fio_str_info_s, udata: ?*anyopaque) callconv(.C) void {
pub fn internal_subscription_on_message(handle: WsHandle, channel: fio.fio_str_info_s, message: fio.fio_str_info_s, udata: ?*anyopaque) callconv(.c) void {
if (udata) |p| {
const args = @as(*SubscribeArgs, @ptrCast(@alignCast(p)));
if (args.on_message) |on_message| {
@ -229,7 +229,7 @@ pub fn Handler(comptime ContextType: type) type {
}
}
}
pub fn internal_subscription_on_unsubscribe(udata: ?*anyopaque) callconv(.C) void {
pub fn internal_subscription_on_unsubscribe(udata: ?*anyopaque) callconv(.c) void {
if (udata) |p| {
const args = @as(*SubscribeArgs, @ptrCast(@alignCast(p)));
if (args.on_unsubscribe) |on_unsubscribe| {

View file

@ -116,7 +116,7 @@ pub const ContentType = enum {
};
/// Used internally: facilio Http request callback function type
pub const FioHttpRequestFn = *const fn (r: [*c]fio.http_s) callconv(.C) void;
pub const FioHttpRequestFn = *const fn (r: [*c]fio.http_s) callconv(.c) void;
/// Zap Http request callback function type.
pub const HttpRequestFn = *const fn (Request) anyerror!void;
@ -169,7 +169,7 @@ pub const HttpListener = struct {
// we could make it dynamic by passing a HttpListener via udata
/// Used internally: the listener's facilio request callback
pub fn theOneAndOnlyRequestCallBack(r: [*c]fio.http_s) callconv(.C) void {
pub fn theOneAndOnlyRequestCallBack(r: [*c]fio.http_s) callconv(.c) void {
if (the_one_and_only_listener) |l| {
var req: Request = .{
.path = util.fio2str(r.*.path),
@ -196,7 +196,7 @@ pub const HttpListener = struct {
}
/// Used internally: the listener's facilio response callback
pub fn theOneAndOnlyResponseCallBack(r: [*c]fio.http_s) callconv(.C) void {
pub fn theOneAndOnlyResponseCallBack(r: [*c]fio.http_s) callconv(.c) void {
if (the_one_and_only_listener) |l| {
var req: Request = .{
.path = util.fio2str(r.*.path),
@ -219,7 +219,7 @@ pub const HttpListener = struct {
}
/// Used internally: the listener's facilio upgrade callback
pub fn theOneAndOnlyUpgradeCallBack(r: [*c]fio.http_s, target: [*c]u8, target_len: usize) callconv(.C) void {
pub fn theOneAndOnlyUpgradeCallBack(r: [*c]fio.http_s, target: [*c]u8, target_len: usize) callconv(.c) void {
if (the_one_and_only_listener) |l| {
var req: Request = .{
.path = util.fio2str(r.*.path),
@ -243,7 +243,7 @@ pub const HttpListener = struct {
}
/// Used internally: the listener's facilio finish callback
pub fn theOneAndOnlyFinishCallBack(s: [*c]fio.struct_http_settings_s) callconv(.C) void {
pub fn theOneAndOnlyFinishCallBack(s: [*c]fio.struct_http_settings_s) callconv(.c) void {
if (the_one_and_only_listener) |l| {
l.settings.on_finish.?(s) catch |err| {
Logging.on_uncaught_error("HttpListener on_finish", err);
@ -288,7 +288,7 @@ pub const HttpListener = struct {
// in debug2 and debug3 of hello example
// std.debug.print("X\n", .{});
// TODO: still happening?
std.time.sleep(500 * std.time.ns_per_ms);
std.Thread.sleep(500 * std.time.ns_per_ms);
var portbuf: [100]u8 = undefined;
const printed_port = try std.fmt.bufPrintZ(&portbuf, "{d}", .{self.settings.port});
@ -364,7 +364,7 @@ pub const LowLevel = struct {
// in debug2 and debug3 of hello example
// std.debug.print("X\n", .{});
// TODO: still happening?
std.time.sleep(500 * std.time.ns_per_ms);
std.Thread.sleep(500 * std.time.ns_per_ms);
if (fio.http_listen(port, interface, x) == -1) {
return error.ListenError;

View file

@ -25,7 +25,7 @@ fn usage() void {
\\
\\ release-notes: print release notes for the given git tag
\\
\\ update-readme: modify the README.md to the latest build.zig.zon
\\ update-readme: modify the README.md to the latest build.zig.zon
\\ instructions
;
std.debug.print("{s}", .{message});
@ -122,38 +122,21 @@ fn renderTemplate(allocator: std.mem.Allocator, template: []const u8, substitute
}
fn sendToDiscordPart(allocator: std.mem.Allocator, url: []const u8, message_json: []const u8) !void {
// url
const uri = try std.Uri.parse(url);
// client
var http_client: std.http.Client = .{ .allocator = allocator };
defer http_client.deinit();
var server_header_buffer: [2048]u8 = undefined;
// request
var req = try http_client.open(.POST, uri, .{
.server_header_buffer = &server_header_buffer,
.extra_headers = &.{
.{ .name = "accept", .value = "*/*" },
.{ .name = "Content-Type", .value = "application/json" },
},
const response = try http_client.fetch(.{
.location = .{ .url = url },
.payload = message_json,
.method = .POST,
.headers = .{ .content_type = .{ .override = "application/json" } },
.keep_alive = false,
});
defer req.deinit();
req.transfer_encoding = .chunked;
// connect, send request
try req.send();
// send POST payload
try req.writer().writeAll(message_json);
try req.finish();
// wait for response
try req.wait();
var buffer: [1024]u8 = undefined;
_ = try req.readAll(&buffer);
if (response.status.class() != .success) {
std.debug.print("Discord: {?s}", .{response.status.phrase()});
return error.DiscordPostError;
}
}
fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const u8) !void {
@ -161,21 +144,16 @@ fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const
// max size: 100kB
const buf: []u8 = try allocator.alloc(u8, 100 * 1024);
defer allocator.free(buf);
var fba = std.heap.FixedBufferAllocator.init(buf);
var string = std.ArrayList(u8).init(fba.allocator());
try std.json.stringify(.{ .content = message }, .{}, string.writer());
var w: std.io.Writer = .fixed(buf);
try std.json.Stringify.value(.{ .content = message }, .{}, &w);
const string = w.buffered();
// We need to split shit into max 2000 characters
if (string.items.len < 1999) {
defer string.deinit();
try sendToDiscordPart(allocator, url, string.items);
if (string.len < 1999) {
try sendToDiscordPart(allocator, url, string);
return;
}
// we don't use it anymore
string.deinit();
fba.reset();
// we can re-use the buf now
// we need to split
@ -187,21 +165,21 @@ fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const
from: usize,
to: usize,
};
var chunks = std.ArrayList(Desc).init(allocator);
defer chunks.deinit();
var chunks = std.ArrayList(Desc).empty;
defer chunks.deinit(allocator);
var i: usize = 0;
var chunk_i: usize = 0;
var last_newline_index: usize = 0;
var last_from: usize = 0;
var in_code_block: bool = false;
std.debug.print("Needing to split message of size {}.\n", .{message.len});
std.debug.print("Needing to split message of size {d}.\n", .{message.len});
while (true) {
if (chunk_i > SPLIT_THRESHOLD) {
// start a new chunk
// we assume, there was a newline in 1990 bytes
// try chunks.append(message[last_newline_index..i]);
try chunks.append(.{ .from = last_from, .to = last_newline_index });
try chunks.append(allocator, .{ .from = last_from, .to = last_newline_index });
chunk_i = 0;
last_from = last_newline_index + 1;
i = last_from;
@ -260,7 +238,7 @@ fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const
if (i >= message.len) {
// push last part
// try chunks.append(message[last_newline_index..i]);
try chunks.append(.{ .from = last_from, .to = i });
try chunks.append(allocator, .{ .from = last_from, .to = i });
break;
}
}
@ -284,12 +262,13 @@ fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const
while (it < chunks.items.len) {
const desc = chunks.items[it];
const part = message[desc.from..desc.to];
fba.reset();
var part_string = std.ArrayList(u8).init(fba.allocator());
defer part_string.deinit();
try std.json.stringify(.{ .content = part }, .{}, part_string.writer());
std.debug.print("SENDING PART {} / {}: ... ", .{ it, chunks.items.len });
try sendToDiscordPart(allocator, url, part_string.items);
var ww: std.io.Writer = .fixed(buf);
try std.json.Stringify.value(.{ .content = part }, .{}, &ww);
const part_string = ww.buffered();
std.debug.print("SENDING PART {d} / {d}: ... ", .{ it, chunks.items.len });
try sendToDiscordPart(allocator, url, part_string);
std.debug.print("done!\n", .{});
it += 1;
}
@ -323,7 +302,12 @@ fn command_releasenotes(allocator: std.mem.Allocator, tag: []const u8) !void {
.annotation = annotation,
});
defer allocator.free(release_notes);
try std.io.getStdOut().writeAll(release_notes);
var stdout_buffer: [1024]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
try stdout.writeAll(release_notes);
try stdout.flush();
}
fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void {
const annotation = try get_tag_annotation(allocator, tag);
@ -340,10 +324,10 @@ fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void {
defer allocator.free(readme);
var output_file = try std.fs.cwd().createFile(README_PATH, .{});
var writer = output_file.writer();
defer output_file.close();
// var writer = std.io.getStdOut().writer();
var output_buffer: [2048]u8 = undefined;
var output_writer = output_file.writer(&output_buffer);
const writer = &output_writer.interface;
// iterate over lines
var in_replace_block: bool = false;
@ -356,11 +340,11 @@ fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void {
continue;
}
if (std.mem.startsWith(u8, line, REPLACE_BEGIN_MARKER)) {
_ = try writer.write(REPLACE_BEGIN_MARKER);
_ = try writer.write("\n");
_ = try writer.write(update_part);
_ = try writer.write(REPLACE_END_MARKER);
_ = try writer.write("\n");
_ = try writer.writeAll(REPLACE_BEGIN_MARKER);
_ = try writer.writeByte('\n');
_ = try writer.writeAll(update_part);
_ = try writer.writeAll(REPLACE_END_MARKER);
_ = try writer.writeByte('\n');
in_replace_block = true;
continue;
}
@ -369,6 +353,7 @@ fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void {
// returns indices etc
const output_line = try std.fmt.allocPrint(allocator, "{s}\n", .{line});
defer allocator.free(output_line);
_ = try writer.write(output_line);
_ = try writer.writeAll(output_line);
}
try writer.flush(); // don't forget to flush!
}