1
0
Fork 0
mirror of https://github.com/zigzap/zap.git synced 2025-10-20 15:14:08 +00:00

docs, announceybot: switch to using zig fetch

This commit is contained in:
Rene Schallner 2024-10-14 22:58:07 +02:00
parent 29b923f96e
commit 9a80747cdd
No known key found for this signature in database
10 changed files with 20 additions and 1125 deletions

View file

@ -58,7 +58,7 @@ master though.
## Here's what works

I recommend checking out **Endpoint-based examples for more realistic
use cases**. Most of the examples are super stripped down to only include
what's necessary to show a feature.
**NOTE: To see API docs, run `zig build run-docserver`.** To specify a custom **NOTE: To see API docs, run `zig build run-docserver`.** To specify a custom
@ -132,10 +132,10 @@ port and docs dir: `zig build docserver && zig-out/bin/docserver --port=8989
call `r.sendError(err, status_code)` when you catch an error and a stack trace call `r.sendError(err, status_code)` when you catch an error and a stack trace
will be returned to the client / browser. will be returned to the client / browser.
- [**HTTPS**](examples/https/https.zig): Shows how easy it is to use facil.io's - [**HTTPS**](examples/https/https.zig): Shows how easy it is to use facil.io's
openssl support. Must be compiled with `-Dopenssl=true` or the environment openssl support. Must be compiled with `-Dopenssl=true` or the environment
variable `ZAP_USE_OPENSSL` set to `true` and requires openssl dev dependencies variable `ZAP_USE_OPENSSL` set to `true` and requires openssl dev dependencies
(headers, lib) to be installed on the system. (headers, lib) to be installed on the system.
- run it like this: `ZAP_USE_OPENSSL=true zig build run-https` - run it like this: `ZAP_USE_OPENSSL=true zig build run-https`
OR like this: `zig build -Dopenssl=true run-https` OR like this: `zig build -Dopenssl=true run-https`
- it will tell you how to generate certificates - it will tell you how to generate certificates
- [**simple_router**](examples/simple_router/simple_router.zig): See how you - [**simple_router**](examples/simple_router/simple_router.zig): See how you
@ -181,7 +181,7 @@ simplistic testing scenario.
So, being somewhere in the ballpark of basic GO performance, zig zap seems to be So, being somewhere in the ballpark of basic GO performance, zig zap seems to be
... of reasonable performance 😎. ... of reasonable performance 😎.
I can rest my case that developing ZAP was a good idea because it's faster than I can rest my case that developing ZAP was a good idea because it's faster than
both alternatives: a) staying with Python, and b) creating a GO + Zig hybrid. both alternatives: a) staying with Python, and b) creating a GO + Zig hybrid.
@ -257,7 +257,7 @@ $ git init ## (optional)
**Note**: Nix/NixOS users are lucky; you can use the existing `flake.nix` and run **Note**: Nix/NixOS users are lucky; you can use the existing `flake.nix` and run
`nix develop` to get a development shell providing zig and all `nix develop` to get a development shell providing zig and all
dependencies to build and run the GO, python, and rust examples for the dependencies to build and run the GO, python, and rust examples for the
`wrk` performance tests. For the mere building of zap projects, `wrk` performance tests. For the mere building of zap projects,
`nix develop .#build` will only fetch zig 0.11.0. TODO: upgrade to latest zig. `nix develop .#build` will only fetch zig 0.11.0. TODO: upgrade to latest zig.
With an existing Zig project, adding Zap to it is easy: With an existing Zig project, adding Zap to it is easy:
@ -265,25 +265,11 @@ With an existing Zig project, adding Zap to it is easy:
1. Add zap to your `build.zig.zon` 1. Add zap to your `build.zig.zon`
2. Add zap to your `build.zig` 2. Add zap to your `build.zig`
To add zap to `build.zig.zon`: In your zig project folder (where `build.zig` is located), run:
<!-- INSERT_DEP_BEGIN --> <!-- INSERT_DEP_BEGIN -->
```zig ```
.{ zig fetch --save "git+https://github.com/zigzap/zap#v0.8.0"
.name = "My example project",
.version = "0.0.1",
.dependencies = .{
// zap v0.8.0
.zap = .{
.url = "https://github.com/zigzap/zap/archive/v0.8.0.tar.gz",
.hash = "12209936c3333b53b53edcf453b1670babb9ae8c2197b1ca627c01e72670e20c1a21",
},
},
.paths = .{
"",
},
}
``` ```
<!-- INSERT_DEP_END --> <!-- INSERT_DEP_END -->

View file

@ -211,20 +211,6 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(&run_httpparams_tests.step); test_step.dependOn(&run_httpparams_tests.step);
test_step.dependOn(&run_sendfile_tests.step); test_step.dependOn(&run_sendfile_tests.step);
//
// pkghash
//
const pkghash_exe = b.addExecutable(.{
.name = "pkghash",
.root_source_file = b.path("./tools/pkghash.zig"),
.target = target,
.optimize = optimize,
});
var pkghash_step = b.step("pkghash", "Build pkghash");
const pkghash_build_step = b.addInstallArtifact(pkghash_exe, .{});
pkghash_step.dependOn(&pkghash_build_step.step);
all_step.dependOn(&pkghash_build_step.step);
// //
// docserver // docserver
// //

View file

@ -1,500 +0,0 @@
// borrowed from the zig sourcebase https://github.com/ziglang/zig

/// File name of the manifest this parser understands.
pub const basename = "build.zig.zon";

/// Hash function used for package hashes; `multihash_function` below must
/// stay in sync with this choice.
pub const Hash = std.crypto.hash.sha2.Sha256;
/// One entry of the manifest's `.dependencies` table.
pub const Dependency = struct {
    url: []const u8,
    // Token of the url string literal, kept for error reporting.
    url_tok: Ast.TokenIndex,
    // `hash` is optional in the zon file; null when the field is absent.
    hash: ?[]const u8,
    hash_tok: Ast.TokenIndex,
};
/// A parse diagnostic: message text plus the token (and byte offset within
/// that token) the message refers to.
pub const ErrorMessage = struct {
    msg: []const u8,
    tok: Ast.TokenIndex,
    off: u32,
};
/// Hash-function identifiers for the multihash encoding; values follow the
/// multiformats code table. Non-exhaustive so unknown codes can round-trip.
pub const MultihashFunction = enum(u16) {
    identity = 0x00,
    sha1 = 0x11,
    @"sha2-256" = 0x12,
    @"sha2-512" = 0x13,
    @"sha3-512" = 0x14,
    @"sha3-384" = 0x15,
    @"sha3-256" = 0x16,
    @"sha3-224" = 0x17,
    @"sha2-384" = 0x20,
    @"sha2-256-trunc254-padded" = 0x1012,
    @"sha2-224" = 0x1013,
    @"sha2-512-224" = 0x1014,
    @"sha2-512-256" = 0x1015,
    @"blake2b-256" = 0xb220,
    _,
};
/// Multihash id corresponding to `Hash`; the switch guarantees at compile
/// time that the two cannot drift apart.
pub const multihash_function: MultihashFunction = switch (Hash) {
    std.crypto.hash.sha2.Sha256 => .@"sha2-256",
    else => @compileError("unreachable"),
};
comptime {
    // We avoid unnecessary uleb128 code in hexDigest by asserting here the
    // values are small enough to be contained in the one-byte encoding.
    assert(@intFromEnum(multihash_function) < 127);
    assert(Hash.digest_length < 127);
}
/// Serialized multihash length: 1 function-id byte + 1 length byte + digest.
pub const multihash_len = 1 + 1 + Hash.digest_length;

// Fields of Manifest itself (this file is a struct; see `@This()` below).
name: []const u8,
version: std.SemanticVersion,
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
errors: []ErrorMessage,
// Backing storage for all parsed strings/slices; released by deinit().
arena_state: std.heap.ArenaAllocator.State,

pub const Error = Allocator.Error;
/// Parse an already-lexed `build.zig.zon` AST into a Manifest.
/// Parse failures do not return an error: they are collected into the
/// returned `errors` slice (the caller checks `errors.len`). Only
/// allocation failure is returned as an error. All returned slices live in
/// the manifest's arena; free everything via `deinit`.
pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
    const node_tags = ast.nodes.items(.tag);
    const node_datas = ast.nodes.items(.data);
    assert(node_tags[0] == .root);
    const main_node_index = node_datas[0].lhs;

    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    errdefer arena_instance.deinit();

    var p: Parse = .{
        .gpa = gpa,
        .ast = ast,
        .arena = arena_instance.allocator(),
        .errors = .{},
        .name = undefined,
        .version = undefined,
        .dependencies = .{},
        .buf = .{},
    };
    // Scratch state is gpa-backed and freed here; results are cloned/duped
    // into the arena before returning.
    defer p.buf.deinit(gpa);
    defer p.errors.deinit(gpa);
    defer p.dependencies.deinit(gpa);

    p.parseRoot(main_node_index) catch |err| switch (err) {
        // A ParseFailure must have recorded at least one diagnostic.
        error.ParseFailure => assert(p.errors.items.len > 0),
        else => |e| return e,
    };

    return .{
        .name = p.name,
        .version = p.version,
        .dependencies = try p.dependencies.clone(p.arena),
        .errors = try p.arena.dupe(ErrorMessage, p.errors.items),
        .arena_state = arena_instance.state,
    };
}
/// Free all memory owned by the manifest. `gpa` must be the same allocator
/// that was passed to `parse`. The manifest is unusable afterwards.
pub fn deinit(man: *Manifest, gpa: Allocator) void {
    man.arena_state.promote(gpa).deinit();
    man.* = undefined;
}
/// Lowercase hex alphabet used for all hexadecimal rendering in this file.
const hex_charset = "0123456789abcdef";

/// Render a u64 as 16 lowercase hex characters, least-significant byte
/// first (i.e. little-endian byte order), two characters per byte.
pub fn hex64(x: u64) [16]u8 {
    var out: [16]u8 = undefined;
    for (0..8) |byte_index| {
        const shift: u6 = @intCast(8 * byte_index);
        const byte: u8 = @truncate(x >> shift);
        out[byte_index * 2] = hex_charset[byte >> 4];
        out[byte_index * 2 + 1] = hex_charset[byte & 0x0f];
    }
    return out;
}
test hex64 {
    // Bytes are emitted least-significant first, hence the reversed pairs.
    const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
    try std.testing.expectEqualStrings("[00efcdab78563412]", s);
}
/// Encode a raw digest as a lowercase-hex multihash string: one byte of
/// hash-function id, one byte of digest length, then the digest bytes.
pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
    var out: [multihash_len * 2]u8 = undefined;
    // The comptime block above guarantees both header values fit in one byte.
    const func_id: u8 = @intCast(@intFromEnum(multihash_function));
    const header = [2]u8{ func_id, Hash.digest_length };
    var i: usize = 0;
    for (header) |byte| {
        out[i] = hex_charset[byte >> 4];
        out[i + 1] = hex_charset[byte & 15];
        i += 2;
    }
    for (digest) |byte| {
        out[i] = hex_charset[byte >> 4];
        out[i + 1] = hex_charset[byte & 15];
        i += 2;
    }
    return out;
}
/// Recursive-descent walker over the zon AST. Accumulates the manifest
/// fields plus diagnostics in `errors`; it keeps going after the first
/// error so that all problems are reported in one pass.
const Parse = struct {
    gpa: Allocator,
    ast: std.zig.Ast,
    arena: Allocator,
    // Scratch buffer for decoding string literals; reused between calls.
    buf: std.ArrayListUnmanaged(u8),
    errors: std.ArrayListUnmanaged(ErrorMessage),

    name: []const u8,
    version: std.SemanticVersion,
    dependencies: std.StringArrayHashMapUnmanaged(Dependency),

    const InnerError = error{ ParseFailure, OutOfMemory };

    /// Parse the top-level struct literal. `name` and `version` are
    /// required; `dependencies` is walked when present; unknown fields are
    /// ignored for forward compatibility.
    fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
        const ast = p.ast;
        const main_tokens = ast.nodes.items(.main_token);
        const main_token = main_tokens[node];

        var buf: [2]Ast.Node.Index = undefined;
        const struct_init = ast.fullStructInit(&buf, node) orelse {
            return fail(p, main_token, "expected top level expression to be a struct", .{});
        };

        var have_name = false;
        var have_version = false;

        for (struct_init.ast.fields) |field_init| {
            const name_token = ast.firstToken(field_init) - 2;
            const field_name = try identifierTokenString(p, name_token);
            // We could get fancy with reflection and comptime logic here but doing
            // things manually provides an opportunity to do any additional verification
            // that is desirable on a per-field basis.
            if (mem.eql(u8, field_name, "dependencies")) {
                try parseDependencies(p, field_init);
            } else if (mem.eql(u8, field_name, "name")) {
                p.name = try parseString(p, field_init);
                have_name = true;
            } else if (mem.eql(u8, field_name, "version")) {
                const version_text = try parseString(p, field_init);
                p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
                    try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
                    break :v undefined;
                };
                have_version = true;
            } else {
                // Ignore unknown fields so that we can add fields in future zig
                // versions without breaking older zig versions.
            }
        }

        if (!have_name) {
            try appendError(p, main_token, "missing top-level 'name' field", .{});
        }

        if (!have_version) {
            try appendError(p, main_token, "missing top-level 'version' field", .{});
        }
    }

    /// Walk the `.dependencies` struct literal, adding one entry per field.
    fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
        const ast = p.ast;
        const main_tokens = ast.nodes.items(.main_token);

        var buf: [2]Ast.Node.Index = undefined;
        const struct_init = ast.fullStructInit(&buf, node) orelse {
            const tok = main_tokens[node];
            return fail(p, tok, "expected dependencies expression to be a struct", .{});
        };

        for (struct_init.ast.fields) |field_init| {
            const name_token = ast.firstToken(field_init) - 2;
            const dep_name = try identifierTokenString(p, name_token);
            const dep = try parseDependency(p, field_init);
            try p.dependencies.put(p.gpa, dep_name, dep);
        }
    }

    /// Parse one dependency struct literal. `url` is required; `hash` is
    /// optional. Field-level parse failures are recorded and skipped so
    /// the remaining fields still get validated.
    fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
        const ast = p.ast;
        const main_tokens = ast.nodes.items(.main_token);

        var buf: [2]Ast.Node.Index = undefined;
        const struct_init = ast.fullStructInit(&buf, node) orelse {
            const tok = main_tokens[node];
            return fail(p, tok, "expected dependency expression to be a struct", .{});
        };

        var dep: Dependency = .{
            .url = undefined,
            .url_tok = undefined,
            .hash = null,
            .hash_tok = undefined,
        };
        var have_url = false;

        for (struct_init.ast.fields) |field_init| {
            const name_token = ast.firstToken(field_init) - 2;
            const field_name = try identifierTokenString(p, name_token);
            // We could get fancy with reflection and comptime logic here but doing
            // things manually provides an opportunity to do any additional verification
            // that is desirable on a per-field basis.
            if (mem.eql(u8, field_name, "url")) {
                dep.url = parseString(p, field_init) catch |err| switch (err) {
                    error.ParseFailure => continue,
                    else => |e| return e,
                };
                dep.url_tok = main_tokens[field_init];
                have_url = true;
            } else if (mem.eql(u8, field_name, "hash")) {
                dep.hash = parseHash(p, field_init) catch |err| switch (err) {
                    error.ParseFailure => continue,
                    else => |e| return e,
                };
                dep.hash_tok = main_tokens[field_init];
            } else {
                // Ignore unknown fields so that we can add fields in future zig
                // versions without breaking older zig versions.
            }
        }

        if (!have_url) {
            try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
        }

        return dep;
    }

    /// Decode a string literal node into arena-owned bytes.
    fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
        const ast = p.ast;
        const node_tags = ast.nodes.items(.tag);
        const main_tokens = ast.nodes.items(.main_token);
        if (node_tags[node] != .string_literal) {
            return fail(p, main_tokens[node], "expected string literal", .{});
        }
        const str_lit_token = main_tokens[node];
        const token_bytes = ast.tokenSlice(str_lit_token);
        p.buf.clearRetainingCapacity();
        try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
        const duped = try p.arena.dupe(u8, p.buf.items);
        return duped;
    }

    /// Parse and validate a hash string: must use this file's multihash
    /// function and have exactly the expected hex length.
    fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
        const ast = p.ast;
        const main_tokens = ast.nodes.items(.main_token);
        const tok = main_tokens[node];
        const h = try parseString(p, node);

        if (h.len >= 2) {
            // The first two hex chars encode the multihash function id.
            const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
                return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
                    @errorName(err),
                });
            };
            if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) {
                return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
            }
        }

        const hex_multihash_len = 2 * Manifest.multihash_len;
        if (h.len != hex_multihash_len) {
            return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
                hex_multihash_len, h.len,
            });
        }

        return h;
    }

    /// TODO: try to DRY this with AstGen.identifierTokenString
    fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
        const ast = p.ast;
        const token_tags = ast.tokens.items(.tag);
        assert(token_tags[token] == .identifier);
        const ident_name = ast.tokenSlice(token);
        if (!mem.startsWith(u8, ident_name, "@")) {
            return ident_name;
        }
        // @"..." identifier: decode the quoted part like a string literal.
        p.buf.clearRetainingCapacity();
        try parseStrLit(p, token, &p.buf, ident_name, 1);
        const duped = try p.arena.dupe(u8, p.buf.items);
        return duped;
    }

    /// TODO: try to DRY this with AstGen.parseStrLit
    fn parseStrLit(
        p: *Parse,
        token: Ast.TokenIndex,
        buf: *std.ArrayListUnmanaged(u8),
        bytes: []const u8,
        offset: u32,
    ) InnerError!void {
        const raw_string = bytes[offset..];
        var buf_managed = buf.toManaged(p.gpa);
        const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
        buf.* = buf_managed.moveToUnmanaged();
        switch (try result) {
            .success => {},
            .failure => |err| try p.appendStrLitError(err, token, bytes, offset),
        }
    }

    /// TODO: try to DRY this with AstGen.failWithStrLitError
    fn appendStrLitError(
        p: *Parse,
        err: std.zig.string_literal.Error,
        token: Ast.TokenIndex,
        bytes: []const u8,
        offset: u32,
    ) Allocator.Error!void {
        const raw_string = bytes[offset..];
        switch (err) {
            .invalid_escape_character => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "invalid escape character: '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .expected_hex_digit => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "expected hex digit, found '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .empty_unicode_escape_sequence => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "empty unicode escape sequence",
                    .{},
                );
            },
            .expected_hex_digit_or_rbrace => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "expected hex digit or '}}', found '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .invalid_unicode_codepoint => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "unicode escape does not correspond to a valid codepoint",
                    .{},
                );
            },
            .expected_lbrace => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    // fixed: closing apostrophe was missing, unlike sibling messages
                    "expected '{{', found '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .expected_rbrace => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    // fixed: closing apostrophe was missing, unlike sibling messages
                    "expected '}}', found '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .expected_single_quote => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    // fixed: closing apostrophe was missing, unlike sibling messages
                    "expected single quote ('), found '{c}'",
                    .{raw_string[bad_index]},
                );
            },
            .invalid_character => |bad_index| {
                try p.appendErrorOff(
                    token,
                    offset + @as(u32, @intCast(bad_index)),
                    "invalid byte in string or character literal: '{c}'",
                    .{raw_string[bad_index]},
                );
            },
        }
    }

    /// Record a diagnostic and return ParseFailure.
    fn fail(
        p: *Parse,
        tok: Ast.TokenIndex,
        comptime fmt: []const u8,
        args: anytype,
    ) InnerError {
        try appendError(p, tok, fmt, args);
        return error.ParseFailure;
    }

    fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
        return appendErrorOff(p, tok, 0, fmt, args);
    }

    /// Record a diagnostic at a byte offset within `tok`. The message is
    /// allocated in the arena so it survives Parse's scratch state.
    fn appendErrorOff(
        p: *Parse,
        tok: Ast.TokenIndex,
        byte_offset: u32,
        comptime fmt: []const u8,
        args: anytype,
    ) Allocator.Error!void {
        try p.errors.append(p.gpa, .{
            .msg = try std.fmt.allocPrint(p.arena, fmt, args),
            .tok = tok,
            .off = byte_offset,
        });
    }
};
const Manifest = @This();
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
const testing = std.testing;
test "basic" {
    // End-to-end: lex a small zon manifest, parse it, and check every
    // field round-trips (name, version, single dependency with url+hash).
    const gpa = testing.allocator;

    const example =
        \\.{
        \\    .name = "foo",
        \\    .version = "3.2.1",
        \\    .dependencies = .{
        \\        .bar = .{
        \\            .url = "https://example.com/baz.tar.gz",
        \\            .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
        \\        },
        \\    },
        \\}
    ;

    var ast = try std.zig.Ast.parse(gpa, example, .zon);
    defer ast.deinit(gpa);

    try testing.expect(ast.errors.len == 0);

    var manifest = try Manifest.parse(gpa, ast);
    defer manifest.deinit(gpa);

    try testing.expectEqualStrings("foo", manifest.name);

    try testing.expectEqual(@as(std.SemanticVersion, .{
        .major = 3,
        .minor = 2,
        .patch = 1,
    }), manifest.version);

    try testing.expect(manifest.dependencies.count() == 1);
    try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
    try testing.expectEqualStrings(
        "https://example.com/baz.tar.gz",
        manifest.dependencies.values()[0].url,
    );
    try testing.expectEqualStrings(
        "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
        manifest.dependencies.values()[0].hash orelse return error.TestFailed,
    );
}

Binary file not shown.

View file

@ -1,6 +1,7 @@
// for use inside of github, build with
// zig build -Dtarget=x86_64-linux-musl -Doptimize=ReleaseSmall announceybot
// then copy to ./announceybot.exe
const std = @import("std"); const std = @import("std");
const PkgHash = @import("pkghash.zig");
const Manifest = @import("Manifest.zig");
const README_PATH = "README.md"; const README_PATH = "README.md";
const README_MAX_SIZE = 25 * 1024; const README_MAX_SIZE = 25 * 1024;
@ -105,30 +106,6 @@ fn get_tag_annotation(allocator: std.mem.Allocator, tagname: []const u8) ![]cons
return try allocator.dupe(u8, return_string); return try allocator.dupe(u8, return_string);
} }
/// you need to have checked out the git tag!
fn getPkgHash(allocator: std.mem.Allocator) ![]const u8 {
const cwd = std.fs.cwd();
const cwd_absolute_path = try cwd.realpathAlloc(allocator, ".");
defer allocator.free(cwd_absolute_path);
const hash = blk: {
const result = try PkgHash.gitFileList(allocator, cwd_absolute_path);
defer allocator.free(result);
var thread_pool: std.Thread.Pool = undefined;
try thread_pool.init(.{ .allocator = allocator });
defer thread_pool.deinit();
break :blk try PkgHash.computePackageHashForFileList(
&thread_pool,
cwd,
result,
);
};
const digest = Manifest.hexDigest(hash);
return try allocator.dupe(u8, digest[0..]);
}
const RenderParams = struct { const RenderParams = struct {
tag: ?[]const u8 = null, tag: ?[]const u8 = null,
hash: ?[]const u8 = null, hash: ?[]const u8 = null,
@ -137,14 +114,11 @@ const RenderParams = struct {
fn renderTemplate(allocator: std.mem.Allocator, template: []const u8, substitutes: RenderParams) ![]const u8 { fn renderTemplate(allocator: std.mem.Allocator, template: []const u8, substitutes: RenderParams) ![]const u8 {
const the_tag = substitutes.tag orelse ""; const the_tag = substitutes.tag orelse "";
const the_hash = substitutes.hash orelse "";
const the_anno = substitutes.annotation orelse ""; const the_anno = substitutes.annotation orelse "";
const s1 = try std.mem.replaceOwned(u8, allocator, template, "{tag}", the_tag); const s1 = try std.mem.replaceOwned(u8, allocator, template, "{tag}", the_tag);
defer allocator.free(s1); defer allocator.free(s1);
const s2 = try std.mem.replaceOwned(u8, allocator, s1, "{hash}", the_hash); return try std.mem.replaceOwned(u8, allocator, s1, "{annotation}", the_anno);
defer allocator.free(s2);
return try std.mem.replaceOwned(u8, allocator, s2, "{annotation}", the_anno);
} }
fn sendToDiscordPart(allocator: std.mem.Allocator, url: []const u8, message_json: []const u8) !void { fn sendToDiscordPart(allocator: std.mem.Allocator, url: []const u8, message_json: []const u8) !void {
@ -324,12 +298,9 @@ fn sendToDiscord(allocator: std.mem.Allocator, url: []const u8, message: []const
fn command_announce(allocator: std.mem.Allocator, tag: []const u8) !void { fn command_announce(allocator: std.mem.Allocator, tag: []const u8) !void {
const annotation = try get_tag_annotation(allocator, tag); const annotation = try get_tag_annotation(allocator, tag);
defer allocator.free(annotation); defer allocator.free(annotation);
const hash = try getPkgHash(allocator);
defer allocator.free(hash);
const announcement = try renderTemplate(allocator, RELEASE_ANNOUNCEMENT_TEMPLATE, .{ const announcement = try renderTemplate(allocator, RELEASE_ANNOUNCEMENT_TEMPLATE, .{
.tag = tag, .tag = tag,
.hash = hash,
.annotation = annotation, .annotation = annotation,
}); });
@ -346,12 +317,9 @@ fn command_announce(allocator: std.mem.Allocator, tag: []const u8) !void {
fn command_releasenotes(allocator: std.mem.Allocator, tag: []const u8) !void { fn command_releasenotes(allocator: std.mem.Allocator, tag: []const u8) !void {
const annotation = try get_tag_annotation(allocator, tag); const annotation = try get_tag_annotation(allocator, tag);
defer allocator.free(annotation); defer allocator.free(annotation);
const hash = try getPkgHash(allocator);
defer allocator.free(hash);
const release_notes = try renderTemplate(allocator, RELEASE_NOTES_TEMPLATE, .{ const release_notes = try renderTemplate(allocator, RELEASE_NOTES_TEMPLATE, .{
.tag = tag, .tag = tag,
.hash = hash,
.annotation = annotation, .annotation = annotation,
}); });
defer allocator.free(release_notes); defer allocator.free(release_notes);
@ -360,12 +328,9 @@ fn command_releasenotes(allocator: std.mem.Allocator, tag: []const u8) !void {
fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void { fn command_update_readme(allocator: std.mem.Allocator, tag: []const u8) !void {
const annotation = try get_tag_annotation(allocator, tag); const annotation = try get_tag_annotation(allocator, tag);
defer allocator.free(annotation); defer allocator.free(annotation);
const hash = try getPkgHash(allocator);
defer allocator.free(hash);
const update_part = try renderTemplate(allocator, README_UPDATE_TEMPLATE, .{ const update_part = try renderTemplate(allocator, README_UPDATE_TEMPLATE, .{
.tag = tag, .tag = tag,
.hash = hash,
.annotation = annotation, .annotation = annotation,
}); });
defer allocator.free(update_part); defer allocator.free(update_part);

View file

@ -6,25 +6,10 @@ __**New release {tag}!**__
**Using it** **Using it**
Modify your `build.zig.zon` like this: In your zig project folder (where `build.zig` is located), run:
```zig
.{
.name = "My example project",
.version = "0.0.1",
.dependencies = .{
// zap {tag}
.zap = .{
.url = "https://github.com/zigzap/zap/archive/refs/tags/{tag}.tar.gz",
.hash = "{hash}",
},
},
.paths = .{
"",
},
}
```
zig fetch --save "git+https://github.com/zigzap/zap#{tag}"
``` ```
See the release page: https://github.com/zigzap/zap/releases/{tag} for more information! See the release page: https://github.com/zigzap/zap/releases/{tag} for more information!

View file

@ -1,19 +1,3 @@
```zig ```
.{ zig fetch --save "git+https://github.com/zigzap/zap#{tag}"
.name = "My example project",
.version = "0.0.1",
.dependencies = .{
// zap {tag}
.zap = .{
// when tagged:
// .url = "https://github.com/zigzap/zap/archive/refs/tags/{tag}.tar.gz",
.url = "https://github.com/zigzap/zap/archive/{tag}.tar.gz",
.hash = "{hash}",
},
},
.paths = .{
"",
},
}
``` ```

View file

@ -6,41 +6,16 @@
## Using it ## Using it
To use in your own projects, put this dependency into your `build.zig.zon`: In your zig project folder (where `build.zig` is located), run:
```zig
// zap {tag}
.zap = .{
.url = "https://github.com/zigzap/zap/archive/refs/tags/{tag}.tar.gz",
.hash = "{hash}",
}
``` ```
zig fetch --save "git+https://github.com/zigzap/zap#{tag}"
Here is a complete `build.zig.zon` example:
```zig
.{
.name = "My example project",
.version = "0.0.1",
.dependencies = .{
// zap {tag}
.zap = .{
.url = "https://github.com/zigzap/zap/archive/refs/tags/{tag}.tar.gz",
.hash = "{hash}",
},
},
.paths = .{
"",
},
}
``` ```
Then, in your `build.zig`'s `build` function, add the following before Then, in your `build.zig`'s `build` function, add the following before
`b.installArtifact(exe)`: `b.installArtifact(exe)`:
```zig ```zig
const zap = b.dependency("zap", .{ const zap = b.dependency("zap", .{
.target = target, .target = target,
.optimize = optimize, .optimize = optimize,

Binary file not shown.

View file

@ -1,486 +0,0 @@
const std = @import("std");
const builtin = std.builtin;
const assert = std.debug.assert;
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const process = std.process;
const Allocator = mem.Allocator;
const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const Manifest = @import("Manifest.zig");
// Process-wide GPA used by main(); deinit() at exit reports leaks.
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};

/// Maximum size (1 MiB) accepted when reading a template file in
/// renderTemplate.
const MAX_TEMPLATE_SIZE = 1024 * 1024;
/// Log an error message and terminate the process with exit code 1.
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
    std.log.err(format, args);
    process.exit(1);
}
/// Entry point: dispatch on the first CLI argument.
/// `-h/--help` prints usage, `-g/--git` hashes the `git ls-files` set,
/// otherwise the directory-walk mode (cmdPkg) runs.
pub fn main() !void {
    const gpa = general_purpose_allocator.allocator();
    defer _ = general_purpose_allocator.deinit();
    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    const args = try process.argsAlloc(arena);
    // args[0] (the program name) is intentionally unused.
    const command_arg = args[0];
    _ = command_arg;

    if (args.len > 1) {
        const arg1 = args[1];
        if (mem.eql(u8, arg1, "-h") or mem.eql(u8, arg1, "--help")) {
            try showHelp();
            return;
        }
        if (mem.eql(u8, arg1, "-g") or mem.eql(u8, arg1, "--git")) {
            try cmdPkgGit(gpa, args);
            return;
        }
    }
    try cmdPkg(gpa, arena, args);
}
/// Print the usage text to stdout.
fn showHelp() !void {
    const stdout = io.getStdOut().writer();
    try stdout.writeAll(usage_pkg);
}
/// Usage text shown by `-h`/`--help`.
pub const usage_pkg =
    \\Usage: pkghash [options]
    \\
    \\Options: 
    \\  -h --help  Print this help and exit.
    \\  -g --git   Use git ls-files
    \\
    \\Sub-options:
    \\  --allow-directory : calc hash even if no build.zig is present
    \\                      applies in no-git mode only
    \\
    \\Sub-options for --git:
    \\  --tag=<tag>          : specify git tag to use in template
    \\                         defaults to tag pointing to HEAD
    \\  --template=<file.md> : specify markdown template to render
;
/// Run `git tag --contains=HEAD` in `pkg_dir` and return git's raw stdout
/// (tag name(s), newline-terminated). Caller owns the returned slice.
/// Returns error.GitError when git exits abnormally or with a non-zero code.
pub fn gitLatestTag(gpa: Allocator, pkg_dir: []const u8) ![]const u8 {
    const result = try std.process.Child.run(.{
        .allocator = gpa,
        .argv = &.{
            "git",
            "-C",
            pkg_dir,
            "tag",
            "--contains=HEAD",
        },
        .cwd = pkg_dir,
    });
    defer gpa.free(result.stderr);
    // Fix: stdout was leaked on the error.GitError paths below; only
    // stderr was being freed.
    errdefer gpa.free(result.stdout);
    const retcode = switch (result.term) {
        .Exited => |exitcode| exitcode,
        else => return error.GitError,
    };
    if (retcode != 0) return error.GitError;
    return result.stdout;
}
/// Run `git ls-files` in `pkg_dir` and return git's raw stdout (one path
/// per line). Caller owns the returned slice.
/// Returns error.GitError when git exits abnormally or with a non-zero code.
pub fn gitFileList(gpa: Allocator, pkg_dir: []const u8) ![]const u8 {
    const result = try std.process.Child.run(.{
        .allocator = gpa,
        .argv = &.{
            "git",
            "-C",
            pkg_dir,
            "ls-files",
        },
        .cwd = pkg_dir,
    });
    defer gpa.free(result.stderr);
    // Fix: stdout was leaked on the error.GitError paths below; only
    // stderr was being freed.
    errdefer gpa.free(result.stdout);
    const retcode = switch (result.term) {
        .Exited => |exitcode| exitcode,
        else => return error.GitError,
    };
    if (retcode != 0) return error.GitError;
    return result.stdout;
}
/// Implements `pkghash --git`: hash the files listed by `git ls-files` in
/// the cwd, then either print the multihash digest or — when --tag= /
/// --template= are given — render the markdown template with {tag} and
/// {hash} substituted.
pub fn cmdPkgGit(gpa: Allocator, args: []const []const u8) !void {
    if (args.len == 0) fatal("Expected at least one argument.\n", .{});

    const cwd = std.fs.cwd();
    const cwd_absolute_path = try cwd.realpathAlloc(gpa, ".");
    defer gpa.free(cwd_absolute_path);

    var do_render_template = false;
    var template_filn: ?[]const u8 = null;
    // Owned (gpa-duped) when set; freed at the end or via the errdefer below.
    var git_tag: ?[]const u8 = null;

    const arg_tag = "--tag=";
    const arg_template = "--template=";

    // Scan all CLI args for --tag= and --template= options; either one
    // switches the command into template-rendering mode.
    for (args) |arg| {
        if (std.mem.startsWith(u8, arg, arg_tag)) {
            if (arg.len > arg_tag.len) {
                git_tag = try gpa.dupe(u8, arg[arg_tag.len..]);
                do_render_template = true;
            } else {
                std.debug.print(
                    \\Error: --tag=... requires a tag after the =
                , .{});
                try showHelp();
                return;
            }
        }
        if (std.mem.startsWith(u8, arg, arg_template)) {
            if (arg.len > arg_template.len) {
                template_filn = arg[arg_template.len..];
                do_render_template = true;
            } else {
                std.debug.print(
                    \\Error: --template=... requires a filename after the =
                , .{});
                try showHelp();
                return;
            }
        }
    }

    if (do_render_template) {
        if (template_filn == null) {
            std.debug.print(
                \\Error: if --tag=... is provided, --template= must be provided, too!
                \\Use -h for help
            , .{});
            try showHelp();
            return;
        }
        if (git_tag == null) {
            // try to get the latest tag
            if (gitLatestTag(gpa, cwd_absolute_path)) |tag_slice| {
                // strip \n
                defer gpa.free(tag_slice);
                if (tag_slice.len > 1) {
                    git_tag = try gpa.dupe(u8, tag_slice[0 .. tag_slice.len - 1]);
                } else {
                    std.debug.print(
                        \\Error: could not deduce git tag! Provide --tag=
                        \\Use -h for help
                    , .{});
                    try showHelp();
                    return;
                }
            } else |_| {
                std.debug.print(
                    \\Error: if --template=... is provided, --tag= must be provided, too!
                    \\Use -h for help
                , .{});
                try showHelp();
                return;
            }
        }
    }
    // Free the duped tag if any of the fallible calls below error out;
    // the success path frees it explicitly at the end.
    errdefer {
        if (git_tag) |g| {
            gpa.free(g);
        }
    }

    // Hash every file reported by `git ls-files`, using a thread pool.
    const hash = blk: {
        const result = try gitFileList(gpa, cwd_absolute_path);
        defer gpa.free(result);
        var thread_pool: ThreadPool = undefined;
        try thread_pool.init(.{ .allocator = gpa });
        defer thread_pool.deinit();

        break :blk try computePackageHashForFileList(
            &thread_pool,
            cwd,
            result,
        );
    };

    const std_out = std.io.getStdOut();
    const digest = Manifest.hexDigest(hash);
    const digest_slice = digest[0..];
    if (!do_render_template) {
        try std_out.writeAll(digest_slice);
        try std_out.writeAll("\n");
    } else {
        // Both options are guaranteed non-null in rendering mode by the
        // checks above.
        try renderTemplate(gpa, git_tag.?, template_filn.?, digest_slice);
    }
    if (git_tag) |g| {
        gpa.free(g);
    }
}
/// Read the markdown file `template` from the cwd (up to MAX_TEMPLATE_SIZE
/// bytes), substitute the "{tag}" and "{hash}" placeholders, and write the
/// result to stdout.
fn renderTemplate(gpa: std.mem.Allocator, tag: []const u8, template: []const u8, hash: []const u8) !void {
    const raw = try std.fs.cwd().readFileAlloc(gpa, template, MAX_TEMPLATE_SIZE);
    defer gpa.free(raw);
    const with_tag = try std.mem.replaceOwned(u8, gpa, raw, "{tag}", tag);
    defer gpa.free(with_tag);
    const rendered = try std.mem.replaceOwned(u8, gpa, with_tag, "{hash}", hash);
    defer gpa.free(rendered);
    try std.io.getStdOut().writer().writeAll(rendered);
}
/// Default (no --git) mode: hash the package rooted at the cwd by walking
/// the directory tree, excluding zig-out/zig-cache/.git, and print the
/// multihash digest. Refuses to run in a directory without build.zig or
/// build.zig.zon unless --allow-directory is given.
pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
    _ = arena;
    const cwd = std.fs.cwd();

    // Sanity check: only proceed if this looks like a zig package root
    // (or the user explicitly overrides with --allow-directory).
    dir_test: {
        if (args.len > 1 and mem.eql(u8, args[1], "--allow-directory")) break :dir_test;
        try if (cwd.access("build.zig", .{})) |_| break :dir_test else |err| switch (err) {
            error.FileNotFound => {},
            else => |e| e,
        };
        try if (cwd.access("build.zig.zon", .{})) |_| break :dir_test else |err| switch (err) {
            error.FileNotFound => {},
            else => |e| e,
        };
        break :dir_test fatal("Could not find either build.zig or build.zig.zon in this directory.\n Use --allow-directory to override this check.\n", .{});
    }

    const hash = blk: {
        const cwd_absolute_path = try cwd.realpathAlloc(gpa, ".");
        defer gpa.free(cwd_absolute_path);

        // computePackageHash will close the directory after completion
        // std.debug.print("abspath: {s}\n", .{cwd_absolute_path});
        var cwd_copy = try fs.openDirAbsolute(cwd_absolute_path, .{});
        errdefer cwd_copy.close();

        var thread_pool: ThreadPool = undefined;
        try thread_pool.init(.{ .allocator = gpa });
        defer thread_pool.deinit();

        // workaround for missing inclusion/exclusion support -> #14311.
        const excluded_directories: []const []const u8 = &.{
            "zig-out",
            "zig-cache",
            ".git",
        };
        break :blk try computePackageHashExcludingDirectories(
            &thread_pool,
            cwd_copy,
            excluded_directories,
        );
    };

    const std_out = std.io.getStdOut();
    const digest = Manifest.hexDigest(hash);
    try std_out.writeAll(digest[0..]);
    try std_out.writeAll("\n");
}
/// Make a file system path identical independently of operating system path
/// inconsistencies by converting every native separator into a forward slash.
/// On platforms whose native separator already is '/', the input slice is
/// returned unchanged (no allocation); otherwise a copy owned by `arena` is
/// returned.
fn normalizePath(arena: Allocator, fs_path: []const u8) ![]const u8 {
    const canonical_sep = '/';

    // Comptime-known on each target: nothing to do when the native separator
    // already matches the canonical one.
    if (fs.path.sep == canonical_sep) return fs_path;

    const canonical = try arena.dupe(u8, fs_path);
    for (canonical) |*ch| {
        if (ch.* == fs.path.sep) ch.* = canonical_sep;
    }
    return canonical;
}
/// Bookkeeping record for one file hashed on a worker thread.
/// `hash` and `failure` are written by the worker; `failure` records the
/// outcome so errors can be reported after all workers have finished.
const HashedFile = struct {
    fs_path: []const u8,
    normalized_path: []const u8,
    hash: [Manifest.Hash.digest_length]u8,
    failure: Error!void,

    const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;

    /// Ordering for `std.mem.sort`: compare by the OS-independent path so the
    /// final package hash does not depend on directory iteration order.
    fn lessThan(_: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
        return mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
    }
};
/// Thread-pool entry point: hash one file and record the result (or error)
/// in `hashed_file.failure`, then signal completion on `wg`.
fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
    // hashFileFallible stores any error into `failure`; it cannot bypass the
    // finish() below, so an explicit call is equivalent to a defer here.
    hashed_file.failure = hashFileFallible(dir, hashed_file);
    wg.finish();
}
/// Hash a single file into `hashed_file.hash`: the digest covers the
/// normalized path, a separator/executable-flag pair, and the file content.
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
    var file = try dir.openFile(hashed_file.fs_path, .{});
    defer file.close();

    var hasher = Manifest.Hash.init(.{});
    // Domain-separate the path from the content with a 0 byte, and mix in
    // whether the file counts as executable.
    hasher.update(hashed_file.normalized_path);
    hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });

    // Stream the file through the hasher in fixed-size chunks.
    var chunk: [8000]u8 = undefined;
    while (true) {
        const n = try file.read(&chunk);
        if (n == 0) break;
        hasher.update(chunk[0..n]);
    }
    hasher.final(&hashed_file.hash);
}
/// Report whether `file` should be hashed as executable.
/// Intentionally hard-coded to `false` so the resulting hash mimics zig's tar
/// extraction, which marks nothing as executable; the real stat-based check is
/// kept below (commented out) for reference.
fn isExecutable(file: fs.File) !bool {
    _ = file;
    // hack: in order to mimic current zig's tar extraction, we set everything to
    // NOT EXECUTABLE
    // const stat = try file.stat();
    // return (stat.mode & std.os.S.IXUSR) != 0;
    return false;
}
/// Compute a combined hash over every regular file beneath `pkg_dir`,
/// recursively, skipping any directory whose basename matches one of
/// `excluded_directories`. Files are hashed in parallel on `thread_pool`
/// and folded together in sorted (normalized-path) order so the result is
/// deterministic. Returns `error.IllegalFileTypeInPackage` for entries that
/// are neither files nor directories, and `error.PackageHashUnavailable`
/// if any individual file failed to hash.
/// NOTE(review): `pkg_dir` itself is not closed here — caller owns it.
pub fn computePackageHashExcludingDirectories(
    thread_pool: *ThreadPool,
    pkg_dir: fs.Dir,
    excluded_directories: []const []const u8,
) ![Manifest.Hash.digest_length]u8 {
    const gpa = thread_pool.allocator;
    // We'll use an arena allocator for the path name strings since they all
    // need to be in memory for sorting.
    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();
    // Collect all files, recursively, then sort.
    var all_files = std.ArrayList(*HashedFile).init(gpa);
    defer all_files.deinit();
    var walker = try pkg_dir.walk(gpa);
    defer walker.deinit();
    {
        // The final hash will be a hash of each file hashed independently. This
        // allows hashing in parallel.
        var wait_group: WaitGroup = .{};
        // wait() runs at the end of this block, before sorting below, so every
        // worker has filled in its HashedFile by then.
        defer wait_group.wait();
        loop: while (try walker.next()) |entry| {
            switch (entry.kind) {
                .directory => {
                    for (excluded_directories) |dir_name| {
                        if (mem.eql(u8, entry.basename, dir_name)) {
                            // Skip this subtree: pop the walker's current stack
                            // entry so it is never descended into.
                            // NOTE(review): this reaches into Walker internals
                            // (`walker.stack`); the len != 0 guard mirrors
                            // Walker.deinit, which closes every stacked dir
                            // except the root — confirm against the std
                            // version in use.
                            var item = walker.stack.pop();
                            if (walker.stack.items.len != 0) {
                                item.iter.dir.close();
                            }
                            continue :loop;
                        }
                    }
                    // Non-excluded directory: nothing to hash, walker descends.
                    continue :loop;
                },
                .file => {},
                else => return error.IllegalFileTypeInPackage,
            }
            const hashed_file = try arena.create(HashedFile);
            const fs_path = try arena.dupe(u8, entry.path);
            hashed_file.* = .{
                .fs_path = fs_path,
                .normalized_path = try normalizePath(arena, fs_path),
                .hash = undefined, // to be populated by the worker
                .failure = undefined, // to be populated by the worker
            };
            wait_group.start();
            try thread_pool.spawn(workerHashFile, .{ pkg_dir, hashed_file, &wait_group });
            try all_files.append(hashed_file);
        }
    }
    // Sort by normalized path so the fold below is iteration-order independent.
    std.mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
    var hasher = Manifest.Hash.init(.{});
    var any_failures = false;
    for (all_files.items) |hashed_file| {
        // Report every per-file failure before giving up, rather than on the first.
        hashed_file.failure catch |err| {
            any_failures = true;
            std.log.err("unable to hash '{s}': {s}", .{ hashed_file.fs_path, @errorName(err) });
        };
        // std.debug.print("{s} : {s}\n", .{ hashed_file.normalized_path, Manifest.hexDigest(hashed_file.hash) });
        hasher.update(&hashed_file.hash);
    }
    if (any_failures) return error.PackageHashUnavailable;
    return hasher.finalResult();
}
/// Compute a combined hash over the files named in `file_list`, a
/// newline-separated list of paths relative to `pkg_dir` (empty lines are
/// ignored). Files are hashed in parallel on `thread_pool` and folded
/// together in sorted (normalized-path) order so the result is deterministic.
/// Returns `error.PackageHashUnavailable` if any individual file failed to hash.
pub fn computePackageHashForFileList(
    thread_pool: *ThreadPool,
    pkg_dir: fs.Dir,
    file_list: []const u8,
) ![Manifest.Hash.digest_length]u8 {
    const gpa = thread_pool.allocator;

    // Arena for the path strings: they must all stay alive until sorting and
    // hashing below are done, and are freed in one go.
    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    var hashed_files = std.ArrayList(*HashedFile).init(gpa);
    defer hashed_files.deinit();

    {
        // Each file is hashed independently on the pool; the final digest is a
        // hash of the per-file hashes. wait() runs at the end of this block,
        // so every worker has finished before we sort.
        var wait_group: WaitGroup = .{};
        defer wait_group.wait();

        var lines = std.mem.splitScalar(u8, file_list, '\n');
        while (lines.next()) |line| {
            if (line.len == 0) continue; // skip blank lines / trailing newline

            const hf = try arena.create(HashedFile);
            const path_copy = try arena.dupe(u8, line);
            hf.* = .{
                .fs_path = path_copy,
                .normalized_path = try normalizePath(arena, path_copy),
                .hash = undefined, // filled in by the worker
                .failure = undefined, // filled in by the worker
            };
            wait_group.start();
            try thread_pool.spawn(workerHashFile, .{ pkg_dir, hf, &wait_group });
            try hashed_files.append(hf);
        }
    }

    // Sort by normalized path so the fold is independent of input order.
    std.mem.sort(*HashedFile, hashed_files.items, {}, HashedFile.lessThan);

    var hasher = Manifest.Hash.init(.{});
    var saw_failure = false;
    for (hashed_files.items) |hf| {
        // Report every per-file failure before giving up, rather than on the first.
        hf.failure catch |err| {
            saw_failure = true;
            std.log.err("unable to hash '{s}': {s}", .{ hf.fs_path, @errorName(err) });
        };
        hasher.update(&hf.hash);
    }
    if (saw_failure) return error.PackageHashUnavailable;
    return hasher.finalResult();
}