Add a module for mapping ASN1 types to Zig types; see
`asn1.Tag.fromZig` for the mapping. Add a DER encoder and decoder.
See `asn1/test.zig` for example usage of every ASN1 type.
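For intuition, the mapping follows ASN1's universal tags. Here is a sketch of plausible correspondences (these pairings are assumptions for illustration; the authoritative list is `asn1.Tag.fromZig`):
```zig
// Assumed Zig -> ASN1 pairings; consult asn1.Tag.fromZig for the real mapping.
// bool        -> BOOLEAN       (universal tag 0x01)
// u32, i64    -> INTEGER       (universal tag 0x02)
// []const u8  -> OCTET STRING  (universal tag 0x04)
// struct      -> SEQUENCE      (universal tag 0x30, constructed)
```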
This implementation allows ASN1 tags to be overridden with `asn1_tag`
and `asn1_tags`:
```zig
// MyContainer may be an enum, union, or struct; shown here as a struct.
const MyContainer = struct {
    field: u32,

    pub const asn1_tag = asn1.Tag.init(...);
    // This specifies a field's tag class and, if explicit, additional
    // encoding rules.
    pub const asn1_tags = .{
        .field = asn1.FieldTag.explicit(0, .context_specific),
    };
};
```
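For a wire-level intuition (standard X.690 behavior, not specific to this patch): an explicit tag wraps the field's normal encoding in an outer TLV, while an implicit tag would replace the inner tag byte.
```zig
// `field: u32 = 42` under FieldTag.explicit(0, .context_specific):
// 0xa0            [0] context-specific, constructed (the explicit wrapper)
// 0x03            length of the wrapped TLV
// 0x02 0x01 0x2a  the inner INTEGER 42
//
// An implicit [0] would instead yield: 0x80 0x01 0x2a
```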
Despite ASN1 having an enumerated tag type, OIDs are frequently used as
enum values. This is supported via a `pub const oids` declaration:
```zig
const MyEnum = enum {
    a,

    pub const oids = asn1.Oid.StaticMap(MyEnum).initComptime(.{
        .a = "1.2.3.4",
    });
};
```
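The OID itself has a fixed DER shape, shown here for reference (standard X.690 encoding):
```zig
// "1.2.3.4" as a DER OBJECT IDENTIFIER:
// 0x06       OBJECT IDENTIFIER tag
// 0x03       length
// 0x2a       first two arcs packed as 40 * 1 + 2
// 0x03 0x04  remaining arcs
```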
Furthermore, a container may implement encoding and decoding however it
sees fit. This allows for derived fields, since Zig has a far more
powerful type system than ASN1.
```zig
// ASN1 has no standard way of tagging unions.
const MyContainer = union(enum) {
    derived: PowerfulZigType,

    const WeakAsn1Type = ...;

    pub fn encodeDer(self: MyContainer, encoder: *der.Encoder) !void {
        try encoder.any(WeakAsn1Type{...});
    }

    pub fn decodeDer(decoder: *der.Decoder) !MyContainer {
        const weak_asn1_type = try decoder.any(WeakAsn1Type);
        return .{ .derived = PowerfulZigType{...} };
    }
};
```
An unfortunate side effect is that decoding and encoding cannot have
complete error sets unless we limit what errors users may return.
Luckily, PKI ASN1 types are NOT recursive, so the inferred error set
should be sufficient.
Finally, other encodings are possible, but this patch only implements
a buffered DER encoder and decoder.
In an effort to keep the changeset minimal, this PR does not actually
use the DER parser for stdlib PKI, but a tested example of how it may
be used for Certificate is available
[here](https://github.com/clickingbuttons/asn1/blob/69c5709d/src/Certificate.zig).
Closes #19775.
//! An ArrayList that grows backwards. Counts nested prefix length fields
//! in O(n) instead of O(n^depth) at the cost of extra buffering.
//!
//! Laid out in memory like:
//! capacity |--------------------------|
//! data                  |-------------|
data: []u8,
capacity: usize,
allocator: Allocator,

const ArrayListReverse = @This();
const Error = Allocator.Error;

pub fn init(allocator: Allocator) ArrayListReverse {
    return .{ .data = &.{}, .capacity = 0, .allocator = allocator };
}

pub fn deinit(self: *ArrayListReverse) void {
    self.allocator.free(self.allocatedSlice());
}

pub fn ensureCapacity(self: *ArrayListReverse, new_capacity: usize) Error!void {
    if (self.capacity >= new_capacity) return;

    const old_memory = self.allocatedSlice();
    // Just make a new allocation to not worry about aliasing.
    const new_memory = try self.allocator.alloc(u8, new_capacity);
    @memcpy(new_memory[new_capacity - self.data.len ..], self.data);
    self.allocator.free(old_memory);
    self.data.ptr = new_memory.ptr + new_capacity - self.data.len;
    self.capacity = new_memory.len;
}

pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
    try self.ensureCapacity(self.data.len + data.len);
    const old_len = self.data.len;
    const new_len = old_len + data.len;
    assert(new_len <= self.capacity);
    self.data.len = new_len;

    const end = self.data.ptr;
    const begin = end - data.len;
    const slice = begin[0..data.len];
    @memcpy(slice, data);
    self.data.ptr = begin;
}

pub const Writer = std.io.Writer(*ArrayListReverse, Error, prependSliceSize);
/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
pub fn writer(self: *ArrayListReverse) Writer {
    return .{ .context = self };
}
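
// For example, to build "hello world" through the writer, the pieces must be
// written in reverse order, because every write is prepended (illustrative
// sketch; `list` is any initialized ArrayListReverse):
//
//   var w = list.writer();
//   try w.writeAll("world");
//   try w.writeAll("hello ");
//   // list.data now reads "hello world".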

fn prependSliceSize(self: *ArrayListReverse, data: []const u8) Error!usize {
    try self.prependSlice(data);
    return data.len;
}

fn allocatedSlice(self: *ArrayListReverse) []u8 {
    return (self.data.ptr + self.data.len - self.capacity)[0..self.capacity];
}

/// Invalidates all element pointers.
pub fn clearAndFree(self: *ArrayListReverse) void {
    self.allocator.free(self.allocatedSlice());
    self.data.len = 0;
    self.capacity = 0;
}

/// The caller owns the returned memory.
/// Capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *ArrayListReverse) Error![]u8 {
    const new_memory = try self.allocator.alloc(u8, self.data.len);
    @memcpy(new_memory, self.data);
    @memset(self.data, undefined);
    self.clearAndFree();
    return new_memory;
}

const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const testing = std.testing;

test ArrayListReverse {
    var b = ArrayListReverse.init(testing.allocator);
    defer b.deinit();
    const data: []const u8 = &.{ 4, 5, 6 };
    try b.prependSlice(data);
    try testing.expectEqual(data.len, b.data.len);
    try testing.expectEqualSlices(u8, data, b.data);

    const data2: []const u8 = &.{ 1, 2, 3 };
    try b.prependSlice(data2);
    try testing.expectEqual(data.len + data2.len, b.data.len);
    try testing.expectEqualSlices(u8, data2 ++ data, b.data);
}
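
// A sketch of the doc comment's motivating use case: a DER-style TLV whose
// length prefix depends on content that must be produced first. Writing
// backwards means the inner length is already known when the header is
// prepended. (Illustrative only; 0x30 is the ASN1 SEQUENCE tag, and this is
// not a full DER encoder.)
test "prepend a length prefix" {
    var b = ArrayListReverse.init(testing.allocator);
    defer b.deinit();

    // Inner TLV first: INTEGER 42.
    try b.prependSlice(&.{ 0x02, 0x01, 0x2a });
    // Now the enclosing SEQUENCE header; the length is simply b.data.len.
    const inner_len: u8 = @intCast(b.data.len);
    try b.prependSlice(&.{ 0x30, inner_len });

    try testing.expectEqualSlices(u8, &.{ 0x30, 0x03, 0x02, 0x01, 0x2a }, b.data);
}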