Find system-installed root SSL certificates on macOS (#14325)

This commit is contained in:
fn ⌃ ⌥ 2023-01-16 14:34:04 -08:00 committed by GitHub
parent b42bd759a7
commit e45b471ad3
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 173 additions and 36 deletions

View file

@ -60,13 +60,13 @@ pub fn rescan(cb: *Bundle, gpa: Allocator) !void {
.windows => { .windows => {
// TODO // TODO
}, },
.macos => { .macos => return rescanMac(cb, gpa),
// TODO
},
else => {}, else => {},
} }
} }
/// macOS implementation lives in a separate file; see Bundle/macos.zig.
pub const rescanMac = @import("Bundle/macos.zig").rescanMac;
pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void { pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void {
// Possible certificate files; stop after finding one. // Possible certificate files; stop after finding one.
const cert_file_paths = [_][]const u8{ const cert_file_paths = [_][]const u8{
@ -195,25 +195,29 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) !void {
const decoded_start = @intCast(u32, cb.bytes.items.len); const decoded_start = @intCast(u32, cb.bytes.items.len);
const dest_buf = cb.bytes.allocatedSlice()[decoded_start..]; const dest_buf = cb.bytes.allocatedSlice()[decoded_start..];
cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert); cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert);
// Even though we could only partially parse the certificate to find try cb.parseCert(gpa, decoded_start, now_sec);
// the subject name, we pre-parse all of them to make sure and only }
// include in the bundle ones that we know will parse. This way we can }
// use `catch unreachable` later.
const parsed_cert = try Certificate.parse(.{ pub fn parseCert(cb: *Bundle, gpa: Allocator, decoded_start: u32, now_sec: i64) !void {
.buffer = cb.bytes.items, // Even though we could only partially parse the certificate to find
.index = decoded_start, // the subject name, we pre-parse all of them to make sure and only
}); // include in the bundle ones that we know will parse. This way we can
if (now_sec > parsed_cert.validity.not_after) { // use `catch unreachable` later.
// Ignore expired cert. const parsed_cert = try Certificate.parse(.{
cb.bytes.items.len = decoded_start; .buffer = cb.bytes.items,
continue; .index = decoded_start,
} });
const gop = try cb.map.getOrPutContext(gpa, parsed_cert.subject_slice, .{ .cb = cb }); if (now_sec > parsed_cert.validity.not_after) {
if (gop.found_existing) { // Ignore expired cert.
cb.bytes.items.len = decoded_start; cb.bytes.items.len = decoded_start;
} else { return;
gop.value_ptr.* = decoded_start; }
} const gop = try cb.map.getOrPutContext(gpa, parsed_cert.subject_slice, .{ .cb = cb });
if (gop.found_existing) {
cb.bytes.items.len = decoded_start;
} else {
gop.value_ptr.* = decoded_start;
} }
} }

View file

@@ -0,0 +1,136 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Bundle = @import("../Bundle.zig");
/// Finds system-installed root SSL certificates on macOS by parsing the
/// system root keychain file and feeding each embedded DER certificate to
/// `Bundle.parseCert`.
///
/// The keychain ("kych") file layout: a big-endian header, a schema at
/// `schema_offset`, a table-offset list, and per-table record-offset lists.
/// All offsets inside a table are relative to `schema_offset` (+ table
/// offset for records), not to the file start.
pub fn rescanMac(cb: *Bundle, gpa: Allocator) !void {
    const file = try fs.openFileAbsolute("/System/Library/Keychains/SystemRootCertificates.keychain", .{});
    defer file.close();

    const bytes = try file.readToEndAlloc(gpa, std.math.maxInt(u32));
    defer gpa.free(bytes);

    var stream = std.io.fixedBufferStream(bytes);
    const reader = stream.reader();

    // All multi-byte integers in the keychain file are stored big-endian.
    const db_header = try reader.readStructBig(ApplDbHeader);
    assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature)));

    try stream.seekTo(db_header.schema_offset);
    const db_schema = try reader.readStructBig(ApplDbSchema);

    const table_list = try gpa.alloc(u32, db_schema.table_count);
    defer gpa.free(table_list);

    var table_idx: u32 = 0;
    while (table_idx < table_list.len) : (table_idx += 1) {
        table_list[table_idx] = try reader.readIntBig(u32);
    }

    const now_sec = std.time.timestamp();

    for (table_list) |table_offset| {
        try stream.seekTo(db_header.schema_offset + table_offset);
        const table_header = try reader.readStructBig(TableHeader);

        // Only X.509 certificate tables are of interest here.
        if (@intToEnum(TableId, table_header.table_id) != TableId.CSSM_DL_DB_RECORD_X509_CERTIFICATE) {
            continue;
        }

        const record_list = try gpa.alloc(u32, table_header.record_count);
        defer gpa.free(record_list);

        var record_idx: u32 = 0;
        while (record_idx < record_list.len) : (record_idx += 1) {
            record_list[record_idx] = try reader.readIntBig(u32);
        }

        for (record_list) |record_offset| {
            try stream.seekTo(db_header.schema_offset + table_offset + record_offset);
            const cert_header = try reader.readStructBig(X509CertHeader);

            try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size);
            const cert_start = @intCast(u32, cb.bytes.items.len);
            const dest_buf = cb.bytes.allocatedSlice()[cert_start..];
            // Read exactly `cert_size` bytes. The previous code passed the
            // whole remaining capacity to `readAtLeast`, which could consume
            // bytes past the end of this certificate record and append
            // unrelated keychain data to `cb.bytes`.
            try reader.readNoEof(dest_buf[0..cert_header.cert_size]);
            cb.bytes.items.len += cert_header.cert_size;

            try cb.parseCert(gpa, cert_start, now_sec);
        }
    }
}
/// File header of an Apple keychain database; stored big-endian on disk,
/// so it is read via `readStructBig`.
const ApplDbHeader = extern struct {
    /// Magic bytes; expected to be "kych" (asserted in `rescanMac`).
    signature: @Vector(4, u8),
    version: u32,
    header_size: u32,
    /// Offset of the database schema from the start of the file; table and
    /// record offsets elsewhere in the file are relative to this.
    schema_offset: u32,
    auth_offset: u32,
};
/// Schema header located at `ApplDbHeader.schema_offset`; immediately
/// followed by `table_count` big-endian u32 table offsets.
const ApplDbSchema = extern struct {
    schema_size: u32,
    /// Number of table-offset entries that follow this struct.
    table_count: u32,
};
/// Header of one keychain table; followed by `record_count` big-endian u32
/// record offsets (relative to the table's own offset from the schema).
const TableHeader = extern struct {
    table_size: u32,
    /// Numeric table kind; compared against `TableId` values in `rescanMac`.
    table_id: u32,
    /// Number of record-offset entries that follow this struct.
    record_count: u32,
    // NOTE(review): the remaining fields are read but unused by this code;
    // meanings presumed from the keychain format — confirm before relying on them.
    records: u32,
    indexes_offset: u32,
    free_list_head: u32,
    record_numbers_count: u32,
};
/// Keychain table/record-type identifiers (CSSM record types). Only
/// `CSSM_DL_DB_RECORD_X509_CERTIFICATE` is used by `rescanMac`; the enum is
/// non-exhaustive (`_`) so unknown table ids in a keychain file do not trap
/// when converted with `@intToEnum`.
const TableId = enum(u32) {
    CSSM_DL_DB_SCHEMA_INFO = 0x00000000,
    CSSM_DL_DB_SCHEMA_INDEXES = 0x00000001,
    CSSM_DL_DB_SCHEMA_ATTRIBUTES = 0x00000002,
    CSSM_DL_DB_SCHEMA_PARSING_MODULE = 0x00000003,
    CSSM_DL_DB_RECORD_ANY = 0x0000000a,
    CSSM_DL_DB_RECORD_CERT = 0x0000000b,
    CSSM_DL_DB_RECORD_CRL = 0x0000000c,
    CSSM_DL_DB_RECORD_POLICY = 0x0000000d,
    CSSM_DL_DB_RECORD_GENERIC = 0x0000000e,
    CSSM_DL_DB_RECORD_PUBLIC_KEY = 0x0000000f,
    CSSM_DL_DB_RECORD_PRIVATE_KEY = 0x00000010,
    CSSM_DL_DB_RECORD_SYMMETRIC_KEY = 0x00000011,
    CSSM_DL_DB_RECORD_ALL_KEYS = 0x00000012,
    CSSM_DL_DB_RECORD_GENERIC_PASSWORD = 0x80000000,
    CSSM_DL_DB_RECORD_INTERNET_PASSWORD = 0x80000001,
    CSSM_DL_DB_RECORD_APPLESHARE_PASSWORD = 0x80000002,
    CSSM_DL_DB_RECORD_USER_TRUST = 0x80000003,
    CSSM_DL_DB_RECORD_X509_CRL = 0x80000004,
    CSSM_DL_DB_RECORD_UNLOCK_REFERRAL = 0x80000005,
    CSSM_DL_DB_RECORD_EXTENDED_ATTRIBUTE = 0x80000006,
    CSSM_DL_DB_RECORD_X509_CERTIFICATE = 0x80001000,
    CSSM_DL_DB_RECORD_METADATA = 0x80008000,
    _,
};
/// Header of an X.509 certificate record; the raw DER certificate bytes
/// follow this struct in the file. Read big-endian via `readStructBig`.
const X509CertHeader = extern struct {
    record_size: u32,
    record_number: u32,
    unknown1: u32,
    unknown2: u32,
    /// Size in bytes of the DER-encoded certificate that follows the header;
    /// the only field `rescanMac` actually uses.
    cert_size: u32,
    unknown3: u32,
    // NOTE(review): the fields below are unused by this code; names are
    // presumed from the keychain record layout — confirm before relying on them.
    cert_type: u32,
    cert_encoding: u32,
    print_name: u32,
    alias: u32,
    subject: u32,
    issuer: u32,
    serial_number: u32,
    subject_key_identifier: u32,
    public_key_hash: u32,
};

View file

@ -3,6 +3,7 @@ const math = std.math;
const assert = std.debug.assert; const assert = std.debug.assert;
const mem = std.mem; const mem = std.mem;
const testing = std.testing; const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
pub fn Reader( pub fn Reader(
comptime Context: type, comptime Context: type,
@ -351,6 +352,14 @@ pub fn Reader(
return res[0]; return res[0];
} }
/// Reads a struct of type `T` that is stored in big-endian byte order,
/// byte-swapping every field into native order when the host is
/// little-endian.
pub fn readStructBig(self: Self, comptime T: type) !T {
    var value = try self.readStruct(T);
    if (native_endian == std.builtin.Endian.Little) {
        mem.byteSwapAllFields(T, &value);
    }
    return value;
}
/// Reads an integer with the same size as the given enum's tag type. If the integer matches /// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an error. /// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an error.
/// TODO optimization taking advantage of most fields being in order /// TODO optimization taking advantage of most fields being in order

View file

@ -1,9 +1,7 @@
const std = @import("std"); const std = @import("std");
const builtin = @import("builtin");
const log = std.log.scoped(.archive); const log = std.log.scoped(.archive);
const macho = std.macho; const macho = std.macho;
const mem = std.mem; const mem = std.mem;
const native_endian = builtin.target.cpu.arch.endian();
pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Target.Cpu.Arch { pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Target.Cpu.Arch {
const cpu_arch: std.Target.Cpu.Arch = switch (cputype) { const cpu_arch: std.Target.Cpu.Arch = switch (cputype) {
@ -19,23 +17,13 @@ pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Targe
return cpu_arch; return cpu_arch;
} }
fn readFatStruct(reader: anytype, comptime T: type) !T {
// Fat structures (fat_header & fat_arch) are always written and read to/from
// disk in big endian order.
var res = try reader.readStruct(T);
if (native_endian != std.builtin.Endian.Big) {
mem.byteSwapAllFields(T, &res);
}
return res;
}
pub fn getLibraryOffset(reader: anytype, cpu_arch: std.Target.Cpu.Arch) !u64 { pub fn getLibraryOffset(reader: anytype, cpu_arch: std.Target.Cpu.Arch) !u64 {
const fat_header = try readFatStruct(reader, macho.fat_header); const fat_header = try reader.readStructBig(macho.fat_header);
if (fat_header.magic != macho.FAT_MAGIC) return 0; if (fat_header.magic != macho.FAT_MAGIC) return 0;
var fat_arch_index: u32 = 0; var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch) : (fat_arch_index += 1) { while (fat_arch_index < fat_header.nfat_arch) : (fat_arch_index += 1) {
const fat_arch = try readFatStruct(reader, macho.fat_arch); const fat_arch = try reader.readStructBig(macho.fat_arch);
// If we come across an architecture that we do not know how to handle, that's // If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match. // fine because we can keep looking for one that might match.
const lib_arch = decodeArch(fat_arch.cputype, false) catch |err| switch (err) { const lib_arch = decodeArch(fat_arch.cputype, false) catch |err| switch (err) {