macho: extract parallel hasher into a generic helper struct
commit b3a2ab3fed (parent 423d7b848b)

3 changed files with 67 additions and 59 deletions
CMakeLists.txt

@@ -594,6 +594,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/src/link/MachO/dead_strip.zig"
     "${CMAKE_SOURCE_DIR}/src/link/MachO/eh_frame.zig"
     "${CMAKE_SOURCE_DIR}/src/link/MachO/fat.zig"
+    "${CMAKE_SOURCE_DIR}/src/link/MachO/hasher.zig"
     "${CMAKE_SOURCE_DIR}/src/link/MachO/load_commands.zig"
     "${CMAKE_SOURCE_DIR}/src/link/MachO/thunks.zig"
     "${CMAKE_SOURCE_DIR}/src/link/MachO/zld.zig"
src/link/MachO/CodeSignature.zig

@@ -7,11 +7,10 @@ const log = std.log.scoped(.link);
 const macho = std.macho;
 const mem = std.mem;
 const testing = std.testing;
-const ThreadPool = std.Thread.Pool;
-const WaitGroup = std.Thread.WaitGroup;
 
 const Allocator = mem.Allocator;
 const Compilation = @import("../../Compilation.zig");
+const Hasher = @import("hasher.zig").ParallelHasher;
 const Sha256 = std.crypto.hash.sha2.Sha256;
 
 const hash_size = Sha256.digest_length;
@@ -289,7 +288,11 @@ pub fn writeAdhocSignature(
     self.code_directory.inner.nCodeSlots = total_pages;
 
     // Calculate hash for each page (in file) and write it to the buffer
-    try self.parallelHash(gpa, comp.thread_pool, opts.file, opts.file_size);
+    var hasher = Hasher(Sha256){};
+    try hasher.hash(gpa, comp.thread_pool, opts.file, self.code_directory.code_slots.items, .{
+        .chunk_size = self.page_size,
+        .max_file_size = opts.file_size,
+    });
 
     try blobs.append(.{ .code_directory = &self.code_directory });
     header.length += @sizeOf(macho.BlobIndex);
@@ -348,62 +351,6 @@ pub fn writeAdhocSignature(
     }
 }
 
-fn parallelHash(
-    self: *CodeSignature,
-    gpa: Allocator,
-    pool: *ThreadPool,
-    file: fs.File,
-    file_size: u32,
-) !void {
-    var wg: WaitGroup = .{};
-
-    const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size;
-    assert(self.code_directory.code_slots.items.len >= total_num_chunks);
-
-    const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks);
-    defer gpa.free(buffer);
-
-    const results = try gpa.alloc(fs.File.PReadError!usize, total_num_chunks);
-    defer gpa.free(results);
-
-    {
-        wg.reset();
-        defer wg.wait();
-
-        var i: usize = 0;
-        while (i < total_num_chunks) : (i += 1) {
-            const fstart = i * self.page_size;
-            const fsize = if (fstart + self.page_size > file_size)
-                file_size - fstart
-            else
-                self.page_size;
-            wg.start();
-            try pool.spawn(worker, .{
-                file,
-                fstart,
-                buffer[fstart..][0..fsize],
-                &self.code_directory.code_slots.items[i],
-                &results[i],
-                &wg,
-            });
-        }
-    }
-    for (results) |result| _ = try result;
-}
-
-fn worker(
-    file: fs.File,
-    fstart: usize,
-    buffer: []u8,
-    out: *[hash_size]u8,
-    err: *fs.File.PReadError!usize,
-    wg: *WaitGroup,
-) void {
-    defer wg.finish();
-    err.* = file.preadAll(buffer, fstart);
-    Sha256.hash(buffer, out, .{});
-}
-
 pub fn size(self: CodeSignature) u32 {
     var ssize: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size();
     if (self.requirements) |req| {
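A note on the pattern being moved (it reappears unchanged in hasher.zig below): std.Thread.Pool.spawn cannot propagate an error out of a worker, so each worker writes its pread result into a caller-owned fs.File.PReadError!usize slot, and the spawning side unwraps every slot with try only after the WaitGroup has drained. A minimal sketch of that idiom, with a hypothetical doWork function standing in for the pread-and-hash body:

    const std = @import("std");

    // Hypothetical unit of work; any fallible function fits the pattern.
    fn doWork() anyerror!void {}

    // Worker stores its error union in a caller-owned slot instead of returning it.
    fn job(slot: *anyerror!void, wg: *std.Thread.WaitGroup) void {
        defer wg.finish();
        slot.* = doWork();
    }

    fn runAll(gpa: std.mem.Allocator, pool: *std.Thread.Pool, n: usize) !void {
        const results = try gpa.alloc(anyerror!void, n);
        defer gpa.free(results);

        var wg: std.Thread.WaitGroup = .{};
        {
            defer wg.wait();
            for (results) |*slot| {
                wg.start();
                try pool.spawn(job, .{ slot, &wg });
            }
        }
        // Only now is it safe to read the slots: every worker has finished.
        for (results) |result| try result;
    }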
src/link/MachO/hasher.zig (new file, 60 lines)

@@ -0,0 +1,60 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const fs = std.fs;
+const mem = std.mem;
+
+const Allocator = mem.Allocator;
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
+
+pub fn ParallelHasher(comptime Hasher: type) type {
+    const hash_size = Hasher.digest_length;
+
+    return struct {
+        pub fn hash(self: @This(), gpa: Allocator, pool: *ThreadPool, file: fs.File, out: [][hash_size]u8, opts: struct {
+            chunk_size: u16 = 0x4000,
+            max_file_size: ?u64 = null,
+        }) !void {
+            _ = self;
+
+            var wg: WaitGroup = .{};
+
+            const file_size = opts.max_file_size orelse try file.getEndPos();
+            const total_num_chunks = mem.alignForward(u64, file_size, opts.chunk_size) / opts.chunk_size;
+            assert(out.len >= total_num_chunks);
+
+            const buffer = try gpa.alloc(u8, opts.chunk_size * total_num_chunks);
+            defer gpa.free(buffer);
+
+            const results = try gpa.alloc(fs.File.PReadError!usize, total_num_chunks);
+            defer gpa.free(results);
+
+            {
+                wg.reset();
+                defer wg.wait();
+
+                var i: usize = 0;
+                while (i < total_num_chunks) : (i += 1) {
+                    const fstart = i * opts.chunk_size;
+                    const fsize = if (fstart + opts.chunk_size > file_size) file_size - fstart else opts.chunk_size;
+                    wg.start();
+                    try pool.spawn(worker, .{ file, fstart, buffer[fstart..][0..fsize], &out[i], &results[i], &wg });
+                }
+            }
+            for (results) |result| _ = try result;
+        }
+
+        fn worker(
+            file: fs.File,
+            fstart: usize,
+            buffer: []u8,
+            out: *[hash_size]u8,
+            err: *fs.File.PReadError!usize,
+            wg: *WaitGroup,
+        ) void {
+            defer wg.finish();
+            err.* = file.preadAll(buffer, fstart);
+            Hasher.hash(buffer, out, .{});
+        }
+    };
+}
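For a sense of how the extracted helper is driven outside CodeSignature.zig, here is a minimal, hypothetical caller: the file path and chunk size are made up, but the sizing of the digest slice and the option struct mirror the writeAdhocSignature change above.

    const std = @import("std");
    const ParallelHasher = @import("hasher.zig").ParallelHasher;
    const Sha256 = std.crypto.hash.sha2.Sha256;

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var pool: std.Thread.Pool = undefined;
        try pool.init(.{ .allocator = gpa });
        defer pool.deinit();

        // Hypothetical input path; any readable file works.
        const file = try std.fs.cwd().openFile("zig-out/bin/app", .{});
        defer file.close();

        const chunk_size: u16 = 0x1000; // arbitrary for this sketch
        const file_size = try file.getEndPos();
        const n_chunks = std.mem.alignForward(u64, file_size, chunk_size) / chunk_size;

        // One digest slot per chunk, as the assert in hash() requires.
        const digests = try gpa.alloc([Sha256.digest_length]u8, n_chunks);
        defer gpa.free(digests);

        var hasher = ParallelHasher(Sha256){};
        try hasher.hash(gpa, &pool, file, digests, .{
            .chunk_size = chunk_size,
            .max_file_size = file_size,
        });
    }

Because the hasher type is a comptime parameter, any std.crypto hash exposing digest_length and a hash(data, out, options) function can be substituted for Sha256 without touching the helper.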