Optimize Allocator functions to create less duplicate code for similar types (#16332)

* Move functionality from generic functions that doesn't depend on the type into a function that only depends on comptime alignment.

This reduces comptime code duplication because e.g. `alloc(u32, 10)` and `alloc(i32, 10)` now use the same function `allocWithSizeAndAlignment(4, 4, 10)` under the hood.
This commit is contained in:
IntegratedQuantum 2023-07-06 20:41:49 +02:00 committed by GitHub
parent 91daf1c8d8
commit 49ac816e36
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -102,8 +102,8 @@ pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr:
 /// Call `destroy` with the result to free the memory.
 pub fn create(self: Allocator, comptime T: type) Error!*T {
     if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
-    const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress());
-    return &slice[0];
+    const ptr: *T = @ptrCast(try self.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
+    return ptr;
 }

 /// `ptr` should be the return value of `create`, or otherwise
@@ -113,7 +113,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
     const T = info.child;
     if (@sizeOf(T) == 0) return;
     const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
-    self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress());
+    self.rawFree(non_const_ptr[0..@sizeOf(T)], log2a(info.alignment), @returnAddress());
 }

 /// Allocates an array of `n` items of type `T` and sets all the
@@ -192,7 +192,7 @@ pub fn alignedAlloc(
     return self.allocAdvancedWithRetAddr(T, alignment, n, @returnAddress());
 }

-pub fn allocAdvancedWithRetAddr(
+pub inline fn allocAdvancedWithRetAddr(
     self: Allocator,
     comptime T: type,
     /// null means naturally aligned
@@ -201,23 +201,30 @@ pub fn allocAdvancedWithRetAddr(
     return_address: usize,
 ) Error![]align(alignment orelse @alignOf(T)) T {
     const a = alignment orelse @alignOf(T);
+    const ptr: [*]align(a) T = @ptrCast(try self.allocWithSizeAndAlignment(@sizeOf(T), a, n, return_address));
+    return ptr[0..n];
+}
+
+fn allocWithSizeAndAlignment(self: Allocator, comptime size: usize, comptime alignment: u29, n: usize, return_address: usize) Error![*]align(alignment) u8 {
+    const byte_count = math.mul(usize, size, n) catch return Error.OutOfMemory;
+    return self.allocBytesWithAlignment(alignment, byte_count, return_address);
+}
+
+fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: usize, return_address: usize) Error![*]align(alignment) u8 {
     // The Zig Allocator interface is not intended to solve alignments beyond
     // the minimum OS page size. For these use cases, the caller must use OS
     // APIs directly.
-    comptime assert(a <= mem.page_size);
-    if (n == 0) {
-        const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a);
-        return @as([*]align(a) T, @ptrFromInt(ptr))[0..0];
+    comptime assert(alignment <= mem.page_size);
+    if (byte_count == 0) {
+        const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
+        return @as([*]align(alignment) u8, @ptrFromInt(ptr));
     }

-    const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
-    const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
+    const byte_ptr = self.rawAlloc(byte_count, log2a(alignment), return_address) orelse return Error.OutOfMemory;
     // TODO: https://github.com/ziglang/zig/issues/4298
     @memset(byte_ptr[0..byte_count], undefined);
-    const byte_slice: []align(a) u8 = @alignCast(byte_ptr[0..byte_count]);
-    return mem.bytesAsSlice(T, byte_slice);
+    return @as([*]align(alignment) u8, @alignCast(byte_ptr));
 }

 /// Requests to modify the size of an allocation. It is guaranteed to not move