mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 05:44:20 +00:00

std: eradicate u29 and embrace std.mem.Alignment

parent ec28888581, commit f32a5d349d
32 changed files with 153 additions and 156 deletions
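Every hunk below makes the same mechanical move: an alignment that used to travel as a raw byte count of type u29 now travels as std.mem.Alignment, an enum whose integer tag is the log2 of the byte count. A minimal sketch of the three constructors the new call sites use, and the conversion back to byte units (the test wrapper is illustrative, not part of the commit):

    const std = @import("std");
    const Alignment = std.mem.Alignment;

    test "Alignment round trip" {
        const lit: Alignment = .@"16"; // byte count known literally
        const ty: Alignment = .of(u64); // natural alignment of a type
        const rt = Alignment.fromByteUnits(4096); // existing byte count; must be a power of two
        try std.testing.expectEqual(@as(usize, 16), lit.toByteUnits());
        try std.testing.expectEqual(@as(usize, @alignOf(u64)), ty.toByteUnits());
        try std.testing.expectEqual(@as(usize, 4096), rt.toByteUnits());
    }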
lib/compiler/aro/aro/Parser.zig (vendored, 4 changes)

@@ -101,7 +101,7 @@ value_map: Tree.ValueMap,
 // buffers used during compilation
 syms: SymbolStack = .{},
-strings: std.ArrayListAligned(u8, 4),
+strings: std.ArrayListAligned(u8, .@"4"),
 labels: std.ArrayList(Label),
 list_buf: NodeList,
 decl_buf: NodeList,

@@ -693,7 +693,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
 .gpa = pp.comp.gpa,
 .arena = arena.allocator(),
 .tok_ids = pp.tokens.items(.id),
-.strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa),
+.strings = std.ArrayListAligned(u8, .@"4").init(pp.comp.gpa),
 .value_map = Tree.ValueMap.init(pp.comp.gpa),
 .data = NodeList.init(pp.comp.gpa),
 .labels = std.ArrayList(Label).init(pp.comp.gpa),
lib/compiler/aro/aro/Preprocessor.zig (vendored, 2 changes)

@@ -983,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
 .tok_i = @intCast(token_state.tokens_len),
 .arena = pp.arena.allocator(),
 .in_macro = true,
-.strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa),
+.strings = std.ArrayListAligned(u8, .@"4").init(pp.comp.gpa),

 .data = undefined,
 .value_map = undefined,
@@ -841,12 +841,12 @@ fn ElfFile(comptime is_64: bool) type {
 arena: std.heap.ArenaAllocator,

 const SectionCategory = ElfFileHelper.SectionCategory;
-const section_memory_align = @alignOf(Elf_Sym); // most restrictive of what we may load in memory
+const section_memory_align: std.mem.Alignment = .of(Elf_Sym); // most restrictive of what we may load in memory
 const Section = struct {
 section: Elf_Shdr,
 name: []const u8 = "",
 segment: ?*const Elf_Phdr = null, // if the section is used by a program segment (there can be more than one)
-payload: ?[]align(section_memory_align) const u8 = null, // if we need the data in memory
+payload: ?[]align(section_memory_align.toByteUnits()) const u8 = null, // if we need the data in memory
 category: SectionCategory = .none, // should the section be kept in the exe or stripped to the debug database, or both.
 };

@@ -999,7 +999,7 @@ fn ElfFile(comptime is_64: bool) type {
 remap_idx: u16,

 // optionally overrides the payload from the source file
-payload: ?[]align(section_memory_align) const u8 = null,
+payload: ?[]align(section_memory_align.toByteUnits()) const u8 = null,
 section: ?Elf_Shdr = null,
 };
 const sections_update = try allocator.alloc(Update, self.sections.len);

@@ -1219,7 +1219,7 @@ fn ElfFile(comptime is_64: bool) type {
 if (options.debuglink) |link| {
 const payload = payload: {
 const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4);
-const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
+const buf = try allocator.alignedAlloc(u8, .@"4", crc_offset + 4);
 @memcpy(buf[0..link.name.len], link.name);
 @memset(buf[link.name.len..crc_offset], 0);
 @memcpy(buf[crc_offset..], std.mem.asBytes(&link.crc32));

@@ -1498,7 +1498,7 @@ const ElfFileHelper = struct {
 var section_reader = std.io.limitedReader(in_file.reader(), size);

 // allocate as large as decompressed data. if the compression doesn't fit, keep the data uncompressed.
-const compressed_data = try allocator.alignedAlloc(u8, 8, @intCast(size));
+const compressed_data = try allocator.alignedAlloc(u8, .@"8", @intCast(size));
 var compressed_stream = std.io.fixedBufferStream(compressed_data);

 try compressed_stream.writer().writeAll(prefix);
@@ -563,7 +563,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
 src_path.sub_path,
 check_object.max_bytes,
 null,
-@alignOf(u64),
+.of(u64),
 null,
 ) catch |err| return step.fail("unable to read '{'}': {s}", .{ src_path, @errorName(err) });
@@ -56,7 +56,7 @@ const Os = switch (builtin.os.tag) {
 const bytes = lfh.slice();
 const new_ptr = try gpa.alignedAlloc(
 u8,
-@alignOf(std.os.linux.file_handle),
+.of(std.os.linux.file_handle),
 @sizeOf(std.os.linux.file_handle) + bytes.len,
 );
 const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr);
@@ -2129,7 +2129,7 @@ const IndexHeader = struct {
 const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index));
 const index_size = hash_map.capacityIndexSize(new_bit_index);
 const nbytes = @sizeOf(IndexHeader) + index_size * len;
-const bytes = try gpa.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
+const bytes = try gpa.alignedAlloc(u8, .of(IndexHeader), nbytes);
 @memset(bytes[@sizeOf(IndexHeader)..], 0xff);
 const result: *IndexHeader = @alignCast(@ptrCast(bytes.ptr));
 result.* = .{
@@ -22,9 +22,9 @@ pub fn ArrayList(comptime T: type) type {
 ///
 /// This struct internally stores a `std.mem.Allocator` for memory management.
 /// To manually specify an allocator with each function call see `ArrayListAlignedUnmanaged`.
-pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
+pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
 if (alignment) |a| {
-if (a == @alignOf(T)) {
+if (a.toByteUnits() == @alignOf(T)) {
 return ArrayListAligned(T, null);
 }
 }

@@ -43,10 +43,10 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
 capacity: usize,
 allocator: Allocator,

-pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
+pub const Slice = if (alignment) |a| ([]align(a.toByteUnits()) T) else []T;

 pub fn SentinelSlice(comptime s: T) type {
-return if (alignment) |a| ([:s]align(a) T) else [:s]T;
+return if (alignment) |a| ([:s]align(a.toByteUnits()) T) else [:s]T;
 }

 /// Deinitialize with `deinit` or use `toOwnedSlice`.

@@ -611,9 +611,9 @@ pub fn ArrayListUnmanaged(comptime T: type) type {
 /// or use `toOwnedSlice`.
 ///
 /// Default initialization of this struct is deprecated; use `.empty` instead.
-pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
+pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alignment) type {
 if (alignment) |a| {
-if (a == @alignOf(T)) {
+if (a.toByteUnits() == @alignOf(T)) {
 return ArrayListAlignedUnmanaged(T, null);
 }
 }

@@ -637,10 +637,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 .capacity = 0,
 };

-pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
+pub const Slice = if (alignment) |a| ([]align(a.toByteUnits()) T) else []T;

 pub fn SentinelSlice(comptime s: T) type {
-return if (alignment) |a| ([:s]align(a) T) else [:s]T;
+return if (alignment) |a| ([:s]align(a.toByteUnits()) T) else [:s]T;
 }

 /// Initialize with capacity to hold `num` elements.

@@ -1913,7 +1913,7 @@ test "ArrayList(u8) implements writer" {
 try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
 }
 {
-var list = ArrayListAligned(u8, 2).init(a);
+var list = ArrayListAligned(u8, .@"2").init(a);
 defer list.deinit();

 const writer = list.writer();

@@ -1940,7 +1940,7 @@ test "ArrayListUnmanaged(u8) implements writer" {
 try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
 }
 {
-var list: ArrayListAlignedUnmanaged(u8, 2) = .empty;
+var list: ArrayListAlignedUnmanaged(u8, .@"2") = .empty;
 defer list.deinit(a);

 const writer = list.writer(a);

@@ -2126,7 +2126,7 @@ test "toOwnedSliceSentinel" {
 test "accepts unaligned slices" {
 const a = testing.allocator;
 {
-var list = std.ArrayListAligned(u8, 8).init(a);
+var list = std.ArrayListAligned(u8, .@"8").init(a);
 defer list.deinit();

 try list.appendSlice(&.{ 0, 1, 2, 3 });

@@ -2136,7 +2136,7 @@ test "accepts unaligned slices" {
 try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
 }
 {
-var list: std.ArrayListAlignedUnmanaged(u8, 8) = .empty;
+var list: std.ArrayListAlignedUnmanaged(u8, .@"8") = .empty;
 defer list.deinit(a);

 try list.appendSlice(a, &.{ 0, 1, 2, 3 });
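For callers of ArrayListAligned the change is confined to the comptime parameter; the element type, the alignment guarantee on items, and the rest of the API are untouched. A hypothetical caller before and after, mirroring the tests above (sketch, not from the commit):

    const std = @import("std");

    test "over-aligned byte list" {
        const gpa = std.testing.allocator;
        // before this commit: std.ArrayListAligned(u8, 8).init(gpa)
        var list = std.ArrayListAligned(u8, .@"8").init(gpa);
        defer list.deinit();
        try list.appendSlice(&.{ 0, 1, 2, 3 });
        // items is still []align(8) u8; only the parameter's type changed
        try std.testing.expect(std.mem.isAligned(@intFromPtr(list.items.ptr), 8));
    }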
@@ -2,6 +2,7 @@ const std = @import("std.zig");
 const assert = std.debug.assert;
 const mem = std.mem;
 const testing = std.testing;
+const Alignment = std.mem.Alignment;

 /// A structure with an array and a length, that can be used as a slice.
 ///

@@ -16,7 +17,7 @@ const testing = std.testing;
 /// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
 /// ```
 pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
-return BoundedArrayAligned(T, @alignOf(T), buffer_capacity);
+return BoundedArrayAligned(T, .of(T), buffer_capacity);
 }

 /// A structure with an array, length and alignment, that can be used as a

@@ -34,12 +35,12 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
 /// ```
 pub fn BoundedArrayAligned(
 comptime T: type,
-comptime alignment: u29,
+comptime alignment: Alignment,
 comptime buffer_capacity: usize,
 ) type {
 return struct {
 const Self = @This();
-buffer: [buffer_capacity]T align(alignment) = undefined,
+buffer: [buffer_capacity]T align(alignment.toByteUnits()) = undefined,
 len: usize = 0,

 /// Set the actual length of the slice.

@@ -51,15 +52,15 @@ pub fn BoundedArrayAligned(

 /// View the internal array as a slice whose size was previously set.
 pub fn slice(self: anytype) switch (@TypeOf(&self.buffer)) {
-*align(alignment) [buffer_capacity]T => []align(alignment) T,
-*align(alignment) const [buffer_capacity]T => []align(alignment) const T,
+*align(alignment.toByteUnits()) [buffer_capacity]T => []align(alignment.toByteUnits()) T,
+*align(alignment.toByteUnits()) const [buffer_capacity]T => []align(alignment.toByteUnits()) const T,
 else => unreachable,
 } {
 return self.buffer[0..self.len];
 }

 /// View the internal array as a constant slice whose size was previously set.
-pub fn constSlice(self: *const Self) []align(alignment) const T {
+pub fn constSlice(self: *const Self) []align(alignment.toByteUnits()) const T {
 return self.slice();
 }

@@ -120,7 +121,7 @@ pub fn BoundedArrayAligned(

 /// Resize the slice, adding `n` new elements, which have `undefined` values.
 /// The return value is a pointer to the array of uninitialized elements.
-pub fn addManyAsArray(self: *Self, comptime n: usize) error{Overflow}!*align(alignment) [n]T {
+pub fn addManyAsArray(self: *Self, comptime n: usize) error{Overflow}!*align(alignment.toByteUnits()) [n]T {
 const prev_len = self.len;
 try self.resize(self.len + n);
 return self.slice()[prev_len..][0..n];

@@ -128,7 +129,7 @@ pub fn BoundedArrayAligned(

 /// Resize the slice, adding `n` new elements, which have `undefined` values.
 /// The return value is a slice pointing to the uninitialized elements.
-pub fn addManyAsSlice(self: *Self, n: usize) error{Overflow}![]align(alignment) T {
+pub fn addManyAsSlice(self: *Self, n: usize) error{Overflow}![]align(alignment.toByteUnits()) T {
 const prev_len = self.len;
 try self.resize(self.len + n);
 return self.slice()[prev_len..][0..n];

@@ -146,7 +147,7 @@ pub fn BoundedArrayAligned(
 /// This can be useful for writing directly into it.
 /// Note that such an operation must be followed up with a
 /// call to `resize()`
-pub fn unusedCapacitySlice(self: *Self) []align(alignment) T {
+pub fn unusedCapacitySlice(self: *Self) []align(alignment.toByteUnits()) T {
 return self.buffer[self.len..];
 }

@@ -399,7 +400,7 @@ test BoundedArray {
 }

 test "BoundedArrayAligned" {
-var a = try BoundedArrayAligned(u8, 16, 4).init(0);
+var a = try BoundedArrayAligned(u8, .@"16", 4).init(0);
 try a.append(0);
 try a.append(0);
 try a.append(255);
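The BoundedArrayAligned hunks show the other half of the pattern: parameters and constants become Alignment, but Zig's align() qualifier in a type expression still takes plain byte units, so every type position converts back with .toByteUnits(). The same idiom in a hypothetical helper (assumed names, not from the commit):

    const std = @import("std");
    const Alignment = std.mem.Alignment;

    /// Hypothetical helper: build a slice type from an Alignment. The
    /// align() qualifier wants an integer, hence the conversion.
    fn AlignedSlice(comptime T: type, comptime alignment: Alignment) type {
        return []align(alignment.toByteUnits()) T;
    }

    test AlignedSlice {
        var buf: [4]u32 align(16) = .{ 1, 2, 3, 4 };
        const s: AlignedSlice(u32, .@"16") = &buf;
        try std.testing.expectEqual(@as(u32, 3), s[2]);
    }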
@@ -14,7 +14,7 @@ const pwhash = crypto.pwhash;

 const Thread = std.Thread;
 const Blake2b512 = blake2.Blake2b512;
-const Blocks = std.ArrayListAligned([block_length]u64, 16);
+const Blocks = std.ArrayListAligned([block_length]u64, .@"16");
 const H0 = [Blake2b512.digest_length + 8]u8;

 const EncodingError = crypto.errors.EncodingError;
@@ -195,11 +195,11 @@ pub fn kdf(
 params.r > max_int / 256 or
 n > max_int / 128 / @as(u64, params.r)) return KdfError.WeakParameters;

-const xy = try allocator.alignedAlloc(u32, 16, 64 * params.r);
+const xy = try allocator.alignedAlloc(u32, .@"16", 64 * params.r);
 defer allocator.free(xy);
-const v = try allocator.alignedAlloc(u32, 16, 32 * n * params.r);
+const v = try allocator.alignedAlloc(u32, .@"16", 32 * n * params.r);
 defer allocator.free(v);
-var dk = try allocator.alignedAlloc(u8, 16, params.p * 128 * params.r);
+var dk = try allocator.alignedAlloc(u8, .@"16", params.p * 128 * params.r);
 defer allocator.free(dk);

 try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256);
@@ -1960,7 +1960,7 @@ pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
 /// On WASI, `file_path` should be encoded as valid UTF-8.
 /// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
 pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
-return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
+return self.readFileAllocOptions(allocator, file_path, max_bytes, null, .of(u8), null);
 }

 /// On success, caller owns returned buffer.

@@ -1977,9 +1977,9 @@ pub fn readFileAllocOptions(
 file_path: []const u8,
 max_bytes: usize,
 size_hint: ?usize,
-comptime alignment: u29,
+comptime alignment: std.mem.Alignment,
 comptime optional_sentinel: ?u8,
-) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
+) !(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
 var file = try self.openFile(file_path, .{});
 defer file.close();
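Callers of readFileAllocOptions keep the same argument order and may still request a sentinel; only the alignment argument's type changes. A hypothetical call site wanting a NUL-terminated, byte-aligned buffer, in the style of the .@"1", 0 call sites later in this commit (the file name and size limit here are made up):

    const std = @import("std");

    /// Sketch: read a file as a NUL-terminated byte slice.
    fn readManifestZ(dir: std.fs.Dir, gpa: std.mem.Allocator) ![:0]u8 {
        // args: allocator, path, max_bytes, size_hint, alignment, sentinel
        return dir.readFileAllocOptions(gpa, "build.zig.zon", 1024 * 1024, null, .@"1", 0);
    }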
@@ -1133,7 +1133,7 @@ pub fn updateTimes(
 /// On success, caller owns returned buffer.
 /// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
 pub fn readToEndAlloc(self: File, allocator: Allocator, max_bytes: usize) ![]u8 {
-return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
+return self.readToEndAllocOptions(allocator, max_bytes, null, .of(u8), null);
 }

 /// Reads all the bytes from the current position to the end of the file.

@@ -1147,9 +1147,9 @@ pub fn readToEndAllocOptions(
 allocator: Allocator,
 max_bytes: usize,
 size_hint: ?usize,
-comptime alignment: u29,
+comptime alignment: Alignment,
 comptime optional_sentinel: ?u8,
-) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
+) !(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
 // If no size hint is provided fall back to the size=0 code path
 const size = size_hint orelse 0;

@@ -1782,3 +1782,4 @@ const windows = std.os.windows;
 const Os = std.builtin.Os;
 const maxInt = std.math.maxInt;
 const is_windows = builtin.os.tag == .windows;
+const Alignment = std.mem.Alignment;
@@ -6,6 +6,7 @@ const math = std.math;
 const mem = std.mem;
 const Allocator = mem.Allocator;
 const Wyhash = std.hash.Wyhash;
+const Alignment = std.mem.Alignment;

 pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u64) {
 comptime {

@@ -1460,7 +1461,7 @@ pub fn HashMapUnmanaged(
 const header_align = @alignOf(Header);
 const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
 const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
-const max_align = comptime @max(header_align, key_align, val_align);
+const max_align: Alignment = comptime .fromByteUnits(@max(header_align, key_align, val_align));

 const new_cap: usize = new_capacity;
 const meta_size = @sizeOf(Header) + new_cap * @sizeOf(Metadata);

@@ -1472,7 +1473,7 @@ pub fn HashMapUnmanaged(
 const vals_start = std.mem.alignForward(usize, keys_end, val_align);
 const vals_end = vals_start + new_cap * @sizeOf(V);

-const total_size = std.mem.alignForward(usize, vals_end, max_align);
+const total_size = max_align.forward(vals_end);

 const slice = try allocator.alignedAlloc(u8, max_align, total_size);
 const ptr: [*]u8 = @ptrCast(slice.ptr);
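Besides the parameter type, this hunk trades the free function std.mem.alignForward for the method form on the enum; the two agree on every address. An illustrative equivalence check (not part of the commit):

    const std = @import("std");
    const Alignment = std.mem.Alignment;

    test "forward matches alignForward" {
        const a: Alignment = .@"8";
        var n: usize = 0;
        while (n < 64) : (n += 5) {
            try std.testing.expectEqual(std.mem.alignForward(usize, n, 8), a.forward(n));
        }
    }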
@@ -7,6 +7,7 @@ const mem = std.mem;
 const c = std.c;
 const Allocator = std.mem.Allocator;
 const windows = std.os.windows;
+const Alignment = std.mem.Alignment;

 pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
 pub const SmpAllocator = @import("heap/SmpAllocator.zig");

@@ -153,7 +154,7 @@ const CAllocator = struct {
 return @alignCast(@ptrCast(ptr - @sizeOf(usize)));
 }

-fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
+fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 {
 const alignment_bytes = alignment.toByteUnits();
 if (supports_posix_memalign) {
 // The posix_memalign only accepts alignment values that are a

@@ -201,7 +202,7 @@ const CAllocator = struct {
 fn alloc(
 _: *anyopaque,
 len: usize,
-alignment: mem.Alignment,
+alignment: Alignment,
 return_address: usize,
 ) ?[*]u8 {
 _ = return_address;

@@ -212,7 +213,7 @@ const CAllocator = struct {
 fn resize(
 _: *anyopaque,
 buf: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) bool {

@@ -233,7 +234,7 @@ const CAllocator = struct {
 fn remap(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) ?[*]u8 {

@@ -245,7 +246,7 @@ const CAllocator = struct {
 fn free(
 _: *anyopaque,
 buf: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 return_address: usize,
 ) void {
 _ = alignment;

@@ -281,7 +282,7 @@ const raw_c_allocator_vtable: Allocator.VTable = .{
 fn rawCAlloc(
 context: *anyopaque,
 len: usize,
-alignment: mem.Alignment,
+alignment: Alignment,
 return_address: usize,
 ) ?[*]u8 {
 _ = context;

@@ -299,7 +300,7 @@ fn rawCAlloc(
 fn rawCResize(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) bool {

@@ -314,7 +315,7 @@ fn rawCResize(
 fn rawCRemap(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) ?[*]u8 {

@@ -327,7 +328,7 @@ fn rawCRemap(
 fn rawCFree(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 return_address: usize,
 ) void {
 _ = context;

@@ -425,7 +426,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
 fn alloc(
 ctx: *anyopaque,
 len: usize,
-alignment: mem.Alignment,
+alignment: Alignment,
 ra: usize,
 ) ?[*]u8 {
 const self: *Self = @ptrCast(@alignCast(ctx));

@@ -436,7 +437,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
 fn resize(
 ctx: *anyopaque,
 buf: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 ra: usize,
 ) bool {

@@ -451,7 +452,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
 fn remap(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) ?[*]u8 {

@@ -466,7 +467,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
 fn free(
 ctx: *anyopaque,
 buf: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 ra: usize,
 ) void {
 const self: *Self = @ptrCast(@alignCast(ctx));

@@ -512,7 +513,7 @@ test PageAllocator {
 }

 if (builtin.os.tag == .windows) {
-const slice = try allocator.alignedAlloc(u8, page_size_min, 128);
+const slice = try allocator.alignedAlloc(u8, .fromByteUnits(page_size_min), 128);
 slice[0] = 0x12;
 slice[127] = 0x34;
 allocator.free(slice);

@@ -609,7 +610,7 @@ pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
 const allocator = validationAllocator.allocator();

 // Test a few alignment values, smaller and bigger than the type's one
-inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
+inline for ([_]Alignment{ .@"1", .@"2", .@"4", .@"8", .@"16", .@"32", .@"64" }) |alignment| {
 // initial
 var slice = try allocator.alignedAlloc(u8, alignment, 10);
 try testing.expect(slice.len == 10);

@@ -640,7 +641,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
 var align_mask: usize = undefined;
 align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];

-var slice = try allocator.alignedAlloc(u8, large_align, 500);
+var slice = try allocator.alignedAlloc(u8, .fromByteUnits(large_align), 500);
 try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

 if (allocator.resize(slice, 100)) {

@@ -669,7 +670,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
 const debug_allocator = fib.allocator();

 const alloc_size = pageSize() * 2 + 50;
-var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
 defer allocator.free(slice);

 var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);

@@ -679,7 +680,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
 // fail, because of this high over-alignment we want to have.
 while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
 try stuff_to_free.append(slice);
-slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
 }
 while (stuff_to_free.pop()) |item| {
 allocator.free(item);
@@ -218,7 +218,7 @@ test "very large allocation" {
 }

 test "realloc" {
-var slice = try test_ally.alignedAlloc(u8, @alignOf(u32), 1);
+var slice = try test_ally.alignedAlloc(u8, .of(u32), 1);
 defer test_ally.free(slice);
 slice[0] = 0x12;
@@ -2,6 +2,7 @@ const std = @import("../std.zig");
 const assert = std.debug.assert;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
+const Alignment = std.mem.Alignment;

 /// This allocator takes an existing allocator, wraps it, and provides an interface where
 /// you can allocate and then free it all together. Calls to free an individual item only

@@ -41,7 +42,7 @@ pub const ArenaAllocator = struct {
 data: usize,
 node: std.SinglyLinkedList.Node = .{},
 };
-const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));
+const BufNode_alignment: Alignment = .fromByteUnits(@alignOf(BufNode));

 pub fn init(child_allocator: Allocator) ArenaAllocator {
 return (State{}).promote(child_allocator);

@@ -181,7 +182,7 @@ pub const ArenaAllocator = struct {
 return buf_node;
 }

-fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
+fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ra: usize) ?[*]u8 {
 const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
 _ = ra;

@@ -214,7 +215,7 @@ pub const ArenaAllocator = struct {
 }
 }

-fn resize(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool {
+fn resize(ctx: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
 const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
 _ = alignment;
 _ = ret_addr;

@@ -242,14 +243,14 @@ pub const ArenaAllocator = struct {
 fn remap(
 context: *anyopaque,
 memory: []u8,
-alignment: mem.Alignment,
+alignment: Alignment,
 new_len: usize,
 return_address: usize,
 ) ?[*]u8 {
 return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
 }

-fn free(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, ret_addr: usize) void {
+fn free(ctx: *anyopaque, buf: []u8, alignment: Alignment, ret_addr: usize) void {
 _ = alignment;
 _ = ret_addr;

@@ -279,9 +280,9 @@ test "reset with preheating" {
 const total_size: usize = random.intRangeAtMost(usize, 256, 16384);
 while (alloced_bytes < total_size) {
 const size = random.intRangeAtMost(usize, 16, 256);
-const alignment = 32;
+const alignment: Alignment = .@"32";
 const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size);
-try std.testing.expect(std.mem.isAligned(@intFromPtr(slice.ptr), alignment));
+try std.testing.expect(alignment.check(@intFromPtr(slice.ptr)));
 try std.testing.expectEqual(size, slice.len);
 alloced_bytes += slice.len;
 }
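The preheating test swaps std.mem.isAligned(addr, 32) for alignment.check(addr): check is the enum-flavored spelling of the same predicate. Another illustrative equivalence check (not part of the commit):

    const std = @import("std");
    const Alignment = std.mem.Alignment;

    test "check matches isAligned" {
        const a: Alignment = .@"32";
        var addr: usize = 0;
        while (addr < 256) : (addr += 16) {
            try std.testing.expectEqual(std.mem.isAligned(addr, 32), a.check(addr));
        }
    }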
@@ -1120,7 +1120,7 @@ test "realloc" {
 defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
 const allocator = gpa.allocator();

-var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
+var slice = try allocator.alignedAlloc(u8, .of(u32), 1);
 defer allocator.free(slice);
 slice[0] = 0x12;

@@ -1234,7 +1234,7 @@ test "shrink large object to large object with larger alignment" {
 const debug_allocator = fba.allocator();

 const alloc_size = default_page_size * 2 + 50;
-var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
 defer allocator.free(slice);

 const big_alignment: usize = default_page_size * 2;

@@ -1244,7 +1244,7 @@ test "shrink large object to large object with larger alignment" {
 var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
 while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
 try stuff_to_free.append(slice);
-slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
 }
 while (stuff_to_free.pop()) |item| {
 allocator.free(item);

@@ -1308,7 +1308,7 @@ test "realloc large object to larger alignment" {
 var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
 const debug_allocator = fba.allocator();

-var slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+var slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
 defer allocator.free(slice);

 const big_alignment: usize = default_page_size * 2;

@@ -1316,7 +1316,7 @@ test "realloc large object to larger alignment" {
 var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
 while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
 try stuff_to_free.append(slice);
-slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
 }
 while (stuff_to_free.pop()) |item| {
 allocator.free(item);

@@ -1402,7 +1402,7 @@ test "large allocations count requested size not backing size" {
 var gpa: DebugAllocator(.{ .enable_memory_limit = true }) = .{};
 const allocator = gpa.allocator();

-var buf = try allocator.alignedAlloc(u8, 1, default_page_size + 1);
+var buf = try allocator.alignedAlloc(u8, .@"1", default_page_size + 1);
 try std.testing.expectEqual(default_page_size + 1, gpa.total_requested_bytes);
 buf = try allocator.realloc(buf, 1);
 try std.testing.expectEqual(1, gpa.total_requested_bytes);
@@ -1,4 +1,5 @@
 const std = @import("../std.zig");
+const Alignment = std.mem.Alignment;

 const debug_mode = @import("builtin").mode == .Debug;

@@ -8,14 +9,14 @@ pub const MemoryPoolError = error{OutOfMemory};
 /// Use this when you need to allocate a lot of objects of the same type,
 /// because It outperforms general purpose allocators.
 pub fn MemoryPool(comptime Item: type) type {
-return MemoryPoolAligned(Item, @alignOf(Item));
+return MemoryPoolAligned(Item, .of(Item));
 }

 /// A memory pool that can allocate objects of a single type very quickly.
 /// Use this when you need to allocate a lot of objects of the same type,
 /// because It outperforms general purpose allocators.
-pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type {
-if (@alignOf(Item) == alignment) {
+pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: Alignment) type {
+if (@alignOf(Item) == comptime alignment.toByteUnits()) {
 return MemoryPoolExtra(Item, .{});
 } else {
 return MemoryPoolExtra(Item, .{ .alignment = alignment });

@@ -24,7 +25,7 @@ pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type {

 pub const Options = struct {
 /// The alignment of the memory pool items. Use `null` for natural alignment.
-alignment: ?u29 = null,
+alignment: ?Alignment = null,

 /// If `true`, the memory pool can allocate additional items after a initial setup.
 /// If `false`, the memory pool will not allocate further after a call to `initPreheated`.

@@ -43,17 +44,17 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
 pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));

 // This needs to be kept in sync with Node.
-const node_alignment = @alignOf(*anyopaque);
+const node_alignment: Alignment = .of(*anyopaque);

 /// Alignment of the memory pool items. This is not necessarily the same
 /// as `@alignOf(Item)` as the pool also uses the items for internal means.
-pub const item_alignment = @max(node_alignment, pool_options.alignment orelse @alignOf(Item));
+pub const item_alignment: Alignment = node_alignment.max(pool_options.alignment orelse .of(Item));

 const Node = struct {
-next: ?*align(item_alignment) @This(),
+next: ?*align(item_alignment.toByteUnits()) @This(),
 };
-const NodePtr = *align(item_alignment) Node;
-const ItemPtr = *align(item_alignment) Item;
+const NodePtr = *align(item_alignment.toByteUnits()) Node;
+const ItemPtr = *align(item_alignment.toByteUnits()) Item;

 arena: std.heap.ArenaAllocator,
 free_list: ?NodePtr = null,

@@ -143,7 +144,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
 pool.free_list = node;
 }

-fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment) [item_size]u8 {
+fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment.toByteUnits()) [item_size]u8 {
 const mem = try pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size);
 return mem[0..item_size]; // coerce slice to array pointer
 }

@@ -213,7 +214,7 @@ test "greater than pointer manual alignment" {
 data: u64,
 };

-var pool = MemoryPoolAligned(Foo, 16).init(std.testing.allocator);
+var pool = MemoryPoolAligned(Foo, .@"16").init(std.testing.allocator);
 defer pool.deinit();

 const foo: *align(16) Foo = try pool.create();
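item_alignment above moves from @max over integers to the enum's max method. Since the enum's tag is the log2 of the byte count, comparing tags compares alignments, which is exactly what max and order do. A sketch (illustrative test, not part of the commit):

    const std = @import("std");
    const Alignment = std.mem.Alignment;

    test "max picks the stricter alignment" {
        const node: Alignment = .of(*anyopaque);
        const item: Alignment = .@"1";
        // pointer alignment is at least 1, so max() keeps the node's requirement
        try std.testing.expectEqual(node, node.max(item));
        try std.testing.expect(Alignment.@"1".order(.@"8") == .lt);
    }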
@@ -5,7 +5,6 @@ const c = std.c;
 const is_windows = builtin.os.tag == .windows;
 const windows = std.os.windows;
 const posix = std.posix;
-
 const math = std.math;
 const assert = std.debug.assert;
 const fs = std.fs;

@@ -13,6 +12,7 @@ const mem = std.mem;
 const meta = std.meta;
 const File = std.fs.File;
 const Allocator = std.mem.Allocator;
+const Alignment = std.mem.Alignment;

 fn getStdOutHandle() posix.fd_t {
 if (is_windows) {

@@ -104,7 +104,7 @@ pub fn GenericReader(

 pub inline fn readAllArrayListAligned(
 self: Self,
-comptime alignment: ?u29,
+comptime alignment: ?Alignment,
 array_list: *std.ArrayListAligned(u8, alignment),
 max_append_size: usize,
 ) (error{StreamTooLong} || Allocator.Error || Error)!void {
@@ -54,7 +54,7 @@ pub fn readAllArrayList(

 pub fn readAllArrayListAligned(
 self: Self,
-comptime alignment: ?u29,
+comptime alignment: ?Alignment,
 array_list: *std.ArrayListAligned(u8, alignment),
 max_append_size: usize,
 ) anyerror!void {

@@ -379,6 +379,7 @@ const assert = std.debug.assert;
 const mem = std.mem;
 const testing = std.testing;
 const native_endian = @import("builtin").target.cpu.arch.endian();
+const Alignment = std.mem.Alignment;

 test {
 _ = @import("Reader/test.zig");
@@ -4,6 +4,7 @@ const float = @import("math/float.zig");
 const assert = std.debug.assert;
 const mem = std.mem;
 const testing = std.testing;
+const Alignment = std.mem.Alignment;

 /// Euler's number (e)
 pub const e = 2.71828182845904523536028747135266249775724709369995;

@@ -1084,19 +1085,16 @@ test cast {

 pub const AlignCastError = error{UnalignedMemory};

-fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type {
+fn AlignCastResult(comptime alignment: Alignment, comptime Ptr: type) type {
 var ptr_info = @typeInfo(Ptr);
-ptr_info.pointer.alignment = alignment;
+ptr_info.pointer.alignment = alignment.toByteUnits();
 return @Type(ptr_info);
 }

 /// Align cast a pointer but return an error if it's the wrong alignment
-pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) {
-const addr = @intFromPtr(ptr);
-if (addr % alignment != 0) {
-return error.UnalignedMemory;
-}
-return @alignCast(ptr);
+pub fn alignCast(comptime alignment: Alignment, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) {
+if (alignment.check(@intFromPtr(ptr))) return @alignCast(ptr);
+return error.UnalignedMemory;
 }

 /// Asserts `int > 0`.
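math.alignCast keeps its error-returning contract but now takes the enum, and its body collapses to a single alignment.check call. A usage sketch (illustrative, not part of the commit):

    const std = @import("std");

    test "math.alignCast returns an error instead of asserting" {
        var buf: [16]u8 align(8) = undefined;
        buf[1] = 0xaa;
        // one past an 8-aligned base is never 8-aligned:
        try std.testing.expectError(
            error.UnalignedMemory,
            std.math.alignCast(.@"8", @as(*u8, &buf[1])),
        );
        // the aligned base passes and gains the stronger pointer type:
        const p: *align(8) u8 = try std.math.alignCast(.@"8", @as(*u8, &buf[0]));
        p.* = 0x55;
        try std.testing.expectEqual(@as(u8, 0x55), buf[0]);
    }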
@@ -38,6 +38,10 @@ pub const Alignment = enum(math.Log2Int(usize)) {
 return @enumFromInt(@ctz(n));
 }

+pub inline fn of(comptime T: type) Alignment {
+return comptime fromByteUnits(@alignOf(T));
+}
+
 pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
 return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
 }

@@ -166,21 +170,6 @@ pub fn validationWrap(allocator: anytype) ValidationAllocator(@TypeOf(allocator)
 return ValidationAllocator(@TypeOf(allocator)).init(allocator);
 }

-/// An allocator helper function. Adjusts an allocation length satisfy `len_align`.
-/// `full_len` should be the full capacity of the allocation which may be greater
-/// than the `len` that was requested. This function should only be used by allocators
-/// that are unaffected by `len_align`.
-pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
-assert(alloc_len > 0);
-assert(alloc_len >= len_align);
-assert(full_len >= alloc_len);
-if (len_align == 0)
-return alloc_len;
-const adjusted = alignBackwardAnyAlign(usize, full_len, len_align);
-assert(adjusted >= alloc_len);
-return adjusted;
-}
-
 test "Allocator basics" {
 try testing.expectError(error.OutOfMemory, testing.failing_allocator.alloc(u8, 1));
 try testing.expectError(error.OutOfMemory, testing.failing_allocator.allocSentinel(u8, 1, 0));
@@ -152,9 +152,9 @@ pub inline fn rawFree(a: Allocator, memory: []u8, alignment: Alignment, ret_addr
 pub fn create(a: Allocator, comptime T: type) Error!*T {
 if (@sizeOf(T) == 0) {
 const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), @alignOf(T));
-return @as(*T, @ptrFromInt(ptr));
+return @ptrFromInt(ptr);
 }
-const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
+const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(.of(T), @sizeOf(T), @returnAddress()));
 return ptr;
 }

@@ -186,7 +186,7 @@ pub fn allocWithOptions(
 comptime Elem: type,
 n: usize,
 /// null means naturally aligned
-comptime optional_alignment: ?u29,
+comptime optional_alignment: ?Alignment,
 comptime optional_sentinel: ?Elem,
 ) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
 return self.allocWithOptionsRetAddr(Elem, n, optional_alignment, optional_sentinel, @returnAddress());

@@ -197,7 +197,7 @@ pub fn allocWithOptionsRetAddr(
 comptime Elem: type,
 n: usize,
 /// null means naturally aligned
-comptime optional_alignment: ?u29,
+comptime optional_alignment: ?Alignment,
 comptime optional_sentinel: ?Elem,
 return_address: usize,
 ) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {

@@ -210,11 +210,11 @@ pub fn allocWithOptionsRetAddr(
 }
 }

-fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
+fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?Alignment, comptime sentinel: ?Elem) type {
 if (sentinel) |s| {
-return [:s]align(alignment orelse @alignOf(Elem)) Elem;
+return [:s]align(if (alignment) |a| a.toByteUnits() else @alignOf(Elem)) Elem;
 } else {
-return []align(alignment orelse @alignOf(Elem)) Elem;
+return []align(if (alignment) |a| a.toByteUnits() else @alignOf(Elem)) Elem;
 }
 }

@@ -239,9 +239,9 @@ pub fn alignedAlloc(
 self: Allocator,
 comptime T: type,
 /// null means naturally aligned
-comptime alignment: ?u29,
+comptime alignment: ?Alignment,
 n: usize,
-) Error![]align(alignment orelse @alignOf(T)) T {
+) Error![]align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T {
 return self.allocAdvancedWithRetAddr(T, alignment, n, @returnAddress());
 }

@@ -249,27 +249,38 @@ pub inline fn allocAdvancedWithRetAddr(
 self: Allocator,
 comptime T: type,
 /// null means naturally aligned
-comptime alignment: ?u29,
+comptime alignment: ?Alignment,
 n: usize,
 return_address: usize,
-) Error![]align(alignment orelse @alignOf(T)) T {
-const a = alignment orelse @alignOf(T);
-const ptr: [*]align(a) T = @ptrCast(try self.allocWithSizeAndAlignment(@sizeOf(T), a, n, return_address));
+) Error![]align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T {
+const a = comptime (alignment orelse Alignment.fromByteUnits(@alignOf(T)));
+const ptr: [*]align(a.toByteUnits()) T = @ptrCast(try self.allocWithSizeAndAlignment(@sizeOf(T), a, n, return_address));
 return ptr[0..n];
 }

-fn allocWithSizeAndAlignment(self: Allocator, comptime size: usize, comptime alignment: u29, n: usize, return_address: usize) Error![*]align(alignment) u8 {
+fn allocWithSizeAndAlignment(
+self: Allocator,
+comptime size: usize,
+comptime alignment: Alignment,
+n: usize,
+return_address: usize,
+) Error![*]align(alignment.toByteUnits()) u8 {
 const byte_count = math.mul(usize, size, n) catch return Error.OutOfMemory;
 return self.allocBytesWithAlignment(alignment, byte_count, return_address);
 }

-fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: usize, return_address: usize) Error![*]align(alignment) u8 {
+fn allocBytesWithAlignment(
+self: Allocator,
+comptime alignment: Alignment,
+byte_count: usize,
+return_address: usize,
+) Error![*]align(alignment.toByteUnits()) u8 {
 if (byte_count == 0) {
-const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
-return @as([*]align(alignment) u8, @ptrFromInt(ptr));
+const ptr = comptime alignment.backward(math.maxInt(usize));
+return @as([*]align(alignment.toByteUnits()) u8, @ptrFromInt(ptr));
 }

-const byte_ptr = self.rawAlloc(byte_count, .fromByteUnits(alignment), return_address) orelse return Error.OutOfMemory;
+const byte_ptr = self.rawAlloc(byte_count, alignment, return_address) orelse return Error.OutOfMemory;
 @memset(byte_ptr[0..byte_count], undefined);
 return @alignCast(byte_ptr);
 }

@@ -378,7 +389,7 @@ pub fn reallocAdvanced(
 const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
 const T = Slice.child;
 if (old_mem.len == 0) {
-return self.allocAdvancedWithRetAddr(T, Slice.alignment, new_n, return_address);
+return self.allocAdvancedWithRetAddr(T, .fromByteUnits(Slice.alignment), new_n, return_address);
 }
 if (new_n == 0) {
 self.free(old_mem);
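The Allocator entry points keep the optional-alignment convention: null still means the natural @alignOf(T); only the non-null payload changes from u29 to Alignment, and the return types re-expand it with toByteUnits. The caller's view (illustrative test, not part of the commit):

    const std = @import("std");

    test "alignedAlloc with the enum parameter" {
        const gpa = std.testing.allocator;
        // explicit over-alignment; returns []align(64) u8
        const a = try gpa.alignedAlloc(u8, .@"64", 100);
        defer gpa.free(a);
        try std.testing.expect(std.mem.isAligned(@intFromPtr(a.ptr), 64));
        // null still means natural alignment, as before
        const b = try gpa.alignedAlloc(u32, null, 10);
        defer gpa.free(b);
        try std.testing.expectEqual(@as(usize, 10), b.len);
    }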
@@ -144,7 +144,7 @@ test TrailerFlags {
 .b = true,
 .c = true,
 });
-const slice = try testing.allocator.alignedAlloc(u8, 8, flags.sizeInBytes());
+const slice = try testing.allocator.alignedAlloc(u8, .@"8", flags.sizeInBytes());
 defer testing.allocator.free(slice);

 flags.set(slice.ptr, .b, false);
@@ -350,11 +350,7 @@ pub fn MultiArrayList(comptime T: type) type {
 assert(new_len <= self.capacity);
 assert(new_len <= self.len);

-const other_bytes = gpa.alignedAlloc(
-u8,
-@alignOf(Elem),
-capacityInBytes(new_len),
-) catch {
+const other_bytes = gpa.alignedAlloc(u8, .of(Elem), capacityInBytes(new_len)) catch {
 const self_slice = self.slice();
 inline for (fields, 0..) |field_info, i| {
 if (@sizeOf(field_info.type) != 0) {

@@ -440,11 +436,7 @@ pub fn MultiArrayList(comptime T: type) type {
 /// `new_capacity` must be greater or equal to `len`.
 pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
 assert(new_capacity >= self.len);
-const new_bytes = try gpa.alignedAlloc(
-u8,
-@alignOf(Elem),
-capacityInBytes(new_capacity),
-);
+const new_bytes = try gpa.alignedAlloc(u8, .of(Elem), capacityInBytes(new_capacity));
 if (self.len == 0) {
 gpa.free(self.allocatedBytes());
 self.bytes = new_bytes.ptr;
@@ -1256,7 +1256,7 @@ pub fn argsAlloc(allocator: Allocator) ![][:0]u8 {
 const slice_sizes = slice_list.items;
 const slice_list_bytes = try math.mul(usize, @sizeOf([]u8), slice_sizes.len);
 const total_bytes = try math.add(usize, slice_list_bytes, contents_slice.len);
-const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
+const buf = try allocator.alignedAlloc(u8, .of([]u8), total_bytes);
 errdefer allocator.free(buf);

 const result_slice_list = mem.bytesAsSlice([:0]u8, buf[0..slice_list_bytes]);
@@ -543,7 +543,7 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, input: std.fs.File, size_hint: ?
 gpa,
 max_src_size,
 size_hint,
-@alignOf(u8),
+.of(u8),
 0,
 ) catch |err| switch (err) {
 error.ConnectionResetByPeer => unreachable,
@@ -679,7 +679,7 @@ const Parser = struct {
 const slice = try self.gpa.allocWithOptions(
 pointer.child,
 nodes.len,
-pointer.alignment,
+.fromByteUnits(pointer.alignment),
 pointer.sentinel(),
 );
 errdefer self.gpa.free(slice);
@@ -255,7 +255,7 @@ pub fn trackZir(
 const new_map_capacity = map_header.capacity * 2;
 const new_map_buf = try arena.allocator().alignedAlloc(
 u8,
-Map.alignment,
+.fromByteUnits(Map.alignment),
 Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
 );
 const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };

@@ -350,7 +350,7 @@ pub fn rehashTrackedInsts(
 defer arena_state.* = arena.state;
 const new_map_buf = try arena.allocator().alignedAlloc(
 u8,
-Map.alignment,
+.fromByteUnits(Map.alignment),
 Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
 );
 const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };

@@ -1296,7 +1296,7 @@ const Local = struct {
 defer mutable.arena.* = arena.state;
 const buf = try arena.allocator().alignedAlloc(
 u8,
-alignment,
+.fromByteUnits(alignment),
 bytes_offset + View.capacityInBytes(capacity),
 );
 var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) };

@@ -7547,7 +7547,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
 }
 const new_map_buf = try arena.allocator().alignedAlloc(
 u8,
-Map.alignment,
+.fromByteUnits(Map.alignment),
 Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
 );
 const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };

@@ -11666,7 +11666,7 @@ pub fn getOrPutTrailingString(
 const new_map_capacity = map_header.capacity * 2;
 const new_map_buf = try arena.allocator().alignedAlloc(
 u8,
-Map.alignment,
+.fromByteUnits(Map.alignment),
 Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
 );
 const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };

@@ -12586,7 +12586,7 @@ const GlobalErrorSet = struct {
 const new_map_capacity = map_header.capacity * 2;
 const new_map_buf = try arena.allocator().alignedAlloc(
 u8,
-Map.alignment,
+.fromByteUnits(Map.alignment),
 Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
 );
 const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
@@ -640,7 +640,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
 try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
 Manifest.max_bytes,
 null,
-1,
+.@"1",
 0,
 ) catch |err| switch (err) {
 error.FileNotFound => return,
@@ -7510,7 +7510,7 @@ fn loadManifest(
 Package.Manifest.basename,
 Package.Manifest.max_bytes,
 null,
-1,
+.@"1",
 0,
 ) catch |err| switch (err) {
 error.FileNotFound => {
@@ -358,7 +358,7 @@ fn addFromDirInner(
 current_file.* = filename;

 const max_file_size = 10 * 1024 * 1024;
-const src = try iterable_dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, 1, 0);
+const src = try iterable_dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, .@"1", 0);

 // Parse the manifest
 var manifest = try TestManifest.parse(ctx.arena, src);