Mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 05:44:20 +00:00
Merge pull request #20511 from archbirdplus
runtime page size detection
rework GeneralPurposeAllocator to reduce active mapping count
Allocator VTable API update
This commit is contained in: commit 6a6e72fff8
44 changed files with 2989 additions and 2718 deletions
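Overview of the change shown by the hunks below: the old comptime constant std.mem.page_size is replaced by two comptime bounds, std.heap.page_size_min and std.heap.page_size_max, plus a runtime query std.heap.pageSize(). A minimal migration sketch, based only on this diff (roundToPages is a hypothetical helper name used for illustration):

const std = @import("std");

// Pointer and slice alignments that used std.mem.page_size now carry the
// comptime lower bound, while byte counts that must match the real page
// size are computed at runtime.
var mapped: []align(std.heap.page_size_min) u8 = &[_]u8{};

// Illustrative helper, not part of std.
fn roundToPages(len: usize) usize {
    const page_size = std.heap.pageSize(); // runtime page size
    return std.mem.alignForward(usize, len, page_size);
}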
@ -480,7 +480,7 @@ pub const MemoryMappedList = struct {
|
|||
/// of this ArrayList in accordance with the respective documentation. In
|
||||
/// all cases, "invalidated" means that the memory has been passed to this
|
||||
/// allocator's resize or free function.
|
||||
items: []align(std.mem.page_size) volatile u8,
|
||||
items: []align(std.heap.page_size_min) volatile u8,
|
||||
/// How many bytes this list can hold without allocating additional memory.
|
||||
capacity: usize,
|
||||
@ -41,7 +41,7 @@ const fuzzer_arch_os_abi = "wasm32-freestanding";
|
|||
const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
|
||||
|
||||
const CoverageMap = struct {
|
||||
mapped_memory: []align(std.mem.page_size) const u8,
|
||||
mapped_memory: []align(std.heap.page_size_min) const u8,
|
||||
coverage: Coverage,
|
||||
source_locations: []Coverage.SourceLocation,
|
||||
/// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
|
||||
@ -769,7 +769,7 @@ const PosixThreadImpl = struct {
|
|||
// Use the same set of parameters used by the libc-less impl.
|
||||
const stack_size = @max(config.stack_size, 16 * 1024);
|
||||
assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
|
||||
assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
|
||||
assert(c.pthread_attr_setguardsize(&attr, std.heap.pageSize()) == .SUCCESS);
|
||||
|
||||
var handle: c.pthread_t = undefined;
|
||||
switch (c.pthread_create(
|
||||
|
|
@ -1155,7 +1155,7 @@ const LinuxThreadImpl = struct {
|
|||
completion: Completion = Completion.init(.running),
|
||||
child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
|
||||
parent_tid: i32 = undefined,
|
||||
mapped: []align(std.mem.page_size) u8,
|
||||
mapped: []align(std.heap.page_size_min) u8,
|
||||
|
||||
/// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
|
||||
/// Ported over from musl libc's pthread detached implementation:
|
||||
|
|
@ -1362,7 +1362,7 @@ const LinuxThreadImpl = struct {
|
|||
};
|
||||
|
||||
fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
|
||||
const page_size = std.mem.page_size;
|
||||
const page_size = std.heap.pageSize();
|
||||
const Args = @TypeOf(args);
|
||||
const Instance = struct {
|
||||
fn_args: Args,
|
||||
@ -105,21 +105,19 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
|
|||
return result;
|
||||
}
|
||||
|
||||
/// The caller owns the returned memory. Empties this ArrayList,
|
||||
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
|
||||
/// The caller owns the returned memory. Empties this ArrayList.
|
||||
/// Its capacity is cleared, making `deinit` safe but unnecessary to call.
|
||||
pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
|
||||
const allocator = self.allocator;
|
||||
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (allocator.resize(old_memory, self.items.len)) {
|
||||
const result = self.items;
|
||||
if (allocator.remap(old_memory, self.items.len)) |new_items| {
|
||||
self.* = init(allocator);
|
||||
return result;
|
||||
return new_items;
|
||||
}
|
||||
|
||||
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
|
||||
@memcpy(new_memory, self.items);
|
||||
@memset(self.items, undefined);
|
||||
self.clearAndFree();
|
||||
return new_memory;
|
||||
}
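The toOwnedSlice hunks above swap Allocator.resize, which returns a bool and never moves the allocation, for the new Allocator.remap, which returns the possibly relocated slice or null. A hedged sketch of the calling pattern, with the fallback copy mirroring the code above (shrinkToOwned is a hypothetical helper, not part of the diff):

const std = @import("std");

// Illustrative helper, not part of std.
fn shrinkToOwned(allocator: std.mem.Allocator, buf: []u8, len: usize) ![]u8 {
    // remap may move the allocation (for example via mremap) and hands back
    // the new slice; null means it could neither resize in place nor move.
    if (allocator.remap(buf, len)) |relocated| return relocated;
    // Fall back to allocate-copy-free, just like toOwnedSlice above.
    const new_memory = try allocator.alloc(u8, len);
    @memcpy(new_memory, buf[0..len]);
    allocator.free(buf);
    return new_memory;
}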
|
||||
|
|
@ -185,8 +183,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
|
|||
// extra capacity.
|
||||
const new_capacity = growCapacity(self.capacity, new_len);
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (self.allocator.resize(old_memory, new_capacity)) {
|
||||
self.capacity = new_capacity;
|
||||
if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
|
||||
self.items.ptr = new_memory.ptr;
|
||||
self.capacity = new_memory.len;
|
||||
return addManyAtAssumeCapacity(self, index, count);
|
||||
}
|
||||
|
||||
|
|
@ -468,8 +467,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
|
|||
// the allocator implementation would pointlessly copy our
|
||||
// extra capacity.
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (self.allocator.resize(old_memory, new_capacity)) {
|
||||
self.capacity = new_capacity;
|
||||
if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
|
||||
self.items.ptr = new_memory.ptr;
|
||||
self.capacity = new_memory.len;
|
||||
} else {
|
||||
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
|
||||
@memcpy(new_memory[0..self.items.len], self.items);
|
||||
|
|
@ -707,15 +707,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
|
|||
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
|
||||
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (allocator.resize(old_memory, self.items.len)) {
|
||||
const result = self.items;
|
||||
if (allocator.remap(old_memory, self.items.len)) |new_items| {
|
||||
self.* = .empty;
|
||||
return result;
|
||||
return new_items;
|
||||
}
|
||||
|
||||
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
|
||||
@memcpy(new_memory, self.items);
|
||||
@memset(self.items, undefined);
|
||||
self.clearAndFree(allocator);
|
||||
return new_memory;
|
||||
}
|
||||
|
|
@ -1031,9 +1029,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
|
|||
}
|
||||
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (allocator.resize(old_memory, new_len)) {
|
||||
self.capacity = new_len;
|
||||
self.items.len = new_len;
|
||||
if (allocator.remap(old_memory, new_len)) |new_items| {
|
||||
self.capacity = new_items.len;
|
||||
self.items = new_items;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1099,8 +1097,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
|
|||
// the allocator implementation would pointlessly copy our
|
||||
// extra capacity.
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (allocator.resize(old_memory, new_capacity)) {
|
||||
self.capacity = new_capacity;
|
||||
if (allocator.remap(old_memory, new_capacity)) |new_memory| {
|
||||
self.items.ptr = new_memory.ptr;
|
||||
self.capacity = new_memory.len;
|
||||
} else {
|
||||
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
|
||||
@memcpy(new_memory[0..self.items.len], self.items);
|
||||
@ -3,7 +3,7 @@ const builtin = @import("builtin");
|
|||
const c = @This();
|
||||
const maxInt = std.math.maxInt;
|
||||
const assert = std.debug.assert;
|
||||
const page_size = std.mem.page_size;
|
||||
const page_size = std.heap.page_size_min;
|
||||
const native_abi = builtin.abi;
|
||||
const native_arch = builtin.cpu.arch;
|
||||
const native_os = builtin.os.tag;
|
||||
|
|
@ -2227,6 +2227,39 @@ pub const SC = switch (native_os) {
|
|||
.linux => linux.SC,
|
||||
else => void,
|
||||
};
|
||||
|
||||
pub const _SC = switch (native_os) {
|
||||
.driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
|
||||
PAGESIZE = 29,
|
||||
},
|
||||
.dragonfly => enum(c_int) {
|
||||
PAGESIZE = 47,
|
||||
},
|
||||
.freebsd => enum(c_int) {
|
||||
PAGESIZE = 47,
|
||||
},
|
||||
.fuchsia => enum(c_int) {
|
||||
PAGESIZE = 30,
|
||||
},
|
||||
.haiku => enum(c_int) {
|
||||
PAGESIZE = 27,
|
||||
},
|
||||
.linux => enum(c_int) {
|
||||
PAGESIZE = 30,
|
||||
},
|
||||
.netbsd => enum(c_int) {
|
||||
PAGESIZE = 28,
|
||||
},
|
||||
.openbsd => enum(c_int) {
|
||||
PAGESIZE = 28,
|
||||
},
|
||||
.solaris, .illumos => enum(c_int) {
|
||||
PAGESIZE = 11,
|
||||
NPROCESSORS_ONLN = 15,
|
||||
},
|
||||
else => void,
|
||||
};
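These _SC values back the runtime page size detection named in the commit title: on libc targets the page size can be read through sysconf. A hedged sketch using names that appear in this diff (std.c.sysconf, std.c._SC.PAGESIZE); the fallback to page_size_min is an assumption for illustration, not the exact std.heap logic:

const std = @import("std");

// Illustrative helper, not part of std; only meaningful on targets where
// _SC is a real enum (see the switch above).
fn sysconfPageSize() usize {
    const rc = std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE));
    if (rc > 0) return @intCast(rc);
    return std.heap.page_size_min; // assumed fallback for this sketch
}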
|
||||
|
||||
pub const SEEK = switch (native_os) {
|
||||
.linux => linux.SEEK,
|
||||
.emscripten => emscripten.SEEK,
|
||||
|
|
@ -7834,6 +7867,11 @@ pub const MAP = switch (native_os) {
|
|||
else => void,
|
||||
};
|
||||
|
||||
pub const MREMAP = switch (native_os) {
|
||||
.linux => linux.MREMAP,
|
||||
else => void,
|
||||
};
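MREMAP is exposed here so the page allocator can grow or shrink a mapping without copying; the realloc path later in this diff calls posix.mremap whenever posix.MREMAP is not void. A hedged sketch of that Linux-only path (growMapping is a hypothetical helper):

const std = @import("std");
const posix = std.posix;

// Illustrative helper, not part of std.
fn growMapping(memory: []align(std.heap.page_size_min) u8, new_len: usize) ?[*]u8 {
    if (posix.MREMAP != void) {
        // MAYMOVE lets the kernel relocate the mapping when it cannot grow in place.
        const new_memory = posix.mremap(memory.ptr, memory.len, new_len, .{ .MAYMOVE = true }, null) catch return null;
        return new_memory.ptr;
    }
    return null; // no mremap on this target
}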
|
||||
|
||||
/// Used by libc to communicate failure. Not actually part of the underlying syscall.
|
||||
pub const MAP_FAILED: *anyopaque = @ptrFromInt(maxInt(usize));
|
||||
|
||||
|
|
@ -9232,7 +9270,7 @@ pub extern "c" fn getpwnam(name: [*:0]const u8) ?*passwd;
|
|||
pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
|
||||
pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
|
||||
pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
|
||||
pub extern "c" fn mmap64(addr: ?*align(std.mem.page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
|
||||
pub extern "c" fn mmap64(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
|
||||
pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
|
||||
pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
|
||||
pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
|
||||
|
|
@ -9324,13 +9362,13 @@ pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int;
|
|||
|
||||
pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
|
||||
pub extern "c" fn mincore(
|
||||
addr: *align(std.mem.page_size) anyopaque,
|
||||
addr: *align(page_size) anyopaque,
|
||||
length: usize,
|
||||
vec: [*]u8,
|
||||
) c_int;
|
||||
|
||||
pub extern "c" fn madvise(
|
||||
addr: *align(std.mem.page_size) anyopaque,
|
||||
addr: *align(page_size) anyopaque,
|
||||
length: usize,
|
||||
advice: u32,
|
||||
) c_int;
|
||||
|
|
@ -9428,6 +9466,10 @@ pub const posix_memalign = switch (native_os) {
|
|||
.dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => private.posix_memalign,
|
||||
else => {},
|
||||
};
|
||||
pub const sysconf = switch (native_os) {
|
||||
.solaris => solaris.sysconf,
|
||||
else => private.sysconf,
|
||||
};
|
||||
|
||||
pub const sf_hdtr = switch (native_os) {
|
||||
.freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
|
||||
|
|
@ -9471,6 +9513,7 @@ pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
|
|||
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
|
||||
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
|
||||
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
|
||||
pub extern "c" fn mremap(addr: ?*align(page_size) const anyopaque, old_len: usize, new_len: usize, flags: MREMAP, ...) *anyopaque;
|
||||
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
|
||||
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
|
||||
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
|
||||
|
|
@ -9823,7 +9866,6 @@ pub const SCM = solaris.SCM;
|
|||
pub const SETCONTEXT = solaris.SETCONTEXT;
|
||||
pub const SETUSTACK = solaris.GETUSTACK;
|
||||
pub const SFD = solaris.SFD;
|
||||
pub const _SC = solaris._SC;
|
||||
pub const cmsghdr = solaris.cmsghdr;
|
||||
pub const ctid_t = solaris.ctid_t;
|
||||
pub const file_obj = solaris.file_obj;
|
||||
|
|
@ -9840,7 +9882,6 @@ pub const priority = solaris.priority;
|
|||
pub const procfs = solaris.procfs;
|
||||
pub const projid_t = solaris.projid_t;
|
||||
pub const signalfd_siginfo = solaris.signalfd_siginfo;
|
||||
pub const sysconf = solaris.sysconf;
|
||||
pub const taskid_t = solaris.taskid_t;
|
||||
pub const zoneid_t = solaris.zoneid_t;
|
||||
|
||||
|
|
@ -9997,6 +10038,7 @@ pub const host_t = darwin.host_t;
|
|||
pub const ipc_space_t = darwin.ipc_space_t;
|
||||
pub const ipc_space_port_t = darwin.ipc_space_port_t;
|
||||
pub const kern_return_t = darwin.kern_return_t;
|
||||
pub const vm_size_t = darwin.vm_size_t;
|
||||
pub const kevent64 = darwin.kevent64;
|
||||
pub const kevent64_s = darwin.kevent64_s;
|
||||
pub const mach_absolute_time = darwin.mach_absolute_time;
|
||||
|
|
@ -10168,6 +10210,7 @@ const private = struct {
|
|||
extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
|
||||
extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
|
||||
extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
|
||||
extern "c" fn sysconf(sc: c_int) c_long;
|
||||
|
||||
extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
|
||||
extern "c" fn getcontext(ucp: *ucontext_t) c_int;
|
||||
|
|
@ -10202,7 +10245,7 @@ const private = struct {
|
|||
extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
|
||||
extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
|
||||
extern "c" fn __libc_thr_yield() c_int;
|
||||
extern "c" fn __msync13(addr: *align(std.mem.page_size) const anyopaque, len: usize, flags: c_int) c_int;
|
||||
extern "c" fn __msync13(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
|
||||
extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
|
||||
extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
|
||||
extern "c" fn __sigfillset14(set: ?*sigset_t) void;
|
||||
|
|
|
|||
|
|
@ -154,10 +154,6 @@ pub const AF_SUN = struct {
|
|||
pub const NOPLM = 0x00000004;
|
||||
};
|
||||
|
||||
pub const _SC = struct {
|
||||
pub const NPROCESSORS_ONLN = 15;
|
||||
};
|
||||
|
||||
pub const procfs = struct {
|
||||
pub const misc_header = extern struct {
|
||||
size: u32,
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ var install_atfork_handler = std.once(struct {
|
|||
}
|
||||
}.do);
|
||||
|
||||
threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
|
||||
threadlocal var wipe_mem: []align(std.heap.page_size_min) u8 = &[_]u8{};
|
||||
|
||||
fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
|
||||
if (os_has_arc4random) {
|
||||
|
|
@ -77,7 +77,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
|
|||
} else {
|
||||
// Use a static thread-local buffer.
|
||||
const S = struct {
|
||||
threadlocal var buf: Context align(mem.page_size) = .{
|
||||
threadlocal var buf: Context align(std.heap.page_size_min) = .{
|
||||
.init_state = .uninitialized,
|
||||
.rng = undefined,
|
||||
};
|
||||
|
|
@ -85,7 +85,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
|
|||
wipe_mem = mem.asBytes(&S.buf);
|
||||
}
|
||||
}
|
||||
const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
|
||||
const ctx: *Context = @ptrCast(wipe_mem.ptr);
|
||||
|
||||
switch (ctx.init_state) {
|
||||
.uninitialized => {
|
||||
|
|
@ -141,7 +141,7 @@ fn childAtForkHandler() callconv(.c) void {
|
|||
}
|
||||
|
||||
fn fillWithCsprng(buffer: []u8) void {
|
||||
const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
|
||||
const ctx: *Context = @ptrCast(wipe_mem.ptr);
|
||||
return ctx.rng.fill(buffer);
|
||||
}
|
||||
|
||||
|
|
@ -157,7 +157,7 @@ fn initAndFill(buffer: []u8) void {
|
|||
// the `std.options.cryptoRandomSeed` function is provided.
|
||||
std.options.cryptoRandomSeed(&seed);
|
||||
|
||||
const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
|
||||
const ctx: *Context = @ptrCast(wipe_mem.ptr);
|
||||
ctx.rng = Rng.init(seed);
|
||||
std.crypto.secureZero(u8, &seed);
|
||||
|
||||
|
|
|
|||
|
|
@ -1134,7 +1134,7 @@ fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation)
|
|||
defer f.close();
|
||||
// TODO fstat and make sure that the file has the correct size
|
||||
|
||||
var buf: [mem.page_size]u8 = undefined;
|
||||
var buf: [4096]u8 = undefined;
|
||||
var amt_read = try f.read(buf[0..]);
|
||||
const line_start = seek: {
|
||||
var current_line_start: usize = 0;
|
||||
|
|
@ -1237,7 +1237,7 @@ test printLineFromFileAnyOs {
|
|||
|
||||
const overlap = 10;
|
||||
var writer = file.writer();
|
||||
try writer.writeByteNTimes('a', mem.page_size - overlap);
|
||||
try writer.writeByteNTimes('a', std.heap.page_size_min - overlap);
|
||||
try writer.writeByte('\n');
|
||||
try writer.writeByteNTimes('a', overlap);
|
||||
|
||||
|
|
@ -1252,10 +1252,10 @@ test printLineFromFileAnyOs {
|
|||
defer allocator.free(path);
|
||||
|
||||
var writer = file.writer();
|
||||
try writer.writeByteNTimes('a', mem.page_size);
|
||||
try writer.writeByteNTimes('a', std.heap.page_size_max);
|
||||
|
||||
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
|
||||
try expectEqualStrings(("a" ** mem.page_size) ++ "\n", output.items);
|
||||
try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", output.items);
|
||||
output.clearRetainingCapacity();
|
||||
}
|
||||
{
|
||||
|
|
@ -1265,18 +1265,18 @@ test printLineFromFileAnyOs {
|
|||
defer allocator.free(path);
|
||||
|
||||
var writer = file.writer();
|
||||
try writer.writeByteNTimes('a', 3 * mem.page_size);
|
||||
try writer.writeByteNTimes('a', 3 * std.heap.page_size_max);
|
||||
|
||||
try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
|
||||
|
||||
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
|
||||
try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "\n", output.items);
|
||||
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", output.items);
|
||||
output.clearRetainingCapacity();
|
||||
|
||||
try writer.writeAll("a\na");
|
||||
|
||||
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
|
||||
try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "a\n", output.items);
|
||||
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", output.items);
|
||||
output.clearRetainingCapacity();
|
||||
|
||||
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
|
||||
|
|
@ -1290,7 +1290,7 @@ test printLineFromFileAnyOs {
|
|||
defer allocator.free(path);
|
||||
|
||||
var writer = file.writer();
|
||||
const real_file_start = 3 * mem.page_size;
|
||||
const real_file_start = 3 * std.heap.page_size_min;
|
||||
try writer.writeByteNTimes('\n', real_file_start);
|
||||
try writer.writeAll("abc\ndef");
|
||||
|
||||
|
|
|
|||
|
|
@ -2120,8 +2120,8 @@ fn pcRelBase(field_ptr: usize, pc_rel_offset: i64) !usize {
|
|||
pub const ElfModule = struct {
|
||||
base_address: usize,
|
||||
dwarf: Dwarf,
|
||||
mapped_memory: []align(std.mem.page_size) const u8,
|
||||
external_mapped_memory: ?[]align(std.mem.page_size) const u8,
|
||||
mapped_memory: []align(std.heap.page_size_min) const u8,
|
||||
external_mapped_memory: ?[]align(std.heap.page_size_min) const u8,
|
||||
|
||||
pub fn deinit(self: *@This(), allocator: Allocator) void {
|
||||
self.dwarf.deinit(allocator);
|
||||
|
|
@ -2167,11 +2167,11 @@ pub const ElfModule = struct {
|
|||
/// sections from an external file.
|
||||
pub fn load(
|
||||
gpa: Allocator,
|
||||
mapped_mem: []align(std.mem.page_size) const u8,
|
||||
mapped_mem: []align(std.heap.page_size_min) const u8,
|
||||
build_id: ?[]const u8,
|
||||
expected_crc: ?u32,
|
||||
parent_sections: *Dwarf.SectionArray,
|
||||
parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
|
||||
parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
|
||||
elf_filename: ?[]const u8,
|
||||
) LoadError!Dwarf.ElfModule {
|
||||
if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
|
||||
|
|
@ -2423,7 +2423,7 @@ pub const ElfModule = struct {
|
|||
build_id: ?[]const u8,
|
||||
expected_crc: ?u32,
|
||||
parent_sections: *Dwarf.SectionArray,
|
||||
parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
|
||||
parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
|
||||
) LoadError!Dwarf.ElfModule {
|
||||
const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => return missing(),
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ const std = @import("../std.zig");
|
|||
const Allocator = std.mem.Allocator;
|
||||
const Path = std.Build.Cache.Path;
|
||||
const Dwarf = std.debug.Dwarf;
|
||||
const page_size = std.mem.page_size;
|
||||
const assert = std.debug.assert;
|
||||
const Coverage = std.debug.Coverage;
|
||||
const SourceLocation = std.debug.Coverage.SourceLocation;
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ const native_os = builtin.os.tag;
|
|||
const std = @import("../std.zig");
|
||||
const posix = std.posix;
|
||||
const File = std.fs.File;
|
||||
const page_size = std.mem.page_size;
|
||||
const page_size_min = std.heap.page_size_min;
|
||||
|
||||
const MemoryAccessor = @This();
|
||||
|
||||
|
|
@ -93,9 +93,10 @@ pub fn isValidMemory(address: usize) bool {
|
|||
// We are unable to determine validity of memory for freestanding targets
|
||||
if (native_os == .freestanding or native_os == .other or native_os == .uefi) return true;
|
||||
|
||||
const aligned_address = address & ~@as(usize, @intCast((page_size - 1)));
|
||||
const page_size = std.heap.pageSize();
|
||||
const aligned_address = address & ~(page_size - 1);
|
||||
if (aligned_address == 0) return false;
|
||||
const aligned_memory = @as([*]align(page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
|
||||
const aligned_memory = @as([*]align(page_size_min) u8, @ptrFromInt(aligned_address))[0..page_size];
|
||||
|
||||
if (native_os == .windows) {
|
||||
const windows = std.os.windows;
|
||||
|
|
@ -104,7 +105,7 @@ pub fn isValidMemory(address: usize) bool {
|
|||
|
||||
// The only error this function can throw is ERROR_INVALID_PARAMETER.
|
||||
// It will be thrown if an invalid address is supplied.
|
||||
const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
|
||||
const rc = windows.VirtualQuery(@ptrCast(aligned_memory), &memory_info, aligned_memory.len) catch {
|
||||
return false;
|
||||
};
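The hunks above show the split this PR introduces: address arithmetic uses the runtime std.heap.pageSize(), while pointer types can only be annotated with the comptime lower bound std.heap.page_size_min. A hedged sketch of that pattern (pageSliceContaining is a hypothetical helper; the caller is assumed to pass a nonzero address, as isValidMemory checks above):

const std = @import("std");

// Illustrative helper, not part of std.
fn pageSliceContaining(address: usize) []align(std.heap.page_size_min) u8 {
    const page_size = std.heap.pageSize(); // runtime value, always a power of two
    const aligned_address = address & ~(page_size - 1);
    const ptr: [*]align(std.heap.page_size_min) u8 = @ptrFromInt(aligned_address);
    return ptr[0..page_size];
}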
|
||||
|
||||
|
|
|
|||
|
|
@ -504,7 +504,7 @@ pub const Module = switch (native_os) {
|
|||
.macos, .ios, .watchos, .tvos, .visionos => struct {
|
||||
base_address: usize,
|
||||
vmaddr_slide: usize,
|
||||
mapped_memory: []align(mem.page_size) const u8,
|
||||
mapped_memory: []align(std.heap.page_size_min) const u8,
|
||||
symbols: []const MachoSymbol,
|
||||
strings: [:0]const u8,
|
||||
ofiles: OFileTable,
|
||||
|
|
@ -1046,7 +1046,7 @@ pub fn readElfDebugInfo(
|
|||
build_id: ?[]const u8,
|
||||
expected_crc: ?u32,
|
||||
parent_sections: *Dwarf.SectionArray,
|
||||
parent_mapped_mem: ?[]align(mem.page_size) const u8,
|
||||
parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
|
||||
) !Dwarf.ElfModule {
|
||||
nosuspend {
|
||||
const elf_file = (if (elf_filename) |filename| blk: {
|
||||
|
|
@ -1088,7 +1088,7 @@ const MachoSymbol = struct {
|
|||
|
||||
/// Takes ownership of file, even on error.
|
||||
/// TODO it's weird to take ownership even on error, rework this code.
|
||||
fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
|
||||
fn mapWholeFile(file: File) ![]align(std.heap.page_size_min) const u8 {
|
||||
nosuspend {
|
||||
defer file.close();
|
||||
|
||||
|
|
|
|||
|
|
@ -143,7 +143,7 @@ pub const ElfDynLib = struct {
|
|||
hashtab: [*]posix.Elf_Symndx,
|
||||
versym: ?[*]elf.Versym,
|
||||
verdef: ?*elf.Verdef,
|
||||
memory: []align(mem.page_size) u8,
|
||||
memory: []align(std.heap.page_size_min) u8,
|
||||
|
||||
pub const Error = ElfDynLibError;
|
||||
|
||||
|
|
@ -219,11 +219,13 @@ pub const ElfDynLib = struct {
|
|||
const stat = try file.stat();
|
||||
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
|
||||
|
||||
const page_size = std.heap.pageSize();
|
||||
|
||||
// This one is to read the ELF info. We do more mmapping later
|
||||
// corresponding to the actual LOAD sections.
|
||||
const file_bytes = try posix.mmap(
|
||||
null,
|
||||
mem.alignForward(usize, size, mem.page_size),
|
||||
mem.alignForward(usize, size, page_size),
|
||||
posix.PROT.READ,
|
||||
.{ .TYPE = .PRIVATE },
|
||||
fd,
|
||||
|
|
@ -284,10 +286,10 @@ pub const ElfDynLib = struct {
|
|||
elf.PT_LOAD => {
|
||||
// The VirtAddr may not be page-aligned; in such case there will be
|
||||
// extra nonsense mapped before/after the VirtAddr,MemSiz
|
||||
const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
|
||||
const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, page_size) - 1);
|
||||
const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
|
||||
const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
|
||||
const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
|
||||
const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, page_size);
|
||||
const ptr = @as([*]align(std.heap.page_size_min) u8, @ptrFromInt(aligned_addr));
|
||||
const prot = elfToMmapProt(ph.p_flags);
|
||||
if ((ph.p_flags & elf.PF_W) == 0) {
|
||||
// If it does not need write access, it can be mapped from the fd.
|
||||
|
|
|
|||
|
|
@ -91,7 +91,7 @@ pub fn LinearFifo(
|
|||
mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
|
||||
self.head = 0;
|
||||
} else {
|
||||
var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
|
||||
var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;
|
||||
|
||||
while (self.head != 0) {
|
||||
const n = @min(self.head, tmp.len);
|
||||
|
|
|
|||
|
|
@ -413,10 +413,15 @@ pub fn HashMap(
|
|||
/// If there is an `Entry` with a matching key, it is deleted from
|
||||
/// the hash map, and this function returns true. Otherwise this
|
||||
/// function returns false.
|
||||
///
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn remove(self: *Self, key: K) bool {
|
||||
return self.unmanaged.removeContext(key, self.ctx);
|
||||
}
|
||||
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
|
||||
return self.unmanaged.removeAdapted(key, ctx);
|
||||
}
|
||||
|
|
@ -424,6 +429,9 @@ pub fn HashMap(
|
|||
/// Delete the entry with key pointed to by key_ptr from the hash map.
|
||||
/// key_ptr is assumed to be a valid pointer to a key that is present
|
||||
/// in the hash map.
|
||||
///
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn removeByPtr(self: *Self, key_ptr: *K) void {
|
||||
self.unmanaged.removeByPtr(key_ptr);
|
||||
}
|
||||
|
|
@ -1225,14 +1233,23 @@ pub fn HashMapUnmanaged(
|
|||
/// If there is an `Entry` with a matching key, it is deleted from
|
||||
/// the hash map, and this function returns true. Otherwise this
|
||||
/// function returns false.
|
||||
///
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn remove(self: *Self, key: K) bool {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead.");
|
||||
return self.removeContext(key, undefined);
|
||||
}
|
||||
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn removeContext(self: *Self, key: K, ctx: Context) bool {
|
||||
return self.removeAdapted(key, ctx);
|
||||
}
|
||||
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
|
||||
if (self.getIndex(key, ctx)) |idx| {
|
||||
self.removeByIndex(idx);
|
||||
|
|
@ -1245,6 +1262,9 @@ pub fn HashMapUnmanaged(
|
|||
/// Delete the entry with key pointed to by key_ptr from the hash map.
|
||||
/// key_ptr is assumed to be a valid pointer to a key that is present
|
||||
/// in the hash map.
|
||||
///
|
||||
/// TODO: answer the question in these doc comments, does this
|
||||
/// increase the unused capacity by one?
|
||||
pub fn removeByPtr(self: *Self, key_ptr: *K) void {
|
||||
// TODO: replace with pointer subtraction once supported by zig
|
||||
// if @sizeOf(K) == 0 then there is at most one item in the hash
|
||||
|
|
|
|||
lib/std/heap.zig: 921 changed lines (file diff suppressed because it is too large)
lib/std/heap/FixedBufferAllocator.zig: 230 lines (new file)
|
|
@ -0,0 +1,230 @@
|
|||
const std = @import("../std.zig");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
|
||||
const FixedBufferAllocator = @This();
|
||||
|
||||
end_index: usize,
|
||||
buffer: []u8,
|
||||
|
||||
pub fn init(buffer: []u8) FixedBufferAllocator {
|
||||
return .{
|
||||
.buffer = buffer,
|
||||
.end_index = 0,
|
||||
};
|
||||
}
|
||||
|
||||
/// Using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe.
|
||||
pub fn allocator(self: *FixedBufferAllocator) Allocator {
|
||||
return .{
|
||||
.ptr = self,
|
||||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
|
||||
///
|
||||
/// Using this at the same time as the interface returned by `allocator` is not thread safe.
|
||||
pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
|
||||
return .{
|
||||
.ptr = self,
|
||||
.vtable = &.{
|
||||
.alloc = threadSafeAlloc,
|
||||
.resize = Allocator.noResize,
|
||||
.remap = Allocator.noRemap,
|
||||
.free = Allocator.noFree,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
|
||||
return sliceContainsPtr(self.buffer, ptr);
|
||||
}
|
||||
|
||||
pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
|
||||
return sliceContainsSlice(self.buffer, slice);
|
||||
}
|
||||
|
||||
/// This has false negatives when the last allocation had an
|
||||
/// adjusted_index. In such case we won't be able to determine what the
|
||||
/// last allocation was because the alignForward operation done in alloc is
|
||||
/// not reversible.
|
||||
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
|
||||
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
|
||||
}
|
||||
|
||||
pub fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
|
||||
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
|
||||
_ = ra;
|
||||
const ptr_align = alignment.toByteUnits();
|
||||
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
|
||||
const adjusted_index = self.end_index + adjust_off;
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > self.buffer.len) return null;
|
||||
self.end_index = new_end_index;
|
||||
return self.buffer.ptr + adjusted_index;
|
||||
}
|
||||
|
||||
pub fn resize(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
alignment: mem.Alignment,
|
||||
new_size: usize,
|
||||
return_address: usize,
|
||||
) bool {
|
||||
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
|
||||
_ = alignment;
|
||||
_ = return_address;
|
||||
assert(@inComptime() or self.ownsSlice(buf));
|
||||
|
||||
if (!self.isLastAllocation(buf)) {
|
||||
if (new_size > buf.len) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (new_size <= buf.len) {
|
||||
const sub = buf.len - new_size;
|
||||
self.end_index -= sub;
|
||||
return true;
|
||||
}
|
||||
|
||||
const add = new_size - buf.len;
|
||||
if (add + self.end_index > self.buffer.len) return false;
|
||||
|
||||
self.end_index += add;
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn remap(
|
||||
context: *anyopaque,
|
||||
memory: []u8,
|
||||
alignment: mem.Alignment,
|
||||
new_len: usize,
|
||||
return_address: usize,
|
||||
) ?[*]u8 {
|
||||
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
|
||||
}
|
||||
|
||||
pub fn free(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
alignment: mem.Alignment,
|
||||
return_address: usize,
|
||||
) void {
|
||||
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
|
||||
_ = alignment;
|
||||
_ = return_address;
|
||||
assert(@inComptime() or self.ownsSlice(buf));
|
||||
|
||||
if (self.isLastAllocation(buf)) {
|
||||
self.end_index -= buf.len;
|
||||
}
|
||||
}
|
||||
|
||||
fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
|
||||
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
|
||||
_ = ra;
|
||||
const ptr_align = alignment.toByteUnits();
|
||||
var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
|
||||
while (true) {
|
||||
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
|
||||
const adjusted_index = end_index + adjust_off;
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > self.buffer.len) return null;
|
||||
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
|
||||
return self.buffer[adjusted_index..new_end_index].ptr;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reset(self: *FixedBufferAllocator) void {
|
||||
self.end_index = 0;
|
||||
}
|
||||
|
||||
fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
|
||||
return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
|
||||
@intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
|
||||
}
|
||||
|
||||
fn sliceContainsSlice(container: []u8, slice: []u8) bool {
|
||||
return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
|
||||
(@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
|
||||
}
|
||||
|
||||
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
|
||||
|
||||
test FixedBufferAllocator {
|
||||
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
|
||||
const a = fixed_buffer_allocator.allocator();
|
||||
|
||||
try std.heap.testAllocator(a);
|
||||
try std.heap.testAllocatorAligned(a);
|
||||
try std.heap.testAllocatorLargeAlignment(a);
|
||||
try std.heap.testAllocatorAlignedShrink(a);
|
||||
}
|
||||
|
||||
test reset {
|
||||
var buf: [8]u8 align(@alignOf(u64)) = undefined;
|
||||
var fba = FixedBufferAllocator.init(buf[0..]);
|
||||
const a = fba.allocator();
|
||||
|
||||
const X = 0xeeeeeeeeeeeeeeee;
|
||||
const Y = 0xffffffffffffffff;
|
||||
|
||||
const x = try a.create(u64);
|
||||
x.* = X;
|
||||
try std.testing.expectError(error.OutOfMemory, a.create(u64));
|
||||
|
||||
fba.reset();
|
||||
const y = try a.create(u64);
|
||||
y.* = Y;
|
||||
|
||||
// we expect Y to have overwritten X.
|
||||
try std.testing.expect(x.* == y.*);
|
||||
try std.testing.expect(y.* == Y);
|
||||
}
|
||||
|
||||
test "reuse memory on realloc" {
|
||||
var small_fixed_buffer: [10]u8 = undefined;
|
||||
// check if we re-use the memory
|
||||
{
|
||||
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
|
||||
const a = fixed_buffer_allocator.allocator();
|
||||
|
||||
const slice0 = try a.alloc(u8, 5);
|
||||
try std.testing.expect(slice0.len == 5);
|
||||
const slice1 = try a.realloc(slice0, 10);
|
||||
try std.testing.expect(slice1.ptr == slice0.ptr);
|
||||
try std.testing.expect(slice1.len == 10);
|
||||
try std.testing.expectError(error.OutOfMemory, a.realloc(slice1, 11));
|
||||
}
|
||||
// check that we don't re-use the memory if it's not the most recent block
|
||||
{
|
||||
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
|
||||
const a = fixed_buffer_allocator.allocator();
|
||||
|
||||
var slice0 = try a.alloc(u8, 2);
|
||||
slice0[0] = 1;
|
||||
slice0[1] = 2;
|
||||
const slice1 = try a.alloc(u8, 2);
|
||||
const slice2 = try a.realloc(slice0, 4);
|
||||
try std.testing.expect(slice0.ptr != slice2.ptr);
|
||||
try std.testing.expect(slice1.ptr != slice2.ptr);
|
||||
try std.testing.expect(slice2[0] == 1);
|
||||
try std.testing.expect(slice2[1] == 2);
|
||||
}
|
||||
}
|
||||
|
||||
test "thread safe version" {
|
||||
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
|
||||
|
||||
try std.heap.testAllocator(fixed_buffer_allocator.threadSafeAllocator());
|
||||
try std.heap.testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
|
||||
try std.heap.testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
|
||||
try std.heap.testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
|
||||
}
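FixedBufferAllocator now lives in its own file. A minimal usage sketch, assuming it stays exported as std.heap.FixedBufferAllocator (the typical stack-buffer pattern; not taken from this diff):

const std = @import("std");

// Illustrative example only.
fn example() !void {
    var buf: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const a = fba.allocator();

    // Allocations come straight out of `buf`; exceeding it yields error.OutOfMemory.
    const nums = try a.alloc(u32, 8);
    defer a.free(nums);
    for (nums, 0..) |*n, i| n.* = @intCast(i);
}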
|
||||
|
|
@ -7,107 +7,183 @@ const assert = std.debug.assert;
|
|||
const native_os = builtin.os.tag;
|
||||
const windows = std.os.windows;
|
||||
const posix = std.posix;
|
||||
const page_size_min = std.heap.page_size_min;
|
||||
|
||||
pub const vtable = Allocator.VTable{
|
||||
pub const vtable: Allocator.VTable = .{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.free = free,
|
||||
};
|
||||
|
||||
fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
|
||||
fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
|
||||
_ = context;
|
||||
_ = ra;
|
||||
_ = log2_align;
|
||||
assert(n > 0);
|
||||
if (n > maxInt(usize) - (mem.page_size - 1)) return null;
|
||||
|
||||
const page_size = std.heap.pageSize();
|
||||
if (n >= maxInt(usize) - page_size) return null;
|
||||
const alignment_bytes = alignment.toByteUnits();
|
||||
|
||||
if (native_os == .windows) {
|
||||
// According to official documentation, VirtualAlloc aligns to page
|
||||
// boundary, however, empirically it reserves pages on a 64K boundary.
|
||||
// Since it is very likely the requested alignment will be honored,
|
||||
// this logic first tries a call with exactly the size requested,
|
||||
// before falling back to the loop below.
|
||||
// https://devblogs.microsoft.com/oldnewthing/?p=42223
|
||||
const addr = windows.VirtualAlloc(
|
||||
null,
|
||||
|
||||
// VirtualAlloc will round the length to a multiple of page size.
|
||||
// VirtualAlloc docs: If the lpAddress parameter is NULL, this value is rounded up to the next page boundary
|
||||
// "If the lpAddress parameter is NULL, this value is rounded up to
|
||||
// the next page boundary".
|
||||
n,
|
||||
|
||||
windows.MEM_COMMIT | windows.MEM_RESERVE,
|
||||
windows.PAGE_READWRITE,
|
||||
) catch return null;
|
||||
return @ptrCast(addr);
|
||||
|
||||
if (mem.isAligned(@intFromPtr(addr), alignment_bytes))
|
||||
return @ptrCast(addr);
|
||||
|
||||
// Fallback: reserve a range of memory large enough to find a
|
||||
// sufficiently aligned address, then free the entire range and
|
||||
// immediately allocate the desired subset. Another thread may have won
|
||||
// the race to map the target range, in which case a retry is needed.
|
||||
windows.VirtualFree(addr, 0, windows.MEM_RELEASE);
|
||||
|
||||
const overalloc_len = n + alignment_bytes - page_size;
|
||||
const aligned_len = mem.alignForward(usize, n, page_size);
|
||||
|
||||
while (true) {
|
||||
const reserved_addr = windows.VirtualAlloc(
|
||||
null,
|
||||
overalloc_len,
|
||||
windows.MEM_RESERVE,
|
||||
windows.PAGE_NOACCESS,
|
||||
) catch return null;
|
||||
const aligned_addr = mem.alignForward(usize, @intFromPtr(reserved_addr), alignment_bytes);
|
||||
windows.VirtualFree(reserved_addr, 0, windows.MEM_RELEASE);
|
||||
const ptr = windows.VirtualAlloc(
|
||||
@ptrFromInt(aligned_addr),
|
||||
aligned_len,
|
||||
windows.MEM_COMMIT | windows.MEM_RESERVE,
|
||||
windows.PAGE_READWRITE,
|
||||
) catch continue;
|
||||
return @ptrCast(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
const aligned_len = mem.alignForward(usize, n, mem.page_size);
|
||||
const aligned_len = mem.alignForward(usize, n, page_size);
|
||||
const max_drop_len = alignment_bytes - @min(alignment_bytes, page_size);
|
||||
const overalloc_len = if (max_drop_len <= aligned_len - n)
|
||||
aligned_len
|
||||
else
|
||||
mem.alignForward(usize, aligned_len + max_drop_len, page_size);
|
||||
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
|
||||
const slice = posix.mmap(
|
||||
hint,
|
||||
aligned_len,
|
||||
overalloc_len,
|
||||
posix.PROT.READ | posix.PROT.WRITE,
|
||||
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
|
||||
-1,
|
||||
0,
|
||||
) catch return null;
|
||||
assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
|
||||
const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
|
||||
const result_ptr = mem.alignPointer(slice.ptr, alignment_bytes) orelse return null;
|
||||
// Unmap the extra bytes that were only requested in order to guarantee
|
||||
// that the range of memory we were provided had a proper alignment in it
|
||||
// somewhere. The extra bytes could be at the beginning, or end, or both.
|
||||
const drop_len = result_ptr - slice.ptr;
|
||||
if (drop_len != 0) posix.munmap(slice[0..drop_len]);
|
||||
const remaining_len = overalloc_len - drop_len;
|
||||
if (remaining_len > aligned_len) posix.munmap(@alignCast(result_ptr[aligned_len..remaining_len]));
|
||||
const new_hint: [*]align(page_size_min) u8 = @alignCast(result_ptr + aligned_len);
|
||||
_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
|
||||
return slice.ptr;
|
||||
return result_ptr;
|
||||
}
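The POSIX branch above over-allocates so that an address with the requested alignment is guaranteed to fall inside the mapping, then unmaps the unused head and tail. A worked example of the sizing arithmetic, assuming a 4 KiB page and a 64 KiB alignment request (numbers chosen purely for illustration):

const std = @import("std");
const mem = std.mem;

test "page allocator overallocation sizing (illustrative)" {
    const page_size: usize = 4096; // assumed page size for this example
    const n: usize = 5000;
    const alignment_bytes: usize = 64 * 1024;

    const aligned_len = mem.alignForward(usize, n, page_size); // 8192
    const max_drop_len = alignment_bytes - @min(alignment_bytes, page_size); // 61440
    const overalloc_len = if (max_drop_len <= aligned_len - n)
        aligned_len
    else
        mem.alignForward(usize, aligned_len + max_drop_len, page_size);

    // Up to 61440 bytes may be trimmed from the front, and any tail past
    // aligned_len is unmapped once the aligned pointer is found.
    try std.testing.expectEqual(@as(usize, 69632), overalloc_len);
}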
|
||||
|
||||
fn resize(
|
||||
_: *anyopaque,
|
||||
buf_unaligned: []u8,
|
||||
log2_buf_align: u8,
|
||||
new_size: usize,
|
||||
context: *anyopaque,
|
||||
memory: []u8,
|
||||
alignment: mem.Alignment,
|
||||
new_len: usize,
|
||||
return_address: usize,
|
||||
) bool {
|
||||
_ = log2_buf_align;
|
||||
_ = context;
|
||||
_ = alignment;
|
||||
_ = return_address;
|
||||
const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
|
||||
|
||||
if (native_os == .windows) {
|
||||
if (new_size <= buf_unaligned.len) {
|
||||
const base_addr = @intFromPtr(buf_unaligned.ptr);
|
||||
const old_addr_end = base_addr + buf_unaligned.len;
|
||||
const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
|
||||
if (old_addr_end > new_addr_end) {
|
||||
// For shrinking that is not releasing, we will only
|
||||
// decommit the pages not needed anymore.
|
||||
windows.VirtualFree(
|
||||
@as(*anyopaque, @ptrFromInt(new_addr_end)),
|
||||
old_addr_end - new_addr_end,
|
||||
windows.MEM_DECOMMIT,
|
||||
);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
|
||||
if (new_size_aligned <= old_size_aligned) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
|
||||
if (new_size_aligned == buf_aligned_len)
|
||||
return true;
|
||||
|
||||
if (new_size_aligned < buf_aligned_len) {
|
||||
const ptr = buf_unaligned.ptr + new_size_aligned;
|
||||
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
|
||||
posix.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO: call mremap
|
||||
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
|
||||
return false;
|
||||
return realloc(memory, new_len, false) != null;
|
||||
}
|
||||
|
||||
fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
|
||||
_ = log2_buf_align;
|
||||
pub fn remap(
|
||||
context: *anyopaque,
|
||||
memory: []u8,
|
||||
alignment: mem.Alignment,
|
||||
new_len: usize,
|
||||
return_address: usize,
|
||||
) ?[*]u8 {
|
||||
_ = context;
|
||||
_ = alignment;
|
||||
_ = return_address;
|
||||
return realloc(memory, new_len, true);
|
||||
}
|
||||
|
||||
fn free(context: *anyopaque, slice: []u8, alignment: mem.Alignment, return_address: usize) void {
|
||||
_ = context;
|
||||
_ = alignment;
|
||||
_ = return_address;
|
||||
|
||||
if (native_os == .windows) {
|
||||
windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
|
||||
} else {
|
||||
const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
|
||||
const buf_aligned_len = mem.alignForward(usize, slice.len, std.heap.pageSize());
|
||||
posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
|
||||
}
|
||||
}
|
||||
|
||||
fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
|
||||
const memory: []align(std.heap.page_size_min) u8 = @alignCast(uncasted_memory);
|
||||
const page_size = std.heap.pageSize();
|
||||
const new_size_aligned = mem.alignForward(usize, new_len, page_size);
|
||||
|
||||
if (native_os == .windows) {
|
||||
if (new_len <= memory.len) {
|
||||
const base_addr = @intFromPtr(memory.ptr);
|
||||
const old_addr_end = base_addr + memory.len;
|
||||
const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size);
|
||||
if (old_addr_end > new_addr_end) {
|
||||
// For shrinking that is not releasing, we will only decommit
|
||||
// the pages not needed anymore.
|
||||
windows.VirtualFree(
|
||||
@ptrFromInt(new_addr_end),
|
||||
old_addr_end - new_addr_end,
|
||||
windows.MEM_DECOMMIT,
|
||||
);
|
||||
}
|
||||
return memory.ptr;
|
||||
}
|
||||
const old_size_aligned = mem.alignForward(usize, memory.len, page_size);
|
||||
if (new_size_aligned <= old_size_aligned) {
|
||||
return memory.ptr;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
const page_aligned_len = mem.alignForward(usize, memory.len, page_size);
|
||||
if (new_size_aligned == page_aligned_len)
|
||||
return memory.ptr;
|
||||
|
||||
if (posix.MREMAP != void) {
|
||||
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
|
||||
const new_memory = posix.mremap(memory.ptr, memory.len, new_len, .{ .MAYMOVE = may_move }, null) catch return null;
|
||||
return new_memory.ptr;
|
||||
}
|
||||
|
||||
if (new_size_aligned < page_aligned_len) {
|
||||
const ptr = memory.ptr + new_size_aligned;
|
||||
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
|
||||
posix.munmap(@alignCast(ptr[0 .. page_aligned_len - new_size_aligned]));
|
||||
return memory.ptr;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,35 +9,45 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
|
|||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
|
||||
fn alloc(ctx: *anyopaque, n: usize, alignment: std.mem.Alignment, ra: usize) ?[*]u8 {
|
||||
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
|
||||
self.mutex.lock();
|
||||
defer self.mutex.unlock();
|
||||
|
||||
return self.child_allocator.rawAlloc(n, log2_ptr_align, ra);
|
||||
return self.child_allocator.rawAlloc(n, alignment, ra);
|
||||
}
|
||||
|
||||
fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
|
||||
fn resize(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
|
||||
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
|
||||
|
||||
self.mutex.lock();
|
||||
defer self.mutex.unlock();
|
||||
|
||||
return self.child_allocator.rawResize(buf, log2_buf_align, new_len, ret_addr);
|
||||
return self.child_allocator.rawResize(buf, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
|
||||
fn remap(context: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, return_address: usize) ?[*]u8 {
|
||||
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
|
||||
|
||||
self.mutex.lock();
|
||||
defer self.mutex.unlock();
|
||||
|
||||
return self.child_allocator.rawRemap(memory, alignment, new_len, return_address);
|
||||
}
|
||||
|
||||
fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
|
||||
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
|
||||
|
||||
self.mutex.lock();
|
||||
defer self.mutex.unlock();
|
||||
|
||||
return self.child_allocator.rawFree(buf, log2_buf_align, ret_addr);
|
||||
return self.child_allocator.rawFree(buf, alignment, ret_addr);
|
||||
}
|
||||
|
||||
const std = @import("../std.zig");
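The wrapper above now forwards remap as well and uses the new mem.Alignment-based signatures. A hedged usage sketch, assuming the type remains exported as std.heap.ThreadSafeAllocator and that its mutex field has a default initializer:

const std = @import("std");

// Illustrative example only.
fn example(child: std.mem.Allocator) !void {
    // Every alloc/resize/remap/free call on `a` is serialized behind a mutex.
    // Assumes the mutex field has a default value.
    var tsa: std.heap.ThreadSafeAllocator = .{ .child_allocator = child };
    const a = tsa.allocator();

    const buf = try a.alloc(u8, 64);
    defer a.free(buf);
}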
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ comptime {
|
|||
pub const vtable: Allocator.VTable = .{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.free = free,
|
||||
};
|
||||
|
||||
|
|
@ -40,18 +41,17 @@ const size_class_count = math.log2(bigpage_size) - min_class;
|
|||
/// etc.
|
||||
const big_size_class_count = math.log2(bigpage_count);
|
||||
|
||||
var next_addrs = [1]usize{0} ** size_class_count;
|
||||
var next_addrs: [size_class_count]usize = @splat(0);
|
||||
/// For each size class, points to the freed pointer.
|
||||
var frees = [1]usize{0} ** size_class_count;
|
||||
var frees: [size_class_count]usize = @splat(0);
|
||||
/// For each big size class, points to the freed pointer.
|
||||
var big_frees = [1]usize{0} ** big_size_class_count;
|
||||
var big_frees: [big_size_class_count]usize = @splat(0);
|
||||
|
||||
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
|
||||
fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, return_address: usize) ?[*]u8 {
|
||||
_ = ctx;
|
||||
_ = return_address;
|
||||
// Make room for the freelist next pointer.
|
||||
const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
|
||||
const actual_len = @max(len +| @sizeOf(usize), alignment);
|
||||
const actual_len = @max(len +| @sizeOf(usize), alignment.toByteUnits());
|
||||
const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
|
||||
const class = math.log2(slot_size) - min_class;
|
||||
if (class < size_class_count) {
|
||||
|
|
@ -86,7 +86,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
|
|||
fn resize(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
alignment: mem.Alignment,
|
||||
new_len: usize,
|
||||
return_address: usize,
|
||||
) bool {
|
||||
|
|
@ -94,7 +94,7 @@ fn resize(
|
|||
_ = return_address;
|
||||
// We don't want to move anything from one size class to another, but we
|
||||
// can recover bytes in between powers of two.
|
||||
const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
|
||||
const buf_align = alignment.toByteUnits();
|
||||
const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
|
||||
const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
|
||||
const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
|
||||
|
|
@ -111,15 +111,25 @@ fn resize(
|
|||
}
|
||||
}
|
||||
|
||||
fn remap(
|
||||
context: *anyopaque,
|
||||
memory: []u8,
|
||||
alignment: mem.Alignment,
|
||||
new_len: usize,
|
||||
return_address: usize,
|
||||
) ?[*]u8 {
|
||||
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
|
||||
}
|
||||
|
||||
fn free(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
alignment: mem.Alignment,
|
||||
return_address: usize,
|
||||
) void {
|
||||
_ = ctx;
|
||||
_ = return_address;
|
||||
const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
|
||||
const buf_align = alignment.toByteUnits();
|
||||
const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
|
||||
const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
|
||||
const class = math.log2(slot_size) - min_class;
|
||||
|
|
@ -160,7 +170,7 @@ fn allocBigPages(n: usize) usize {
|
|||
return @as(usize, @intCast(page_index)) * wasm.page_size;
|
||||
}
|
||||
|
||||
const test_ally = Allocator{
|
||||
const test_ally: Allocator = .{
|
||||
.ptr = undefined,
|
||||
.vtable = &vtable,
|
||||
};
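Across this diff the vtable callbacks trade the raw `log2_align: u8` parameter for the typed std.mem.Alignment, and call sites convert with toByteUnits and fromByteUnits. A small illustrative check of that round trip (helper names taken from this diff):

const std = @import("std");

test "mem.Alignment round-trips byte units (illustrative)" {
    const alignment: std.mem.Alignment = .fromByteUnits(16);
    try std.testing.expectEqual(@as(usize, 16), alignment.toByteUnits());
}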
|
||||
|
|
|
|||
|
|
@ -29,12 +29,14 @@ pub const ArenaAllocator = struct {
|
|||
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .remap = remap,
                .free = free,
            },
        };
    }

    const BufNode = std.SinglyLinkedList(usize).Node;
    const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));

    pub fn init(child_allocator: Allocator) ArenaAllocator {
        return (State{}).promote(child_allocator);

@ -47,9 +49,8 @@ pub const ArenaAllocator = struct {
        while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
            self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
            self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
            it = next_it;
        }
    }

@ -120,7 +121,6 @@ pub const ArenaAllocator = struct {
            return true;
        }
        const total_size = requested_capacity + @sizeOf(BufNode);
        const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
        // Free all nodes except for the last one
        var it = self.state.buffer_list.first;
        const maybe_first_node = while (it) |node| {

@ -129,7 +129,7 @@ pub const ArenaAllocator = struct {
            if (next_it == null)
                break node;
            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
            self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
            self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
            it = next_it;
        } else null;
        std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null);

@ -141,16 +141,16 @@ pub const ArenaAllocator = struct {
            if (first_node.data == total_size)
                return true;
            const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
            if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
            if (self.child_allocator.rawResize(first_alloc_buf, BufNode_alignment, total_size, @returnAddress())) {
                // successful resize
                first_node.data = total_size;
            } else {
                // manual realloc
                const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse {
                const new_ptr = self.child_allocator.rawAlloc(total_size, BufNode_alignment, @returnAddress()) orelse {
                    // we failed to preheat the arena properly, signal this to the user.
                    return false;
                };
                self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
                self.child_allocator.rawFree(first_alloc_buf, BufNode_alignment, @returnAddress());
                const node: *BufNode = @ptrCast(@alignCast(new_ptr));
                node.* = .{ .data = total_size };
                self.state.buffer_list.first = node;

@ -163,8 +163,7 @@ pub const ArenaAllocator = struct {
        const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
        const big_enough_len = prev_len + actual_min_size;
        const len = big_enough_len + big_enough_len / 2;
        const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
        const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
        const ptr = self.child_allocator.rawAlloc(len, BufNode_alignment, @returnAddress()) orelse
            return null;
        const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
        buf_node.* = .{ .data = len };

@ -173,11 +172,11 @@ pub const ArenaAllocator = struct {
        return buf_node;
    }

    fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
    fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = ra;

        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
        const ptr_align = alignment.toByteUnits();
        var cur_node = if (self.state.buffer_list.first) |first_node|
            first_node
        else

@ -197,8 +196,7 @@ pub const ArenaAllocator = struct {
        }

        const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
        const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
        if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) {
        if (self.child_allocator.rawResize(cur_alloc_buf, BufNode_alignment, bigger_buf_size, @returnAddress())) {
            cur_node.data = bigger_buf_size;
        } else {
            // Allocate a new node if that's not possible

@ -207,9 +205,9 @@ pub const ArenaAllocator = struct {
        }
    }

    fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
    fn resize(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool {
        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = log2_buf_align;
        _ = alignment;
        _ = ret_addr;

        const cur_node = self.state.buffer_list.first orelse return false;

@ -231,8 +229,18 @@ pub const ArenaAllocator = struct {
        }
    }

    fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
        _ = log2_buf_align;
    fn remap(
        context: *anyopaque,
        memory: []u8,
        alignment: mem.Alignment,
        new_len: usize,
        return_address: usize,
    ) ?[*]u8 {
        return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
    }

    fn free(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, ret_addr: usize) void {
        _ = alignment;
        _ = ret_addr;

        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
|||
1410 lib/std/heap/debug_allocator.zig (diff suppressed because it is too large)
|
|
@ -1,118 +0,0 @@
|
|||
const std = @import("../std.zig");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// This allocator is used in front of another allocator and logs to the provided writer
|
||||
/// on every call to the allocator. Writer errors are ignored.
|
||||
pub fn LogToWriterAllocator(comptime Writer: type) type {
|
||||
return struct {
|
||||
parent_allocator: Allocator,
|
||||
writer: Writer,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(parent_allocator: Allocator, writer: Writer) Self {
|
||||
return Self{
|
||||
.parent_allocator = parent_allocator,
|
||||
.writer = writer,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn allocator(self: *Self) Allocator {
|
||||
return .{
|
||||
.ptr = self,
|
||||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
ctx: *anyopaque,
|
||||
len: usize,
|
||||
log2_ptr_align: u8,
|
||||
ra: usize,
|
||||
) ?[*]u8 {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
self.writer.print("alloc : {}", .{len}) catch {};
|
||||
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
|
||||
if (result != null) {
|
||||
self.writer.print(" success!\n", .{}) catch {};
|
||||
} else {
|
||||
self.writer.print(" failure!\n", .{}) catch {};
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
new_len: usize,
|
||||
ra: usize,
|
||||
) bool {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
if (new_len <= buf.len) {
|
||||
self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
|
||||
} else {
|
||||
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
|
||||
}
|
||||
|
||||
if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
|
||||
if (new_len > buf.len) {
|
||||
self.writer.print(" success!\n", .{}) catch {};
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
std.debug.assert(new_len > buf.len);
|
||||
self.writer.print(" failure!\n", .{}) catch {};
|
||||
return false;
|
||||
}
|
||||
|
||||
fn free(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
ra: usize,
|
||||
) void {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
self.writer.print("free : {}\n", .{buf.len}) catch {};
|
||||
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// This allocator is used in front of another allocator and logs to the provided writer
|
||||
/// on every call to the allocator. Writer errors are ignored.
|
||||
pub fn logToWriterAllocator(
|
||||
parent_allocator: Allocator,
|
||||
writer: anytype,
|
||||
) LogToWriterAllocator(@TypeOf(writer)) {
|
||||
return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
|
||||
}
|
||||
|
||||
test "LogToWriterAllocator" {
|
||||
var log_buf: [255]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&log_buf);
|
||||
|
||||
var allocator_buf: [10]u8 = undefined;
|
||||
var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
|
||||
var allocator_state = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer());
|
||||
const allocator = allocator_state.allocator();
|
||||
|
||||
var a = try allocator.alloc(u8, 10);
|
||||
try std.testing.expect(allocator.resize(a, 5));
|
||||
a = a[0..5];
|
||||
try std.testing.expect(!allocator.resize(a, 20));
|
||||
allocator.free(a);
|
||||
|
||||
try std.testing.expectEqualSlices(u8,
|
||||
\\alloc : 10 success!
|
||||
\\shrink: 10 to 5
|
||||
\\expand: 5 to 20 failure!
|
||||
\\free : 5
|
||||
\\
|
||||
, fbs.getWritten());
|
||||
}
|
||||
|
|
@ -1,133 +0,0 @@
|
|||
const std = @import("../std.zig");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// This allocator is used in front of another allocator and logs to `std.log`
|
||||
/// on every call to the allocator.
|
||||
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
|
||||
pub fn LoggingAllocator(
|
||||
comptime success_log_level: std.log.Level,
|
||||
comptime failure_log_level: std.log.Level,
|
||||
) type {
|
||||
return ScopedLoggingAllocator(.default, success_log_level, failure_log_level);
|
||||
}
|
||||
|
||||
/// This allocator is used in front of another allocator and logs to `std.log`
|
||||
/// with the given scope on every call to the allocator.
|
||||
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
|
||||
pub fn ScopedLoggingAllocator(
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime success_log_level: std.log.Level,
|
||||
comptime failure_log_level: std.log.Level,
|
||||
) type {
|
||||
const log = std.log.scoped(scope);
|
||||
|
||||
return struct {
|
||||
parent_allocator: Allocator,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(parent_allocator: Allocator) Self {
|
||||
return .{
|
||||
.parent_allocator = parent_allocator,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn allocator(self: *Self) Allocator {
|
||||
return .{
|
||||
.ptr = self,
|
||||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// This function is required as the `std.log.log` function is not public
|
||||
inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
|
||||
switch (log_level) {
|
||||
.err => log.err(format, args),
|
||||
.warn => log.warn(format, args),
|
||||
.info => log.info(format, args),
|
||||
.debug => log.debug(format, args),
|
||||
}
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
ctx: *anyopaque,
|
||||
len: usize,
|
||||
log2_ptr_align: u8,
|
||||
ra: usize,
|
||||
) ?[*]u8 {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
|
||||
if (result != null) {
|
||||
logHelper(
|
||||
success_log_level,
|
||||
"alloc - success - len: {}, ptr_align: {}",
|
||||
.{ len, log2_ptr_align },
|
||||
);
|
||||
} else {
|
||||
logHelper(
|
||||
failure_log_level,
|
||||
"alloc - failure: OutOfMemory - len: {}, ptr_align: {}",
|
||||
.{ len, log2_ptr_align },
|
||||
);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
new_len: usize,
|
||||
ra: usize,
|
||||
) bool {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
|
||||
if (new_len <= buf.len) {
|
||||
logHelper(
|
||||
success_log_level,
|
||||
"shrink - success - {} to {}, buf_align: {}",
|
||||
.{ buf.len, new_len, log2_buf_align },
|
||||
);
|
||||
} else {
|
||||
logHelper(
|
||||
success_log_level,
|
||||
"expand - success - {} to {}, buf_align: {}",
|
||||
.{ buf.len, new_len, log2_buf_align },
|
||||
);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std.debug.assert(new_len > buf.len);
|
||||
logHelper(
|
||||
failure_log_level,
|
||||
"expand - failure - {} to {}, buf_align: {}",
|
||||
.{ buf.len, new_len, log2_buf_align },
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
fn free(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
ra: usize,
|
||||
) void {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
|
||||
logHelper(success_log_level, "free - len: {}", .{buf.len});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// This allocator is used in front of another allocator and logs to `std.log`
|
||||
/// on every call to the allocator.
|
||||
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
|
||||
pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
|
||||
return LoggingAllocator(.debug, .err).init(parent_allocator);
|
||||
}
|
||||
|
|
@ -3,6 +3,7 @@ const builtin = @import("builtin");
|
|||
const math = std.math;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const mem = std.mem;
|
||||
const heap = std.heap;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
|
||||
|
|
@ -18,7 +19,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
|
|||
const max_usize = math.maxInt(usize);
|
||||
const ushift = math.Log2Int(usize);
|
||||
const bigpage_size = 64 * 1024;
|
||||
const pages_per_bigpage = bigpage_size / mem.page_size;
|
||||
const pages_per_bigpage = bigpage_size / heap.pageSize();
|
||||
const bigpage_count = max_usize / bigpage_size;
|
||||
|
||||
/// Because of storing free list pointers, the minimum size class is 3.
|
||||
|
|
@ -58,7 +59,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
|
|||
}
|
||||
|
||||
const next_addr = next_addrs[class];
|
||||
if (next_addr % mem.page_size == 0) {
|
||||
if (next_addr % heap.pageSize() == 0) {
|
||||
const addr = allocBigPages(1);
|
||||
if (addr == 0) return null;
|
||||
//std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
|
||||
|
|
@ -153,7 +154,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
|
|||
big_frees[class] = node.*;
|
||||
return top_free_ptr;
|
||||
}
|
||||
return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
|
||||
return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
|||
145 lib/std/mem.zig
|
|
@ -8,26 +8,6 @@ const testing = std.testing;
const Endian = std.builtin.Endian;
const native_endian = builtin.cpu.arch.endian();

/// Compile time known minimum page size.
/// https://github.com/ziglang/zig/issues/4082
pub const page_size = switch (builtin.cpu.arch) {
    .wasm32, .wasm64 => 64 * 1024,
    .aarch64 => switch (builtin.os.tag) {
        .macos, .ios, .watchos, .tvos, .visionos => 16 * 1024,
        else => 4 * 1024,
    },
    .sparc64 => 8 * 1024,
    .loongarch32, .loongarch64 => switch (builtin.os.tag) {
        // Linux default KConfig value is 16KiB
        .linux => 16 * 1024,
        // FIXME:
        // There is no other OS supported yet. Use the same value
        // as Linux for now.
        else => 16 * 1024,
    },
    else => 4 * 1024,
};

/// The standard library currently thoroughly depends on byte size
/// being 8 bits. (see the use of u8 throughout allocation code as
/// the "byte" type.) Code which depends on this can reference this

@ -38,6 +18,60 @@ pub const byte_size_in_bits = 8;

pub const Allocator = @import("mem/Allocator.zig");

/// Stored as a power-of-two.
pub const Alignment = enum(math.Log2Int(usize)) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    @"32" = 5,
    @"64" = 6,
    _,

    pub fn toByteUnits(a: Alignment) usize {
        return @as(usize, 1) << @intFromEnum(a);
    }

    pub fn fromByteUnits(n: usize) Alignment {
        assert(std.math.isPowerOfTwo(n));
        return @enumFromInt(@ctz(n));
    }

    pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
        return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
    }

    pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
        return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
    }

    pub fn max(lhs: Alignment, rhs: Alignment) Alignment {
        return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
    }

    pub fn min(lhs: Alignment, rhs: Alignment) Alignment {
        return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs)));
    }

    /// Return next address with this alignment.
    pub fn forward(a: Alignment, address: usize) usize {
        const x = (@as(usize, 1) << @intFromEnum(a)) - 1;
        return (address + x) & ~x;
    }

    /// Return previous address with this alignment.
    pub fn backward(a: Alignment, address: usize) usize {
        const x = (@as(usize, 1) << @intFromEnum(a)) - 1;
        return address & ~x;
    }

    /// Return whether address is aligned to this amount.
    pub fn check(a: Alignment, address: usize) bool {
        return @ctz(address) >= @intFromEnum(a);
    }
};
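
Illustrative only, not part of the diff: a small sketch of how the new `Alignment` type is used. Values are stored as the log2 of the byte amount, and the helper methods convert and round addresses.

const std = @import("std");
const Alignment = std.mem.Alignment;

test "Alignment round-trips and address math (illustrative)" {
    // 16-byte alignment is stored as its log2 (4).
    const a: Alignment = .fromByteUnits(16);
    try std.testing.expectEqual(@as(usize, 16), a.toByteUnits());

    // forward/backward round an address up or down to an aligned boundary.
    try std.testing.expectEqual(@as(usize, 0x1010), a.forward(0x1001));
    try std.testing.expectEqual(@as(usize, 0x1000), a.backward(0x100f));

    // check reports whether an address already satisfies the alignment.
    try std.testing.expect(a.check(0x2000));
    try std.testing.expect(!a.check(0x2008));
}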
|
||||
|
||||
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
|
||||
/// or the allocator.
|
||||
pub fn ValidationAllocator(comptime T: type) type {
|
||||
|
|
@ -58,6 +92,7 @@ pub fn ValidationAllocator(comptime T: type) type {
|
|||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
|
|
@ -71,41 +106,54 @@ pub fn ValidationAllocator(comptime T: type) type {
|
|||
pub fn alloc(
|
||||
ctx: *anyopaque,
|
||||
n: usize,
|
||||
log2_ptr_align: u8,
|
||||
alignment: mem.Alignment,
|
||||
ret_addr: usize,
|
||||
) ?[*]u8 {
|
||||
assert(n > 0);
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
|
||||
const result = underlying.rawAlloc(n, alignment, ret_addr) orelse
|
||||
return null;
|
||||
assert(mem.isAlignedLog2(@intFromPtr(result), log2_ptr_align));
|
||||
assert(alignment.check(@intFromPtr(result)));
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn resize(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
alignment: Alignment,
|
||||
new_len: usize,
|
||||
ret_addr: usize,
|
||||
) bool {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
assert(buf.len > 0);
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
|
||||
return underlying.rawResize(buf, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
pub fn remap(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
alignment: Alignment,
|
||||
new_len: usize,
|
||||
ret_addr: usize,
|
||||
) ?[*]u8 {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
assert(buf.len > 0);
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
return underlying.rawRemap(buf, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
pub fn free(
|
||||
ctx: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
alignment: Alignment,
|
||||
ret_addr: usize,
|
||||
) void {
|
||||
const self: *Self = @ptrCast(@alignCast(ctx));
|
||||
assert(buf.len > 0);
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
underlying.rawFree(buf, log2_buf_align, ret_addr);
|
||||
underlying.rawFree(buf, alignment, ret_addr);
|
||||
}
|
||||
|
||||
pub fn reset(self: *Self) void {
|
||||
|
|
@ -133,27 +181,9 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
|
|||
return adjusted;
|
||||
}
|
||||
|
||||
const fail_allocator = Allocator{
|
||||
.ptr = undefined,
|
||||
.vtable = &failAllocator_vtable,
|
||||
};
|
||||
|
||||
const failAllocator_vtable = Allocator.VTable{
|
||||
.alloc = failAllocatorAlloc,
|
||||
.resize = Allocator.noResize,
|
||||
.free = Allocator.noFree,
|
||||
};
|
||||
|
||||
fn failAllocatorAlloc(_: *anyopaque, n: usize, log2_alignment: u8, ra: usize) ?[*]u8 {
|
||||
_ = n;
|
||||
_ = log2_alignment;
|
||||
_ = ra;
|
||||
return null;
|
||||
}
|
||||
|
||||
test "Allocator basics" {
|
||||
try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1));
|
||||
try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0));
|
||||
try testing.expectError(error.OutOfMemory, testing.failing_allocator.alloc(u8, 1));
|
||||
try testing.expectError(error.OutOfMemory, testing.failing_allocator.allocSentinel(u8, 1, 0));
|
||||
}
|
||||
|
||||
test "Allocator.resize" {
|
||||
|
|
@ -1068,16 +1098,18 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
|
|||
// as we don't read into a new page. This should be the case for most architectures
|
||||
// which use paged memory, however should be confirmed before adding a new arch below.
|
||||
.aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
|
||||
const page_size = std.heap.pageSize();
|
||||
const block_size = @sizeOf(T) * block_len;
|
||||
const Block = @Vector(block_len, T);
|
||||
const mask: Block = @splat(sentinel);
|
||||
|
||||
comptime std.debug.assert(std.mem.page_size % block_size == 0);
|
||||
comptime assert(std.heap.page_size_max % @sizeOf(Block) == 0);
|
||||
assert(page_size % @sizeOf(Block) == 0);
|
||||
|
||||
// First block may be unaligned
|
||||
const start_addr = @intFromPtr(&p[i]);
|
||||
const offset_in_page = start_addr & (std.mem.page_size - 1);
|
||||
if (offset_in_page <= std.mem.page_size - block_size) {
|
||||
const offset_in_page = start_addr & (page_size - 1);
|
||||
if (offset_in_page <= page_size - @sizeOf(Block)) {
|
||||
// Will not read past the end of a page, full block.
|
||||
const block: Block = p[i..][0..block_len].*;
|
||||
const matches = block == mask;
|
||||
|
|
@ -1097,7 +1129,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
|
|||
}
|
||||
}
|
||||
|
||||
std.debug.assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
|
||||
assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
|
||||
while (true) {
|
||||
const block: *const Block = @ptrCast(@alignCast(p[i..][0..block_len]));
|
||||
const matches = block.* == mask;
|
||||
|
|
@ -1120,23 +1152,24 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
|
|||
test "indexOfSentinel vector paths" {
|
||||
const Types = [_]type{ u8, u16, u32, u64 };
|
||||
const allocator = std.testing.allocator;
|
||||
const page_size = std.heap.pageSize();
|
||||
|
||||
inline for (Types) |T| {
|
||||
const block_len = std.simd.suggestVectorLength(T) orelse continue;
|
||||
|
||||
// Allocate three pages so we guarantee a page-crossing address with a full page after
|
||||
const memory = try allocator.alloc(T, 3 * std.mem.page_size / @sizeOf(T));
|
||||
const memory = try allocator.alloc(T, 3 * page_size / @sizeOf(T));
|
||||
defer allocator.free(memory);
|
||||
@memset(memory, 0xaa);
|
||||
|
||||
// Find starting page-alignment = 0
|
||||
var start: usize = 0;
|
||||
const start_addr = @intFromPtr(&memory);
|
||||
start += (std.mem.alignForward(usize, start_addr, std.mem.page_size) - start_addr) / @sizeOf(T);
|
||||
try testing.expect(start < std.mem.page_size / @sizeOf(T));
|
||||
start += (std.mem.alignForward(usize, start_addr, page_size) - start_addr) / @sizeOf(T);
|
||||
try testing.expect(start < page_size / @sizeOf(T));
|
||||
|
||||
// Validate all sub-block alignments
|
||||
const search_len = std.mem.page_size / @sizeOf(T);
|
||||
const search_len = page_size / @sizeOf(T);
|
||||
memory[start + search_len] = 0;
|
||||
for (0..block_len) |offset| {
|
||||
try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset])));
|
||||
|
|
@ -1144,7 +1177,7 @@ test "indexOfSentinel vector paths" {
|
|||
memory[start + search_len] = 0xaa;
|
||||
|
||||
// Validate page boundary crossing
|
||||
const start_page_boundary = start + (std.mem.page_size / @sizeOf(T));
|
||||
const start_page_boundary = start + (page_size / @sizeOf(T));
|
||||
memory[start_page_boundary + block_len] = 0;
|
||||
for (0..block_len) |offset| {
|
||||
try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
|
||||
|
|
|
|||
|
|
@ -6,29 +6,34 @@ const math = std.math;
const mem = std.mem;
const Allocator = @This();
const builtin = @import("builtin");
const Alignment = std.mem.Alignment;

pub const Error = error{OutOfMemory};
pub const Log2Align = math.Log2Int(usize);

/// The type erased pointer to the allocator implementation.
/// Any comparison of this field may result in illegal behavior, since it may be set to
/// `undefined` in cases where the allocator implementation does not have any associated
/// state.
///
/// Any comparison of this field may result in illegal behavior, since it may
/// be set to `undefined` in cases where the allocator implementation does not
/// have any associated state.
ptr: *anyopaque,
vtable: *const VTable,

pub const VTable = struct {
    /// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`.
    /// Return a pointer to `len` bytes with specified `alignment`, or return
    /// `null` indicating the allocation failed.
    ///
    /// `ret_addr` is optionally provided as the first return address of the
    /// allocation call stack. If the value is `0` it means no return address
    /// has been provided.
    alloc: *const fn (ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8,
    alloc: *const fn (*anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8,

    /// Attempt to expand or shrink memory in place. `buf.len` must equal the
    /// length requested from the most recent successful call to `alloc` or
    /// `resize`. `buf_align` must equal the same value that was passed as the
    /// `ptr_align` parameter to the original `alloc` call.
    /// Attempt to expand or shrink memory in place.
    ///
    /// `memory.len` must equal the length requested from the most recent
    /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
    /// equal the same value that was passed as the `alignment` parameter to
    /// the original `alloc` call.
    ///
    /// A result of `true` indicates the resize was successful and the
    /// allocation now has the same address but a size of `new_len`. `false`

@ -40,72 +45,113 @@ pub const VTable = struct {
    /// `ret_addr` is optionally provided as the first return address of the
    /// allocation call stack. If the value is `0` it means no return address
    /// has been provided.
    resize: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool,
    resize: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool,

    /// Free and invalidate a buffer.
    /// Attempt to expand or shrink memory, allowing relocation.
    ///
    /// `buf.len` must equal the most recent length returned by `alloc` or
    /// given to a successful `resize` call.
    /// `memory.len` must equal the length requested from the most recent
    /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
    /// equal the same value that was passed as the `alignment` parameter to
    /// the original `alloc` call.
    ///
    /// `buf_align` must equal the same value that was passed as the
    /// `ptr_align` parameter to the original `alloc` call.
    /// A non-`null` return value indicates the resize was successful. The
    /// allocation may have same address, or may have been relocated. In either
    /// case, the allocation now has size of `new_len`. A `null` return value
    /// indicates that the resize would be equivalent to allocating new memory,
    /// copying the bytes from the old memory, and then freeing the old memory.
    /// In such case, it is more efficient for the caller to perform the copy.
    ///
    /// `new_len` must be greater than zero.
    ///
    /// `ret_addr` is optionally provided as the first return address of the
    /// allocation call stack. If the value is `0` it means no return address
    /// has been provided.
    free: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void,
    remap: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8,

    /// Free and invalidate a region of memory.
    ///
    /// `memory.len` must equal the length requested from the most recent
    /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
    /// equal the same value that was passed as the `alignment` parameter to
    /// the original `alloc` call.
    ///
    /// `ret_addr` is optionally provided as the first return address of the
    /// allocation call stack. If the value is `0` it means no return address
    /// has been provided.
    free: *const fn (*anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void,
};
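
For orientation, here is a minimal sketch, not part of this commit, of an allocator backend written against the updated four-entry vtable. The `BumpAllocator` name and layout are hypothetical; it hands out memory from a caller-provided buffer and opts out of `resize`, `remap`, and `free` via the no-op helpers defined later in this file.

const std = @import("std");
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;

/// Hypothetical single-buffer bump allocator, shown only to illustrate the
/// shape of the updated VTable.
const BumpAllocator = struct {
    buffer: []u8,
    end_index: usize = 0,

    pub fn allocator(self: *BumpAllocator) Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = Allocator.noResize,
                .remap = Allocator.noRemap,
                .free = Allocator.noFree,
            },
        };
    }

    fn alloc(ctx: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
        _ = ret_addr;
        const self: *BumpAllocator = @ptrCast(@alignCast(ctx));
        const base = @intFromPtr(self.buffer.ptr);
        // Round the bump pointer up to the requested alignment.
        const start = alignment.forward(base + self.end_index) - base;
        const new_end = start + len;
        if (new_end > self.buffer.len) return null;
        self.end_index = new_end;
        return self.buffer.ptr + start;
    }
};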
|
||||
|
||||
pub fn noResize(
|
||||
self: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
memory: []u8,
|
||||
alignment: Alignment,
|
||||
new_len: usize,
|
||||
ret_addr: usize,
|
||||
) bool {
|
||||
_ = self;
|
||||
_ = buf;
|
||||
_ = log2_buf_align;
|
||||
_ = memory;
|
||||
_ = alignment;
|
||||
_ = new_len;
|
||||
_ = ret_addr;
|
||||
return false;
|
||||
}
|
||||
|
||||
pub fn noRemap(
|
||||
self: *anyopaque,
|
||||
memory: []u8,
|
||||
alignment: Alignment,
|
||||
new_len: usize,
|
||||
ret_addr: usize,
|
||||
) ?[*]u8 {
|
||||
_ = self;
|
||||
_ = memory;
|
||||
_ = alignment;
|
||||
_ = new_len;
|
||||
_ = ret_addr;
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn noFree(
|
||||
self: *anyopaque,
|
||||
buf: []u8,
|
||||
log2_buf_align: u8,
|
||||
memory: []u8,
|
||||
alignment: Alignment,
|
||||
ret_addr: usize,
|
||||
) void {
|
||||
_ = self;
|
||||
_ = buf;
|
||||
_ = log2_buf_align;
|
||||
_ = memory;
|
||||
_ = alignment;
|
||||
_ = ret_addr;
|
||||
}
|
||||
|
||||
/// This function is not intended to be called except from within the
|
||||
/// implementation of an Allocator
|
||||
pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
|
||||
return self.vtable.alloc(self.ptr, len, ptr_align, ret_addr);
|
||||
/// implementation of an `Allocator`.
|
||||
pub inline fn rawAlloc(a: Allocator, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
|
||||
return a.vtable.alloc(a.ptr, len, alignment, ret_addr);
|
||||
}
|
||||
|
||||
/// This function is not intended to be called except from within the
|
||||
/// implementation of an Allocator
|
||||
pub inline fn rawResize(self: Allocator, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
|
||||
return self.vtable.resize(self.ptr, buf, log2_buf_align, new_len, ret_addr);
|
||||
/// implementation of an `Allocator`.
|
||||
pub inline fn rawResize(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
|
||||
return a.vtable.resize(a.ptr, memory, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
/// This function is not intended to be called except from within the
|
||||
/// implementation of an Allocator
|
||||
pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
|
||||
return self.vtable.free(self.ptr, buf, log2_buf_align, ret_addr);
|
||||
/// implementation of an `Allocator`.
|
||||
pub inline fn rawRemap(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
|
||||
return a.vtable.remap(a.ptr, memory, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
/// This function is not intended to be called except from within the
|
||||
/// implementation of an `Allocator`.
|
||||
pub inline fn rawFree(a: Allocator, memory: []u8, alignment: Alignment, ret_addr: usize) void {
|
||||
return a.vtable.free(a.ptr, memory, alignment, ret_addr);
|
||||
}
|
||||
|
||||
/// Returns a pointer to undefined memory.
|
||||
/// Call `destroy` with the result to free the memory.
|
||||
pub fn create(self: Allocator, comptime T: type) Error!*T {
|
||||
pub fn create(a: Allocator, comptime T: type) Error!*T {
|
||||
if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
|
||||
const ptr: *T = @ptrCast(try self.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
|
||||
const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
|
@ -117,7 +163,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
|
|||
const T = info.child;
|
||||
if (@sizeOf(T) == 0) return;
|
||||
const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
|
||||
self.rawFree(non_const_ptr[0..@sizeOf(T)], log2a(info.alignment), @returnAddress());
|
||||
self.rawFree(non_const_ptr[0..@sizeOf(T)], .fromByteUnits(info.alignment), @returnAddress());
|
||||
}
|
||||
|
||||
/// Allocates an array of `n` items of type `T` and sets all the
|
||||
|
|
@ -215,46 +261,92 @@ fn allocWithSizeAndAlignment(self: Allocator, comptime size: usize, comptime ali
|
|||
}
|
||||
|
||||
fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: usize, return_address: usize) Error![*]align(alignment) u8 {
|
||||
// The Zig Allocator interface is not intended to solve alignments beyond
|
||||
// the minimum OS page size. For these use cases, the caller must use OS
|
||||
// APIs directly.
|
||||
comptime assert(alignment <= mem.page_size);
|
||||
|
||||
if (byte_count == 0) {
|
||||
const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
|
||||
return @as([*]align(alignment) u8, @ptrFromInt(ptr));
|
||||
}
|
||||
|
||||
const byte_ptr = self.rawAlloc(byte_count, log2a(alignment), return_address) orelse return Error.OutOfMemory;
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
const byte_ptr = self.rawAlloc(byte_count, .fromByteUnits(alignment), return_address) orelse return Error.OutOfMemory;
|
||||
@memset(byte_ptr[0..byte_count], undefined);
|
||||
return @as([*]align(alignment) u8, @alignCast(byte_ptr));
|
||||
return @alignCast(byte_ptr);
|
||||
}
|
||||
|
||||
/// Requests to modify the size of an allocation. It is guaranteed to not move
/// the pointer, however the allocator implementation may refuse the resize
/// request by returning `false`.
pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) bool {
    const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
/// Request to modify the size of an allocation.
///
/// It is guaranteed to not move the pointer, however the allocator
/// implementation may refuse the resize request by returning `false`.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn resize(self: Allocator, allocation: anytype, new_len: usize) bool {
    const Slice = @typeInfo(@TypeOf(allocation)).pointer;
    const T = Slice.child;
    if (new_n == 0) {
        self.free(old_mem);
    const alignment = Slice.alignment;
    if (new_len == 0) {
        self.free(allocation);
        return true;
    }
    if (old_mem.len == 0) {
    if (allocation.len == 0) {
        return false;
    }
    const old_byte_slice = mem.sliceAsBytes(old_mem);
    const old_memory = mem.sliceAsBytes(allocation);
    // I would like to use saturating multiplication here, but LLVM cannot lower it
    // on WebAssembly: https://github.com/ziglang/zig/issues/9660
    //const new_byte_count = new_n *| @sizeOf(T);
    const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return false;
    return self.rawResize(old_byte_slice, log2a(Slice.alignment), new_byte_count, @returnAddress());
    //const new_len_bytes = new_len *| @sizeOf(T);
    const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return false;
    return self.rawResize(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress());
}
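
A caller-side sketch, not from the commit, of the `resize` contract: the pointer never moves, and a `false` return leaves the allocation untouched. The `shrinkInPlace` helper is hypothetical.

const std = @import("std");

fn shrinkInPlace(gpa: std.mem.Allocator, buf: *[]u8, new_len: usize) void {
    // On success the same pointer now refers to `new_len` valid bytes.
    if (gpa.resize(buf.*, new_len)) {
        buf.* = buf.*[0..new_len];
    }
    // On `false` nothing changed; a caller that can tolerate relocation
    // would use `remap` or `realloc` instead.
}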
|
||||
|
||||
/// Request to modify the size of an allocation, allowing relocation.
///
/// A non-`null` return value indicates the resize was successful. The
/// allocation may have same address, or may have been relocated. In either
/// case, the allocation now has size of `new_len`. A `null` return value
/// indicates that the resize would be equivalent to allocating new memory,
/// copying the bytes from the old memory, and then freeing the old memory.
/// In such case, it is more efficient for the caller to perform those
/// operations.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: {
    const Slice = @typeInfo(@TypeOf(allocation)).pointer;
    break :t ?[]align(Slice.alignment) Slice.child;
} {
    const Slice = @typeInfo(@TypeOf(allocation)).pointer;
    const T = Slice.child;
    const alignment = Slice.alignment;
    if (new_len == 0) {
        self.free(allocation);
        return allocation[0..0];
    }
    if (allocation.len == 0) {
        return null;
    }
    const old_memory = mem.sliceAsBytes(allocation);
    // I would like to use saturating multiplication here, but LLVM cannot lower it
    // on WebAssembly: https://github.com/ziglang/zig/issues/9660
    //const new_len_bytes = new_len *| @sizeOf(T);
    const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return null;
    const new_ptr = self.rawRemap(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress()) orelse return null;
    const new_memory: []align(alignment) u8 = @alignCast(new_ptr[0..new_len_bytes]);
    return mem.bytesAsSlice(T, new_memory);
}
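
A caller-side sketch, not from the commit, of the intended `remap` pattern: take the relocation-capable fast path when the implementation offers one, and otherwise do the allocate-copy-free dance yourself (which is also what `realloc` further down does on your behalf). The `grow` helper is hypothetical.

const std = @import("std");

fn grow(gpa: std.mem.Allocator, old: []u8, new_len: usize) ![]u8 {
    // Fast path: the allocator can resize, possibly relocating, on its own.
    if (gpa.remap(old, new_len)) |relocated| return relocated;
    // Slow path: a plain alloc + copy + free is just as efficient here.
    const new_buf = try gpa.alloc(u8, new_len);
    const n = @min(old.len, new_len);
    @memcpy(new_buf[0..n], old[0..n]);
    gpa.free(old);
    return new_buf;
}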
|
||||
|
||||
/// This function requests a new byte size for an existing allocation, which
|
||||
/// can be larger, smaller, or the same size as the old memory allocation.
|
||||
///
|
||||
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
|
||||
///
|
||||
/// `old_mem` may have length zero, which makes a new allocation.
|
||||
///
|
||||
/// This function only fails on out-of-memory conditions, unlike:
|
||||
/// * `remap` which returns `null` when the `Allocator` implementation cannot
|
||||
/// do the realloc more efficiently than the caller
|
||||
/// * `resize` which returns `false` when the `Allocator` implementation cannot
|
||||
/// change the size without relocating the allocation.
|
||||
pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
|
|
@ -285,18 +377,17 @@ pub fn reallocAdvanced(
|
|||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
|
||||
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
|
||||
if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
|
||||
const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]);
|
||||
if (self.rawRemap(old_byte_slice, .fromByteUnits(Slice.alignment), byte_count, return_address)) |p| {
|
||||
const new_bytes: []align(Slice.alignment) u8 = @alignCast(p[0..byte_count]);
|
||||
return mem.bytesAsSlice(T, new_bytes);
|
||||
}
|
||||
|
||||
const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
|
||||
const new_mem = self.rawAlloc(byte_count, .fromByteUnits(Slice.alignment), return_address) orelse
|
||||
return error.OutOfMemory;
|
||||
const copy_len = @min(byte_count, old_byte_slice.len);
|
||||
@memcpy(new_mem[0..copy_len], old_byte_slice[0..copy_len]);
|
||||
// TODO https://github.com/ziglang/zig/issues/4298
|
||||
@memset(old_byte_slice, undefined);
|
||||
self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
|
||||
self.rawFree(old_byte_slice, .fromByteUnits(Slice.alignment), return_address);
|
||||
|
||||
const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]);
|
||||
return mem.bytesAsSlice(T, new_bytes);
|
||||
|
|
@ -311,9 +402,8 @@ pub fn free(self: Allocator, memory: anytype) void {
|
|||
const bytes_len = bytes.len + if (Slice.sentinel() != null) @sizeOf(Slice.child) else 0;
|
||||
if (bytes_len == 0) return;
|
||||
const non_const_ptr = @constCast(bytes.ptr);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(non_const_ptr[0..bytes_len], undefined);
|
||||
self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
|
||||
self.rawFree(non_const_ptr[0..bytes_len], .fromByteUnits(Slice.alignment), @returnAddress());
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory. Caller owns the memory.
|
||||
|
|
@ -330,17 +420,3 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) Error![:0]T {
|
|||
new_buf[m.len] = 0;
|
||||
return new_buf[0..m.len :0];
|
||||
}
|
||||
|
||||
/// TODO replace callsites with `@log2` after this proposal is implemented:
|
||||
/// https://github.com/ziglang/zig/issues/13642
|
||||
inline fn log2a(x: anytype) switch (@typeInfo(@TypeOf(x))) {
|
||||
.int => math.Log2Int(@TypeOf(x)),
|
||||
.comptime_int => comptime_int,
|
||||
else => @compileError("int please"),
|
||||
} {
|
||||
switch (@typeInfo(@TypeOf(x))) {
|
||||
.int => return math.log2_int(@TypeOf(x), x),
|
||||
.comptime_int => return math.log2(x),
|
||||
else => @compileError("bad"),
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -305,6 +305,13 @@ pub const MAP = switch (native_arch) {
    else => @compileError("missing std.os.linux.MAP constants for this architecture"),
};

pub const MREMAP = packed struct(u32) {
    MAYMOVE: bool = false,
    FIXED: bool = false,
    DONTUNMAP: bool = false,
    _: u29 = 0,
};
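
The packed layout above matches the kernel's flag values bit for bit (MREMAP_MAYMOVE = 1, MREMAP_FIXED = 2, MREMAP_DONTUNMAP = 4), which is what lets the `mremap` wrapper further down pass the struct through `@bitCast`. An illustrative check, not part of this commit:

const std = @import("std");
const linux = std.os.linux;

test "MREMAP flags bit-cast to the kernel's MREMAP_* values (illustrative)" {
    try std.testing.expectEqual(@as(u32, 1), @as(u32, @bitCast(linux.MREMAP{ .MAYMOVE = true })));
    try std.testing.expectEqual(@as(u32, 2), @as(u32, @bitCast(linux.MREMAP{ .FIXED = true })));
    try std.testing.expectEqual(@as(u32, 4), @as(u32, @bitCast(linux.MREMAP{ .DONTUNMAP = true })));
}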
|
||||
|
||||
pub const O = switch (native_arch) {
|
||||
.x86_64 => packed struct(u32) {
|
||||
ACCMODE: ACCMODE = .RDONLY,
|
||||
|
|
@ -892,10 +899,6 @@ pub fn umount2(special: [*:0]const u8, flags: u32) usize {
|
|||
|
||||
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, offset: i64) usize {
|
||||
if (@hasField(SYS, "mmap2")) {
|
||||
// Make sure the offset is also specified in multiples of page size
|
||||
if ((offset & (MMAP2_UNIT - 1)) != 0)
|
||||
return @bitCast(-@as(isize, @intFromEnum(E.INVAL)));
|
||||
|
||||
return syscall6(
|
||||
.mmap2,
|
||||
@intFromPtr(address),
|
||||
|
|
@ -934,6 +937,17 @@ pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize {
|
|||
return syscall3(.mprotect, @intFromPtr(address), length, protection);
|
||||
}
|
||||
|
||||
pub fn mremap(old_addr: ?[*]const u8, old_len: usize, new_len: usize, flags: MREMAP, new_addr: ?[*]const u8) usize {
|
||||
return syscall5(
|
||||
.mremap,
|
||||
@intFromPtr(old_addr),
|
||||
old_len,
|
||||
new_len,
|
||||
@as(u32, @bitCast(flags)),
|
||||
@intFromPtr(new_addr),
|
||||
);
|
||||
}
|
||||
|
||||
pub const MSF = struct {
|
||||
pub const ASYNC = 1;
|
||||
pub const INVALIDATE = 2;
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ const posix = std.posix;
|
|||
const linux = std.os.linux;
|
||||
const testing = std.testing;
|
||||
const is_linux = builtin.os.tag == .linux;
|
||||
const page_size_min = std.heap.page_size_min;
|
||||
|
||||
fd: posix.fd_t = -1,
|
||||
sq: SubmissionQueue,
|
||||
|
|
@ -1341,8 +1342,8 @@ pub const SubmissionQueue = struct {
|
|||
dropped: *u32,
|
||||
array: []u32,
|
||||
sqes: []linux.io_uring_sqe,
|
||||
mmap: []align(mem.page_size) u8,
|
||||
mmap_sqes: []align(mem.page_size) u8,
|
||||
mmap: []align(page_size_min) u8,
|
||||
mmap_sqes: []align(page_size_min) u8,
|
||||
|
||||
// We use `sqe_head` and `sqe_tail` in the same way as liburing:
|
||||
// We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
|
||||
|
|
@ -1460,7 +1461,7 @@ pub const BufferGroup = struct {
|
|||
/// Pointer to the memory shared by the kernel.
|
||||
/// `buffers_count` of `io_uring_buf` structures are shared by the kernel.
|
||||
/// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct.
|
||||
br: *align(mem.page_size) linux.io_uring_buf_ring,
|
||||
br: *align(page_size_min) linux.io_uring_buf_ring,
|
||||
/// Contiguous block of memory of size (buffers_count * buffer_size).
|
||||
buffers: []u8,
|
||||
/// Size of each buffer in buffers.
|
||||
|
|
@ -1555,7 +1556,7 @@ pub const BufferGroup = struct {
|
|||
/// `fd` is IO_Uring.fd for which the provided buffer ring is being registered.
|
||||
/// `entries` is the number of entries requested in the buffer ring, must be power of 2.
|
||||
/// `group_id` is the chosen buffer group ID, unique in IO_Uring.
|
||||
pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.page_size) linux.io_uring_buf_ring {
|
||||
pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(page_size_min) linux.io_uring_buf_ring {
|
||||
if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
|
||||
if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
|
||||
|
||||
|
|
@ -1571,7 +1572,7 @@ pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.p
|
|||
errdefer posix.munmap(mmap);
|
||||
assert(mmap.len == mmap_size);
|
||||
|
||||
const br: *align(mem.page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
|
||||
const br: *align(page_size_min) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
|
||||
try register_buf_ring(fd, @intFromPtr(br), entries, group_id);
|
||||
return br;
|
||||
}
|
||||
|
|
@ -1613,9 +1614,9 @@ fn handle_register_buf_ring_result(res: usize) !void {
|
|||
}
|
||||
|
||||
// Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
|
||||
pub fn free_buf_ring(fd: posix.fd_t, br: *align(mem.page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
|
||||
pub fn free_buf_ring(fd: posix.fd_t, br: *align(page_size_min) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
|
||||
unregister_buf_ring(fd, group_id) catch {};
|
||||
var mmap: []align(mem.page_size) u8 = undefined;
|
||||
var mmap: []align(page_size_min) u8 = undefined;
|
||||
mmap.ptr = @ptrCast(br);
|
||||
mmap.len = entries * @sizeOf(linux.io_uring_buf);
|
||||
posix.munmap(mmap);
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ const assert = std.debug.assert;
|
|||
const native_arch = @import("builtin").cpu.arch;
|
||||
const linux = std.os.linux;
|
||||
const posix = std.posix;
|
||||
const page_size_min = std.heap.page_size_min;
|
||||
|
||||
/// Represents an ELF TLS variant.
|
||||
///
|
||||
|
|
@ -484,13 +485,13 @@ pub fn prepareArea(area: []u8) usize {
|
|||
};
|
||||
}
|
||||
|
||||
// The main motivation for the size chosen here is that this is how much ends up being requested for
|
||||
// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
|
||||
// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
|
||||
// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
|
||||
// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
|
||||
// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
|
||||
var main_thread_area_buffer: [0x2100]u8 align(mem.page_size) = undefined;
|
||||
/// The main motivation for the size chosen here is that this is how much ends up being requested for
|
||||
/// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
|
||||
/// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
|
||||
/// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
|
||||
/// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
|
||||
/// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
|
||||
var main_thread_area_buffer: [0x2100]u8 align(page_size_min) = undefined;
|
||||
|
||||
/// Computes the layout of the static TLS area, allocates the area, initializes all of its fields,
|
||||
/// and assigns the architecture-specific value to the TP register.
|
||||
|
|
@ -503,7 +504,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
|
|||
const area = blk: {
|
||||
// Fast path for the common case where the TLS data is really small, avoid an allocation and
|
||||
// use our local buffer.
|
||||
if (area_desc.alignment <= mem.page_size and area_desc.size <= main_thread_area_buffer.len) {
|
||||
if (area_desc.alignment <= page_size_min and area_desc.size <= main_thread_area_buffer.len) {
|
||||
break :blk main_thread_area_buffer[0..area_desc.size];
|
||||
}
|
||||
|
||||
|
|
@ -517,7 +518,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
|
|||
);
|
||||
if (@as(isize, @bitCast(begin_addr)) < 0) @trap();
|
||||
|
||||
const area_ptr: [*]align(mem.page_size) u8 = @ptrFromInt(begin_addr);
|
||||
const area_ptr: [*]align(page_size_min) u8 = @ptrFromInt(begin_addr);
|
||||
|
||||
// Make sure the slice is correctly aligned.
|
||||
const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment);
|
||||
|
|
|
|||
|
|
@ -367,8 +367,8 @@ pub fn sbrk(n: usize) usize {
|
|||
bloc = @intFromPtr(&ExecData.end);
|
||||
bloc_max = @intFromPtr(&ExecData.end);
|
||||
}
|
||||
const bl = std.mem.alignForward(usize, bloc, std.mem.page_size);
|
||||
const n_aligned = std.mem.alignForward(usize, n, std.mem.page_size);
|
||||
const bl = std.mem.alignForward(usize, bloc, std.heap.pageSize());
|
||||
const n_aligned = std.mem.alignForward(usize, n, std.heap.pageSize());
|
||||
if (bl + n_aligned > bloc_max) {
|
||||
// we need to allocate
|
||||
if (brk_(bl + n_aligned) < 0) return 0;
|
||||
|
|
|
|||
|
|
@ -2016,18 +2016,6 @@ pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter
|
|||
assert(kernel32.InitOnceExecuteOnce(InitOnce, InitFn, Parameter, Context) != 0);
|
||||
}
|
||||
|
||||
pub fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *anyopaque) void {
|
||||
assert(kernel32.HeapFree(hHeap, dwFlags, lpMem) != 0);
|
||||
}
|
||||
|
||||
pub fn HeapDestroy(hHeap: HANDLE) void {
|
||||
assert(kernel32.HeapDestroy(hHeap) != 0);
|
||||
}
|
||||
|
||||
pub fn LocalFree(hMem: HLOCAL) void {
|
||||
assert(kernel32.LocalFree(hMem) == null);
|
||||
}
|
||||
|
||||
pub const SetFileTimeError = error{Unexpected};
|
||||
|
||||
pub fn SetFileTime(
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@ const WCHAR = windows.WCHAR;
|
|||
const WIN32_FIND_DATAW = windows.WIN32_FIND_DATAW;
|
||||
const Win32Error = windows.Win32Error;
|
||||
const WORD = windows.WORD;
|
||||
const SYSTEM_INFO = windows.SYSTEM_INFO;
|
||||
|
||||
// I/O - Filesystem
|
||||
|
||||
|
|
@ -527,11 +528,6 @@ pub extern "kernel32" fn HeapCreate(
|
|||
dwMaximumSize: SIZE_T,
|
||||
) callconv(.winapi) ?HANDLE;
|
||||
|
||||
// TODO: Wrapper around RtlDestroyHeap (BOOLEAN -> BOOL).
|
||||
pub extern "kernel32" fn HeapDestroy(
|
||||
hHeap: HANDLE,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Forwarder to RtlReAllocateHeap.
|
||||
pub extern "kernel32" fn HeapReAlloc(
|
||||
hHeap: HANDLE,
|
||||
|
|
@ -584,10 +580,6 @@ pub extern "kernel32" fn VirtualQuery(
|
|||
dwLength: SIZE_T,
|
||||
) callconv(.winapi) SIZE_T;
|
||||
|
||||
pub extern "kernel32" fn LocalFree(
|
||||
hMem: HLOCAL,
|
||||
) callconv(.winapi) ?HLOCAL;
|
||||
|
||||
// TODO: Getter for peb.ProcessHeap
|
||||
pub extern "kernel32" fn GetProcessHeap() callconv(.winapi) ?HANDLE;
|
||||
|
||||
|
|
@ -667,6 +659,6 @@ pub extern "kernel32" fn SetLastError(
|
|||
// TODO:
|
||||
// Wrapper around KUSER_SHARED_DATA.SystemTime.
|
||||
// Much better to use NtQuerySystemTime or NtQuerySystemTimePrecise for guaranteed 0.1ns precision.
|
||||
pub extern "kernel32" fn GetSystemTimeAsFileTime(
|
||||
lpSystemTimeAsFileTime: *FILETIME,
|
||||
) callconv(.winapi) void;
|
||||
pub extern "kernel32" fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: *FILETIME) callconv(.winapi) void;
|
||||
|
||||
pub extern "kernel32" fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) callconv(.winapi) void;
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ const maxInt = std.math.maxInt;
|
|||
const cast = std.math.cast;
|
||||
const assert = std.debug.assert;
|
||||
const native_os = builtin.os.tag;
|
||||
const page_size_min = std.heap.page_size_min;
|
||||
|
||||
test {
|
||||
_ = @import("posix/test.zig");
|
||||
|
|
@ -82,6 +83,7 @@ pub const MAP = system.MAP;
|
|||
pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN;
|
||||
pub const MFD = system.MFD;
|
||||
pub const MMAP2_UNIT = system.MMAP2_UNIT;
|
||||
pub const MREMAP = system.MREMAP;
|
||||
pub const MSF = system.MSF;
|
||||
pub const MSG = system.MSG;
|
||||
pub const NAME_MAX = system.NAME_MAX;
|
||||
|
|
@ -4694,7 +4696,7 @@ pub const MProtectError = error{
|
|||
OutOfMemory,
|
||||
} || UnexpectedError;
|
||||
|
||||
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
|
||||
pub fn mprotect(memory: []align(page_size_min) u8, protection: u32) MProtectError!void {
|
||||
if (native_os == .windows) {
|
||||
const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
|
||||
0b000 => windows.PAGE_NOACCESS,
|
||||
|
|
@ -4759,21 +4761,21 @@ pub const MMapError = error{
|
|||
/// * SIGSEGV - Attempted write into a region mapped as read-only.
|
||||
/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
|
||||
pub fn mmap(
|
||||
ptr: ?[*]align(mem.page_size) u8,
|
||||
ptr: ?[*]align(page_size_min) u8,
|
||||
length: usize,
|
||||
prot: u32,
|
||||
flags: system.MAP,
|
||||
fd: fd_t,
|
||||
offset: u64,
|
||||
) MMapError![]align(mem.page_size) u8 {
|
||||
) MMapError![]align(page_size_min) u8 {
|
||||
const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
|
||||
const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset));
|
||||
const err: E = if (builtin.link_libc) blk: {
|
||||
if (rc != std.c.MAP_FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
|
||||
if (rc != std.c.MAP_FAILED) return @as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..length];
|
||||
break :blk @enumFromInt(system._errno().*);
|
||||
} else blk: {
|
||||
const err = errno(rc);
|
||||
if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
|
||||
if (err == .SUCCESS) return @as([*]align(page_size_min) u8, @ptrFromInt(rc))[0..length];
|
||||
break :blk err;
|
||||
};
|
||||
switch (err) {
|
||||
|
|
@ -4799,7 +4801,7 @@ pub fn mmap(
|
|||
/// Zig's munmap function does not, for two reasons:
|
||||
/// * It violates the Zig principle that resource deallocation must succeed.
|
||||
/// * The Windows function, VirtualFree, has this restriction.
|
||||
pub fn munmap(memory: []align(mem.page_size) const u8) void {
|
||||
pub fn munmap(memory: []align(page_size_min) const u8) void {
|
||||
switch (errno(system.munmap(memory.ptr, memory.len))) {
|
||||
.SUCCESS => return,
|
||||
.INVAL => unreachable, // Invalid parameters.
|
||||
|
|
@ -4808,12 +4810,46 @@ pub fn munmap(memory: []align(mem.page_size) const u8) void {
|
|||
}
|
||||
}
|
||||
|
||||
pub const MRemapError = error{
    LockedMemoryLimitExceeded,
    /// Either a bug in the calling code, or the operating system abused the
    /// EINVAL error code.
    InvalidSyscallParameters,
    OutOfMemory,
} || UnexpectedError;

pub fn mremap(
    old_address: ?[*]align(page_size_min) u8,
    old_len: usize,
    new_len: usize,
    flags: system.MREMAP,
    new_address: ?[*]align(page_size_min) u8,
) MRemapError![]align(page_size_min) u8 {
    const rc = system.mremap(old_address, old_len, new_len, flags, new_address);
    const err: E = if (builtin.link_libc) blk: {
        if (rc != std.c.MAP_FAILED) return @as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..new_len];
        break :blk @enumFromInt(system._errno().*);
    } else blk: {
        const err = errno(rc);
        if (err == .SUCCESS) return @as([*]align(page_size_min) u8, @ptrFromInt(rc))[0..new_len];
        break :blk err;
    };
    switch (err) {
        .SUCCESS => unreachable,
        .AGAIN => return error.LockedMemoryLimitExceeded,
        .INVAL => return error.InvalidSyscallParameters,
        .NOMEM => return error.OutOfMemory,
        .FAULT => unreachable,
        else => return unexpectedErrno(err),
    }
}
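
An illustrative, Linux-only sketch, not from the commit, of using the new wrapper to grow an anonymous mapping while letting the kernel relocate it if it cannot grow in place. The `growMapping` helper is hypothetical and assumes the slice came from `posix.mmap`.

const std = @import("std");
const posix = std.posix;

fn growMapping(
    old: []align(std.heap.page_size_min) u8,
    new_len: usize,
) ![]align(std.heap.page_size_min) u8 {
    // MAYMOVE permits the kernel to return a different address.
    return posix.mremap(old.ptr, old.len, new_len, .{ .MAYMOVE = true }, null);
}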
|
||||
|
||||
pub const MSyncError = error{
|
||||
UnmappedMemory,
|
||||
PermissionDenied,
|
||||
} || UnexpectedError;
|
||||
|
||||
pub fn msync(memory: []align(mem.page_size) u8, flags: i32) MSyncError!void {
|
||||
pub fn msync(memory: []align(page_size_min) u8, flags: i32) MSyncError!void {
|
||||
switch (errno(system.msync(memory.ptr, memory.len, flags))) {
|
||||
.SUCCESS => return,
|
||||
.PERM => return error.PermissionDenied,
|
||||
|
|
@ -7135,7 +7171,7 @@ pub const MincoreError = error{
|
|||
} || UnexpectedError;
|
||||
|
||||
/// Determine whether pages are resident in memory.
|
||||
pub fn mincore(ptr: [*]align(mem.page_size) u8, length: usize, vec: [*]u8) MincoreError!void {
|
||||
pub fn mincore(ptr: [*]align(page_size_min) u8, length: usize, vec: [*]u8) MincoreError!void {
|
||||
return switch (errno(system.mincore(ptr, length, vec))) {
|
||||
.SUCCESS => {},
|
||||
.AGAIN => error.SystemResources,
|
||||
|
|
@ -7181,7 +7217,7 @@ pub const MadviseError = error{

/// Give advice about use of memory.
/// This syscall is optional and is sometimes configured to be disabled.
pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void {
pub fn madvise(ptr: [*]align(page_size_min) u8, length: usize, advice: u32) MadviseError!void {
switch (errno(system.madvise(ptr, length, advice))) {
.SUCCESS => return,
.PERM => return error.PermissionDenied,
@ -1560,7 +1560,7 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
ReadGroupId,
};

var buf: [std.mem.page_size]u8 = undefined;
var buf: [std.heap.page_size_min]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
var uid: posix.uid_t = 0;
@ -576,7 +576,7 @@ fn expandStackSize(phdrs: []elf.Phdr) void {
switch (phdr.p_type) {
elf.PT_GNU_STACK => {
if (phdr.p_memsz == 0) break;
assert(phdr.p_memsz % std.mem.page_size == 0);
assert(phdr.p_memsz % std.heap.page_size_min == 0);

// Silently fail if we are unable to get limits.
const limits = std.posix.getrlimit(.STACK) catch break;
@ -119,6 +119,13 @@ pub const Options = struct {
args: anytype,
) void = log.defaultLog,

/// Overrides `std.heap.page_size_min`.
page_size_min: ?usize = null,
/// Overrides `std.heap.page_size_max`.
page_size_max: ?usize = null,
/// Overrides default implementation for determining OS page size at runtime.
queryPageSize: fn () usize = heap.defaultQueryPageSize,

fmt_max_depth: usize = fmt.default_max_depth,

cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed,
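The new `std.Options` fields let a program pin the page-size bounds and supply its own runtime query. A hedged sketch; the 16 KiB value and the `fixedPageSize` helper are made up for illustration:

    const std = @import("std");

    fn fixedPageSize() usize {
        return 16 * 1024;
    }

    pub const std_options: std.Options = .{
        // Pin both bounds so std.heap.pageSize() can collapse to a constant.
        .page_size_min = 16 * 1024,
        .page_size_max = 16 * 1024,
        // Used when the bounds differ and the size must be determined at runtime.
        .queryPageSize = fixedPageSize,
    };

    pub fn main() void {
        std.debug.print("page size: {d}\n", .{std.heap.pageSize()});
    }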
@ -7,21 +7,27 @@ const math = std.math;
/// Initialized on startup. Read-only after that.
pub var random_seed: u32 = 0;

pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
pub const FailingAllocator = @import("testing/FailingAllocator.zig");
pub const failing_allocator = failing_allocator_instance.allocator();
var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), .{
.fail_index = 0,
});
var base_allocator_instance = std.heap.FixedBufferAllocator.init("");

/// This should only be used in temporary test programs.
pub const allocator = allocator_instance.allocator();
pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = b: {
if (!builtin.is_test)
@compileError("Cannot use testing allocator outside of test block");
pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{
.stack_trace_frames = if (std.debug.sys_can_stack_trace) 10 else 0,
.resize_stack_traces = true,
// A unique value so that when a default-constructed
// GeneralPurposeAllocator is incorrectly passed to testing allocator, or
// vice versa, panic occurs.
.canary = @truncate(0x2731e675c3a701ba),
}) = b: {
if (!builtin.is_test) @compileError("testing allocator used when not testing");
break :b .init;
};

pub const failing_allocator = failing_allocator_instance.allocator();
pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), .{ .fail_index = 0 });

pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");

/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
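With this change the testing allocator is a GeneralPurposeAllocator that records stack traces and carries a canary, so leaks surface when the test runner deinitializes it. Illustrative test, not part of the commit:

    const std = @import("std");

    test "testing allocator reports leaks at deinit" {
        const buf = try std.testing.allocator.alloc(u32, 8);
        // Omitting this free would make the test run fail with a leak report.
        defer std.testing.allocator.free(buf);
        try std.testing.expectEqual(@as(usize, 8), buf.len);
    }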
161 lib/std/testing/FailingAllocator.zig Normal file
@ -0,0 +1,161 @@
//! Allocator that fails after N allocations, useful for making sure out of
//! memory conditions are handled correctly.
//!
//! To use this, first initialize it and get an allocator with
//!
//! `const failing_allocator = &FailingAllocator.init(<allocator>,
//! <config>).allocator;`
//!
//! Then use `failing_allocator` anywhere you would have used a
//! different allocator.
const std = @import("../std.zig");
const mem = std.mem;
const FailingAllocator = @This();

alloc_index: usize,
resize_index: usize,
internal_allocator: mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
allocations: usize,
deallocations: usize,
stack_addresses: [num_stack_frames]usize,
has_induced_failure: bool,
fail_index: usize,
resize_fail_index: usize,

const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;

pub const Config = struct {
/// The number of successful allocations you can expect from this allocator.
/// The next allocation will fail. For example, with `fail_index` equal to
/// 2, the following test will pass:
///
/// var a = try failing_alloc.create(i32);
/// var b = try failing_alloc.create(i32);
/// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
fail_index: usize = std.math.maxInt(usize),

/// Number of successful resizes to expect from this allocator. The next resize will fail.
resize_fail_index: usize = std.math.maxInt(usize),
};

pub fn init(internal_allocator: mem.Allocator, config: Config) FailingAllocator {
return FailingAllocator{
.internal_allocator = internal_allocator,
.alloc_index = 0,
.resize_index = 0,
.allocated_bytes = 0,
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
.stack_addresses = undefined,
.has_induced_failure = false,
.fail_index = config.fail_index,
.resize_fail_index = config.resize_fail_index,
};
}

pub fn allocator(self: *FailingAllocator) mem.Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
}

fn alloc(
ctx: *anyopaque,
len: usize,
alignment: mem.Alignment,
return_address: usize,
) ?[*]u8 {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.alloc_index == self.fail_index) {
if (!self.has_induced_failure) {
@memset(&self.stack_addresses, 0);
var stack_trace = std.builtin.StackTrace{
.instruction_addresses = &self.stack_addresses,
.index = 0,
};
std.debug.captureStackTrace(return_address, &stack_trace);
self.has_induced_failure = true;
}
return null;
}
const result = self.internal_allocator.rawAlloc(len, alignment, return_address) orelse
return null;
self.allocated_bytes += len;
self.allocations += 1;
self.alloc_index += 1;
return result;
}

fn resize(
ctx: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
ra: usize,
) bool {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.resize_index == self.resize_fail_index)
return false;
if (!self.internal_allocator.rawResize(memory, alignment, new_len, ra))
return false;
if (new_len < memory.len) {
self.freed_bytes += memory.len - new_len;
} else {
self.allocated_bytes += new_len - memory.len;
}
self.resize_index += 1;
return true;
}

fn remap(
ctx: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
ra: usize,
) ?[*]u8 {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.resize_index == self.resize_fail_index) return null;
const new_ptr = self.internal_allocator.rawRemap(memory, alignment, new_len, ra) orelse return null;
if (new_len < memory.len) {
self.freed_bytes += memory.len - new_len;
} else {
self.allocated_bytes += new_len - memory.len;
}
self.resize_index += 1;
return new_ptr;
}

fn free(
ctx: *anyopaque,
old_mem: []u8,
alignment: mem.Alignment,
ra: usize,
) void {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
self.internal_allocator.rawFree(old_mem, alignment, ra);
self.deallocations += 1;
self.freed_bytes += old_mem.len;
}

/// Only valid once `has_induced_failure == true`
pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
std.debug.assert(self.has_induced_failure);
var len: usize = 0;
while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
len += 1;
}
return .{
.instruction_addresses = &self.stack_addresses,
.index = len,
};
}
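A short sketch of how the relocated FailingAllocator is typically driven from a test (illustrative, not part of the diff); with `fail_index = 1`, allocation #0 succeeds and allocation #1 is forced to fail:

    const std = @import("std");

    test "second allocation is made to fail" {
        var failing = std.testing.FailingAllocator.init(std.testing.allocator, .{ .fail_index = 1 });
        const gpa = failing.allocator();

        const first = try gpa.create(u64); // allocation #0 succeeds
        defer gpa.destroy(first);

        // Allocation #1 hits fail_index and returns error.OutOfMemory.
        try std.testing.expectError(error.OutOfMemory, gpa.create(u64));
    }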
@ -1,142 +0,0 @@
const std = @import("../std.zig");
const mem = std.mem;

pub const Config = struct {
/// The number of successful allocations you can expect from this allocator.
/// The next allocation will fail. For example, with `fail_index` equal to
/// 2, the following test will pass:
///
/// var a = try failing_alloc.create(i32);
/// var b = try failing_alloc.create(i32);
/// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
fail_index: usize = std.math.maxInt(usize),

/// Number of successful resizes to expect from this allocator. The next resize will fail.
resize_fail_index: usize = std.math.maxInt(usize),
};

/// Allocator that fails after N allocations, useful for making sure out of
/// memory conditions are handled correctly.
///
/// To use this, first initialize it and get an allocator with
///
/// `const failing_allocator = &FailingAllocator.init(<allocator>,
/// <config>).allocator;`
///
/// Then use `failing_allocator` anywhere you would have used a
/// different allocator.
pub const FailingAllocator = struct {
alloc_index: usize,
resize_index: usize,
internal_allocator: mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
allocations: usize,
deallocations: usize,
stack_addresses: [num_stack_frames]usize,
has_induced_failure: bool,
fail_index: usize,
resize_fail_index: usize,

const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;

pub fn init(internal_allocator: mem.Allocator, config: Config) FailingAllocator {
return FailingAllocator{
.internal_allocator = internal_allocator,
.alloc_index = 0,
.resize_index = 0,
.allocated_bytes = 0,
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
.stack_addresses = undefined,
.has_induced_failure = false,
.fail_index = config.fail_index,
.resize_fail_index = config.resize_fail_index,
};
}

pub fn allocator(self: *FailingAllocator) mem.Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}

fn alloc(
ctx: *anyopaque,
len: usize,
log2_ptr_align: u8,
return_address: usize,
) ?[*]u8 {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.alloc_index == self.fail_index) {
if (!self.has_induced_failure) {
@memset(&self.stack_addresses, 0);
var stack_trace = std.builtin.StackTrace{
.instruction_addresses = &self.stack_addresses,
.index = 0,
};
std.debug.captureStackTrace(return_address, &stack_trace);
self.has_induced_failure = true;
}
return null;
}
const result = self.internal_allocator.rawAlloc(len, log2_ptr_align, return_address) orelse
return null;
self.allocated_bytes += len;
self.allocations += 1;
self.alloc_index += 1;
return result;
}

fn resize(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align: u8,
new_len: usize,
ra: usize,
) bool {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.resize_index == self.resize_fail_index)
return false;
if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
return false;
if (new_len < old_mem.len) {
self.freed_bytes += old_mem.len - new_len;
} else {
self.allocated_bytes += new_len - old_mem.len;
}
self.resize_index += 1;
return true;
}

fn free(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align: u8,
ra: usize,
) void {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
self.deallocations += 1;
self.freed_bytes += old_mem.len;
}

/// Only valid once `has_induced_failure == true`
pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
std.debug.assert(self.has_induced_failure);
var len: usize = 0;
while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
len += 1;
}
return .{
.instruction_addresses = &self.stack_addresses,
.index = len,
};
}
};
@ -162,7 +162,7 @@ pub fn decompress(
var total_uncompressed: u64 = 0;
switch (method) {
.store => {
var buf: [std.mem.page_size]u8 = undefined;
var buf: [4096]u8 = undefined;
while (true) {
const len = try reader.read(&buf);
if (len == 0) break;
@ -1249,7 +1249,7 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
.{@errorName(err)},
));
defer zip_file.close();
var buf: [std.mem.page_size]u8 = undefined;
var buf: [4096]u8 = undefined;
while (true) {
const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
"read zip stream failed: {s}",
@ -493,49 +493,6 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\
);

// It is required to override the log function in order to print to stdout instead of stderr
cases.add("std.heap.LoggingAllocator logs to std.log",
\\const std = @import("std");
\\
\\pub const std_options: std.Options = .{
\\ .log_level = .debug,
\\ .logFn = log,
\\};
\\
\\pub fn main() !void {
\\ var allocator_buf: [10]u8 = undefined;
\\ const fba = std.heap.FixedBufferAllocator.init(&allocator_buf);
\\ var fba_wrapped = std.mem.validationWrap(fba);
\\ var logging_allocator = std.heap.loggingAllocator(fba_wrapped.allocator());
\\ const allocator = logging_allocator.allocator();
\\
\\ var a = try allocator.alloc(u8, 10);
\\ try std.testing.expect(allocator.resize(a, 5));
\\ a = a[0..5];
\\ try std.testing.expect(a.len == 5);
\\ try std.testing.expect(!allocator.resize(a, 20));
\\ allocator.free(a);
\\}
\\
\\pub fn log(
\\ comptime level: std.log.Level,
\\ comptime scope: @TypeOf(.EnumLiteral),
\\ comptime format: []const u8,
\\ args: anytype,
\\) void {
\\ const level_txt = comptime level.asText();
\\ const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
\\ const stdout = std.io.getStdOut().writer();
\\ nosuspend stdout.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
\\}
,
\\debug: alloc - success - len: 10, ptr_align: 0
\\debug: shrink - success - 10 to 5, buf_align: 0
\\error: expand - failure - 5 to 20, buf_align: 0
\\debug: free - len: 5
\\
);

cases.add("valid carriage return example", "const io = @import(\"std\").io;\r\n" ++ // Testing CRLF line endings are valid
"\r\n" ++
"pub \r fn main() void {\r\n" ++ // Testing isolated carriage return as whitespace is valid