Mirror of https://codeberg.org/ziglang/zig.git
delete std.heap.WasmPageAllocator
This allocator has no purpose since it cannot truly fulfill the role of page allocation, and std.heap.wasm_allocator is better both in terms of performance and code size.

This commit redefines `std.heap.page_allocator` to be less strict: "On operating systems that support memory mapping, this allocator makes a syscall directly for every allocation and free. Otherwise, it falls back to the preferred singleton for the target. Thread-safe." This now matches how it was actually being implemented, and matches its use sites - which are mainly as the backing allocator for `std.heap.ArenaAllocator`.
parent cd365b8b82
commit fecdc53a48
4 changed files with 34 additions and 258 deletions
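The primary use site named above - page_allocator backing an arena - looks like this minimal sketch (illustrative, not part of this commit's diff):

const std = @import("std");

pub fn main() !void {
    // page_allocator maps pages directly (or falls back to the target's
    // preferred singleton), so it suits an arena well: the arena requests
    // large chunks rarely and releases them all at once.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();

    const allocator = arena.allocator();
    const buf = try allocator.alloc(u8, 1024);
    @memset(buf, 0);
    // No individual free needed; arena.deinit() releases everything.
}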
lib/std/heap.zig

@@ -18,7 +18,6 @@ pub const GeneralPurposeAllocatorConfig = @import("heap/general_purpose_allocato
 pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
 pub const Check = @import("heap/general_purpose_allocator.zig").Check;
 pub const WasmAllocator = @import("heap/WasmAllocator.zig");
-pub const WasmPageAllocator = @import("heap/WasmPageAllocator.zig");
 pub const PageAllocator = @import("heap/PageAllocator.zig");
 pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
 pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
@@ -223,36 +222,35 @@ fn rawCFree(
     c.free(buf.ptr);
 }

-/// This allocator makes a syscall directly for every allocation and free.
-/// Thread-safe and lock-free.
-pub const page_allocator = if (@hasDecl(root, "os") and
+/// On operating systems that support memory mapping, this allocator makes a
+/// syscall directly for every allocation and free.
+///
+/// Otherwise, it falls back to the preferred singleton for the target.
+///
+/// Thread-safe.
+pub const page_allocator: Allocator = if (@hasDecl(root, "os") and
     @hasDecl(root.os, "heap") and
     @hasDecl(root.os.heap, "page_allocator"))
     root.os.heap.page_allocator
-else if (builtin.target.isWasm())
-    Allocator{
-        .ptr = undefined,
-        .vtable = &WasmPageAllocator.vtable,
-    }
-else if (builtin.target.os.tag == .plan9)
-    Allocator{
-        .ptr = undefined,
-        .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
-    }
-else
-    Allocator{
-        .ptr = undefined,
-        .vtable = &PageAllocator.vtable,
-    };
+else if (builtin.target.isWasm()) .{
+    .ptr = undefined,
+    .vtable = &WasmAllocator.vtable,
+} else if (builtin.target.os.tag == .plan9) .{
+    .ptr = undefined,
+    .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
+} else .{
+    .ptr = undefined,
+    .vtable = &PageAllocator.vtable,
+};

 /// This allocator is fast, small, and specific to WebAssembly. In the future,
 /// this will be the implementation automatically selected by
 /// `GeneralPurposeAllocator` when compiling in `ReleaseSmall` mode for wasm32
 /// and wasm64 architectures.
 /// Until then, it is available here to play with.
-pub const wasm_allocator = Allocator{
+pub const wasm_allocator: Allocator = .{
     .ptr = undefined,
-    .vtable = &std.heap.WasmAllocator.vtable,
+    .vtable = &WasmAllocator.vtable,
 };

 /// Verifies that the adjusted length will still map to the full length
@@ -892,6 +890,5 @@ test {
     _ = GeneralPurposeAllocator;
     if (builtin.target.isWasm()) {
         _ = WasmAllocator;
-        _ = WasmPageAllocator;
    }
 }
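The `@hasDecl` chain above also gives programs an override hook: a root source file that declares `os.heap.page_allocator` supplies its own page allocator. A minimal sketch of such a hypothetical root file, assuming libc is linked so that `std.heap.c_allocator` is available:

const std = @import("std");

// std.heap.page_allocator resolves to root.os.heap.page_allocator
// whenever the root file declares it, bypassing the built-in selection.
pub const os = struct {
    pub const heap = struct {
        pub const page_allocator: std.mem.Allocator = std.heap.c_allocator;
    };
};

pub fn main() !void {
    // This allocation routes through the override above.
    const bytes = try std.heap.page_allocator.alloc(u8, 4096);
    defer std.heap.page_allocator.free(bytes);
}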
lib/std/heap/WasmAllocator.zig

@@ -10,11 +10,14 @@ const math = std.math;

 comptime {
     if (!builtin.target.isWasm()) {
-        @compileError("WasmPageAllocator is only available for wasm32 arch");
+        @compileError("only available for wasm32 arch");
     }
+    if (!builtin.single_threaded) {
+        @compileError("TODO implement support for multi-threaded wasm");
+    }
 }

-pub const vtable = Allocator.VTable{
+pub const vtable: Allocator.VTable = .{
     .alloc = alloc,
     .resize = resize,
     .free = free,
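The heap.zig doc comment says wasm_allocator "is available here to play with"; a minimal sketch of doing so on a wasm target (the build command in the comment is an assumption, not part of this commit):

// Assumed build: zig build-lib demo.zig -target wasm32-freestanding -dynamic
const std = @import("std");

export fn demo() i32 {
    // The wasm-specific general-purpose allocator; only compiles for
    // wasm32/wasm64 because of the comptime check shown above.
    const gpa = std.heap.wasm_allocator;
    const bytes = gpa.alloc(u8, 64) catch return -1;
    defer gpa.free(bytes);
    return @intCast(bytes.len);
}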
lib/std/heap/WasmPageAllocator.zig (deleted)

@@ -1,233 +0,0 @@
-const WasmPageAllocator = @This();
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const Allocator = std.mem.Allocator;
-const mem = std.mem;
-const maxInt = std.math.maxInt;
-const assert = std.debug.assert;
-
-comptime {
-    if (!builtin.target.isWasm()) {
-        @compileError("WasmPageAllocator is only available for wasm32 arch");
-    }
-}
-
-pub const vtable = Allocator.VTable{
-    .alloc = alloc,
-    .resize = resize,
-    .free = free,
-};
-
-const PageStatus = enum(u1) {
-    used = 0,
-    free = 1,
-
-    pub const none_free: u8 = 0;
-};
-
-const FreeBlock = struct {
-    data: []u128,
-
-    fn totalPages(self: FreeBlock) usize {
-        return self.data.len * 128;
-    }
-
-    fn isInitialized(self: FreeBlock) bool {
-        return self.data.len > 0;
-    }
-
-    fn getBit(self: FreeBlock, idx: usize) PageStatus {
-        const bit = mem.readPackedInt(u1, mem.sliceAsBytes(self.data), idx, .little);
-        return @as(PageStatus, @enumFromInt(bit));
-    }
-
-    fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
-        var i: usize = 0;
-        const bytes = mem.sliceAsBytes(self.data);
-        while (i < len) : (i += 1) {
-            mem.writePackedInt(u1, bytes, start_idx + i, @intFromEnum(val), .little);
-        }
-    }
-
-    // Use '0xFFFFFFFF' as a _missing_ sentinel
-    // This saves ~50 bytes compared to returning a nullable
-
-    // We can guarantee that conventional memory never gets this big,
-    // and wasm32 would not be able to address this memory (32 GB > usize).
-
-    // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
-    const not_found = maxInt(usize);
-
-    fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
-        @branchHint(.cold);
-        for (self.data, 0..) |segment, i| {
-            const spills_into_next = @as(i128, @bitCast(segment)) < 0;
-            const has_enough_bits = @popCount(segment) >= num_pages;
-
-            if (!spills_into_next and !has_enough_bits) continue;
-
-            var j: usize = i * 128;
-            while (j < (i + 1) * 128) : (j += 1) {
-                var count: usize = 0;
-                while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
-                    count += 1;
-                    const addr = j * mem.page_size;
-                    if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
-                        self.setBits(j, num_pages, .used);
-                        return j;
-                    }
-                }
-                j += count;
-            }
-        }
-        return not_found;
-    }
-
-    fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
-        self.setBits(start_idx, len, .free);
-    }
-};
-
-var _conventional_data = [_]u128{0} ** 16;
-// Marking `conventional` as const saves ~40 bytes
-const conventional = FreeBlock{ .data = &_conventional_data };
-var extended = FreeBlock{ .data = &[_]u128{} };
-
-fn extendedOffset() usize {
-    return conventional.totalPages();
-}
-
-fn nPages(memsize: usize) usize {
-    return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size;
-}
-
-fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
-    _ = ctx;
-    _ = ra;
-    if (len > maxInt(usize) - (mem.page_size - 1)) return null;
-    const page_count = nPages(len);
-    const page_idx = allocPages(page_count, log2_align) catch return null;
-    return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
-}
-
-fn allocPages(page_count: usize, log2_align: u8) !usize {
-    {
-        const idx = conventional.useRecycled(page_count, log2_align);
-        if (idx != FreeBlock.not_found) {
-            return idx;
-        }
-    }
-
-    const idx = extended.useRecycled(page_count, log2_align);
-    if (idx != FreeBlock.not_found) {
-        return idx + extendedOffset();
-    }
-
-    const next_page_idx = @wasmMemorySize(0);
-    const next_page_addr = next_page_idx * mem.page_size;
-    const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
-    const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
-    const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
-    if (result <= 0)
-        return error.OutOfMemory;
-    assert(result == next_page_idx);
-    const aligned_page_idx = next_page_idx + drop_page_count;
-    if (drop_page_count > 0) {
-        freePages(next_page_idx, aligned_page_idx);
-    }
-    return @as(usize, @intCast(aligned_page_idx));
-}
-
-fn freePages(start: usize, end: usize) void {
-    if (start < extendedOffset()) {
-        conventional.recycle(start, @min(extendedOffset(), end) - start);
-    }
-    if (end > extendedOffset()) {
-        var new_end = end;
-        if (!extended.isInitialized()) {
-            // Steal the last page from the memory currently being recycled
-            // TODO: would it be better if we use the first page instead?
-            new_end -= 1;
-
-            extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
-            // Since this is the first page being freed and we consume it, assume *nothing* is free.
-            @memset(extended.data, PageStatus.none_free);
-        }
-        const clamped_start = @max(extendedOffset(), start);
-        extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
-    }
-}
-
-fn resize(
-    ctx: *anyopaque,
-    buf: []u8,
-    log2_buf_align: u8,
-    new_len: usize,
-    return_address: usize,
-) bool {
-    _ = ctx;
-    _ = log2_buf_align;
-    _ = return_address;
-    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
-    if (new_len > aligned_len) return false;
-    const current_n = nPages(aligned_len);
-    const new_n = nPages(new_len);
-    if (new_n != current_n) {
-        const base = nPages(@intFromPtr(buf.ptr));
-        freePages(base + new_n, base + current_n);
-    }
-    return true;
-}
-
-fn free(
-    ctx: *anyopaque,
-    buf: []u8,
-    log2_buf_align: u8,
-    return_address: usize,
-) void {
-    _ = ctx;
-    _ = log2_buf_align;
-    _ = return_address;
-    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
-    const current_n = nPages(aligned_len);
-    const base = nPages(@intFromPtr(buf.ptr));
-    freePages(base, base + current_n);
-}
-
-test "internals" {
-    const page_allocator = std.heap.page_allocator;
-    const testing = std.testing;
-
-    const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
-    const initial = try page_allocator.alloc(u8, mem.page_size);
-    try testing.expect(@intFromPtr(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
-
-    var inplace = try page_allocator.realloc(initial, 1);
-    try testing.expectEqual(initial.ptr, inplace.ptr);
-    inplace = try page_allocator.realloc(inplace, 4);
-    try testing.expectEqual(initial.ptr, inplace.ptr);
-    page_allocator.free(inplace);
-
-    const reuse = try page_allocator.alloc(u8, 1);
-    try testing.expectEqual(initial.ptr, reuse.ptr);
-    page_allocator.free(reuse);
-
-    // This segment may span conventional and extended which has really complex rules so we're just ignoring it for now.
-    const padding = try page_allocator.alloc(u8, conventional_memsize);
-    page_allocator.free(padding);
-
-    const ext = try page_allocator.alloc(u8, conventional_memsize);
-    try testing.expect(@intFromPtr(ext.ptr) >= conventional_memsize);
-
-    const use_small = try page_allocator.alloc(u8, 1);
-    try testing.expectEqual(initial.ptr, use_small.ptr);
-    page_allocator.free(use_small);
-
-    inplace = try page_allocator.realloc(ext, 1);
-    try testing.expectEqual(ext.ptr, inplace.ptr);
-    page_allocator.free(inplace);
-
-    const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
-    try testing.expectEqual(ext.ptr, reuse_extended.ptr);
-    page_allocator.free(reuse_extended);
-}
lib/std/heap/general_purpose_allocator.zig

@@ -1171,7 +1171,11 @@ test "shrink" {
 }

 test "large object - grow" {
-    var gpa = GeneralPurposeAllocator(test_config){};
+    if (builtin.target.isWasm()) {
+        // Not expected to pass on targets that do not have memory mapping.
+        return error.SkipZigTest;
+    }
+    var gpa: GeneralPurposeAllocator(test_config) = .{};
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();

@@ -1344,6 +1348,11 @@ test "realloc large object to larger alignment" {
 }

 test "large object shrinks to small but allocation fails during shrink" {
+    if (builtin.target.isWasm()) {
+        // Not expected to pass on targets that do not have memory mapping.
+        return error.SkipZigTest;
+    }
+
     var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, .{ .fail_index = 3 });
     var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() };
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");