Merge pull request #25302 from ziglang/growCapacity

std: remove loop from growCapacity
commit 594cb38fcb
Author: Andrew Kelley
Date:   2025-09-21 04:55:39 -07:00 (committed by GitHub)
6 changed files with 18 additions and 67 deletions
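
For context, a standalone sketch contrasting the old loop-based growth with the new single-step formula. The names growCapacityOld/growCapacityNew and the fixed init_capacity of 16 are illustrative only; in std, init_capacity is derived from std.atomic.cache_line / @sizeOf(T), so the real constant depends on the element type.

    const std = @import("std");

    // Illustrative constant; see the lead-in above for how std derives it.
    const init_capacity: usize = 16;

    // Old approach (removed by this commit): start from the current capacity
    // and repeatedly grow by ~1.5x plus a constant until the minimum is reached.
    fn growCapacityOld(current: usize, minimum: usize) usize {
        var new = current;
        while (true) {
            new +|= new / 2 + init_capacity;
            if (new >= minimum) return new;
        }
    }

    // New approach: one saturating step computed directly from the minimum,
    // independent of the current capacity.
    fn growCapacityNew(minimum: usize) usize {
        return minimum +| (minimum / 2 + init_capacity);
    }

    test "both strategies satisfy the requested minimum" {
        try std.testing.expect(growCapacityOld(0, 100) >= 100);
        try std.testing.expect(growCapacityNew(100) >= 100);
        // 100 + 100/2 + 16 = 166 with the illustrative constant.
        try std.testing.expectEqual(@as(usize, 166), growCapacityNew(100));
    }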


@@ -2667,7 +2667,7 @@ pub const Allocating = struct {
     pub fn ensureTotalCapacity(a: *Allocating, new_capacity: usize) Allocator.Error!void {
         // Protects growing unnecessarily since better_capacity will be larger.
         if (a.writer.buffer.len >= new_capacity) return;
-        const better_capacity = ArrayList(u8).growCapacity(a.writer.buffer.len, new_capacity);
+        const better_capacity = ArrayList(u8).growCapacity(new_capacity);
         return ensureTotalCapacityPrecise(a, better_capacity);
     }
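
A minimal usage sketch of the Allocating writer path above, assuming the std.Io.Writer.Allocating init/deinit API on current master; only the capacity lower bound is exercised.

    const std = @import("std");

    test "Allocating writer reserves at least the requested capacity" {
        // Assumed initializer; ensureTotalCapacity matches the hunk above.
        var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
        defer aw.deinit();
        try aw.ensureTotalCapacity(100);
        try std.testing.expect(aw.writer.buffer.len >= 100);
    }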


@@ -172,7 +172,7 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
             // a new buffer and doing our own copy. With a realloc() call,
             // the allocator implementation would pointlessly copy our
             // extra capacity.
-            const new_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_len);
+            const new_capacity = Aligned(T, alignment).growCapacity(new_len);
             const old_memory = self.allocatedSlice();
             if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
                 self.items.ptr = new_memory.ptr;
@@ -408,7 +408,7 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
             // Protects growing unnecessarily since better_capacity will be larger.
             if (self.capacity >= new_capacity) return;
-            const better_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_capacity);
+            const better_capacity = Aligned(T, alignment).growCapacity(new_capacity);
             return self.ensureTotalCapacityPrecise(better_capacity);
         }
@@ -1160,7 +1160,7 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
         /// Invalidates element pointers if additional memory is needed.
         pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
             if (self.capacity >= new_capacity) return;
-            return self.ensureTotalCapacityPrecise(gpa, growCapacity(self.capacity, new_capacity));
+            return self.ensureTotalCapacityPrecise(gpa, growCapacity(new_capacity));
         }

         /// If the current capacity is less than `new_capacity`, this function will
@@ -1359,17 +1359,12 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
             return self.getLast();
         }

-        const init_capacity = @as(comptime_int, @max(1, std.atomic.cache_line / @sizeOf(T)));
+        const init_capacity: comptime_int = @max(1, std.atomic.cache_line / @sizeOf(T));

         /// Called when memory growth is necessary. Returns a capacity larger than
         /// minimum that grows super-linearly.
-        pub fn growCapacity(current: usize, minimum: usize) usize {
-            var new = current;
-            while (true) {
-                new +|= new / 2 + init_capacity;
-                if (new >= minimum)
-                    return new;
-            }
+        pub fn growCapacity(minimum: usize) usize {
+            return minimum +| (minimum / 2 + init_capacity);
         }
     };
 }
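
As a quick sanity check of the new single-argument growCapacity, a hedged usage sketch assuming the unmanaged std.ArrayList API (.empty initializer, allocator-taking methods) on current master:

    const std = @import("std");

    test "ArrayList ensureTotalCapacity grows past the minimum" {
        const gpa = std.testing.allocator;
        var list: std.ArrayList(u8) = .empty;
        defer list.deinit(gpa);
        try list.ensureTotalCapacity(gpa, 100);
        // growCapacity(100) requests at least 100; the allocator may round up further.
        try std.testing.expect(list.capacity >= 100);
        try std.testing.expect(std.ArrayList(u8).growCapacity(100) >= 100);
    }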


@@ -56,7 +56,7 @@ pub fn Deque(comptime T: type) type {
         /// Invalidates element pointers if additional memory is needed.
         pub fn ensureTotalCapacity(deque: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
             if (deque.buffer.len >= new_capacity) return;
-            return deque.ensureTotalCapacityPrecise(gpa, growCapacity(deque.buffer.len, new_capacity));
+            return deque.ensureTotalCapacityPrecise(gpa, std.ArrayList(T).growCapacity(new_capacity));
         }

         /// If the current capacity is less than `new_capacity`, this function will
@@ -243,18 +243,6 @@
                 return index - head_len;
             }
         }
-
-        const init_capacity: comptime_int = @max(1, std.atomic.cache_line / @sizeOf(T));
-
-        /// Called when memory growth is necessary. Returns a capacity larger than
-        /// minimum that grows super-linearly.
-        fn growCapacity(current: usize, minimum: usize) usize {
-            var new = current;
-            while (true) {
-                new +|= new / 2 + init_capacity;
-                if (new >= minimum) return new;
-            }
-        }
     };
 }


@@ -431,33 +431,6 @@ test "skipValue" {
     try std.testing.expectError(error.SyntaxError, testSkipValue("[102, 111, 111}"));
 }

-fn testEnsureStackCapacity(do_ensure: bool) !void {
-    var fail_alloc = std.testing.FailingAllocator.init(std.testing.allocator, .{ .fail_index = 1 });
-    const failing_allocator = fail_alloc.allocator();
-    const nestings = 2049; // intentionally not a power of 2.
-    var input_string: std.ArrayListUnmanaged(u8) = .empty;
-    try input_string.appendNTimes(std.testing.allocator, '[', nestings);
-    try input_string.appendNTimes(std.testing.allocator, ']', nestings);
-    defer input_string.deinit(std.testing.allocator);
-    var scanner = Scanner.initCompleteInput(failing_allocator, input_string.items);
-    defer scanner.deinit();
-    if (do_ensure) {
-        try scanner.ensureTotalStackCapacity(nestings);
-    }
-    try scanner.skipValue();
-    try std.testing.expectEqual(Token.end_of_document, try scanner.next());
-}
-
-test "ensureTotalStackCapacity" {
-    // Once to demonstrate failure.
-    try std.testing.expectError(error.OutOfMemory, testEnsureStackCapacity(false));
-    // Then to demonstrate it works.
-    try testEnsureStackCapacity(true);
-}
-
 fn testDiagnosticsFromSource(expected_error: ?anyerror, line: u64, col: u64, byte_offset: u64, source: anytype) !void {
     var diagnostics = Diagnostics{};
     source.enableDiagnostics(&diagnostics);


@@ -914,7 +914,7 @@ test "parse at comptime" {
         uptime: u64,
     };
     const config = comptime x: {
-        var buf: [256]u8 = undefined;
+        var buf: [300]u8 = undefined;
         var fba = std.heap.FixedBufferAllocator.init(&buf);
         const res = parseFromSliceLeaky(Config, fba.allocator(), doc, .{});
         // Assert no error can occur since we are


@@ -457,24 +457,19 @@ pub fn MultiArrayList(comptime T: type) type {
         /// Invalidates element pointers if additional memory is needed.
         pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
             if (self.capacity >= new_capacity) return;
-            return self.setCapacity(gpa, growCapacity(self.capacity, new_capacity));
+            return self.setCapacity(gpa, growCapacity(new_capacity));
         }

-        const init_capacity = init: {
-            var max = 1;
-            for (fields) |field| max = @as(comptime_int, @max(max, @sizeOf(field.type)));
-            break :init @as(comptime_int, @max(1, std.atomic.cache_line / max));
+        const init_capacity: comptime_int = init: {
+            var max: comptime_int = 1;
+            for (fields) |field| max = @max(max, @sizeOf(field.type));
+            break :init @max(1, std.atomic.cache_line / max);
         };

-        /// Called when memory growth is necessary. Returns a capacity larger than
-        /// minimum that grows super-linearly.
-        fn growCapacity(current: usize, minimum: usize) usize {
-            var new = current;
-            while (true) {
-                new +|= new / 2 + init_capacity;
-                if (new >= minimum)
-                    return new;
-            }
+        /// Given a lower bound of required memory capacity, returns a larger value
+        /// with super-linear growth.
+        pub fn growCapacity(minimum: usize) usize {
+            return minimum +| (minimum / 2 + init_capacity);
         }

         /// Modify the array so that it can hold at least `additional_count` **more** items.
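
The analogous sketch for MultiArrayList, whose growCapacity becomes public with the same single-step formula; the element type and requested capacity here are arbitrary.

    const std = @import("std");

    test "MultiArrayList ensureTotalCapacity grows past the minimum" {
        const gpa = std.testing.allocator;
        const Elem = struct { a: u32, b: u8 };
        var list: std.MultiArrayList(Elem) = .{};
        defer list.deinit(gpa);
        try list.ensureTotalCapacity(gpa, 100);
        try std.testing.expect(list.capacity >= 100);
    }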