Merge pull request #17400 from joadnacer/ringbuffer-optim

std.RingBuffer: Implement mem.copy read/write
Andrew Kelley 2023-10-22 13:19:22 -04:00 committed by GitHub
commit d8c067966f
3 changed files with 95 additions and 10 deletions
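For context, a minimal usage sketch of the new slice-based API this commit adds (illustrative only, not part of the diff; the capacity and contents are arbitrary):

const std = @import("std");

test "RingBuffer slice read/write sketch" {
    // Illustrative sketch, not from this commit.
    var rb = try std.RingBuffer.init(std.testing.allocator, 8);
    defer rb.deinit(std.testing.allocator);

    // Previously this took a byte-at-a-time loop over write()/read();
    // writeSlice/readFirst now move whole slices with @memcpy.
    try rb.writeSlice("abcde");

    var out: [5]u8 = undefined;
    try rb.readFirst(&out, 5);
    try std.testing.expectEqualSlices(u8, "abcde", &out);
}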


@@ -11,6 +11,7 @@
const Allocator = @import("std").mem.Allocator;
const assert = @import("std").debug.assert;
const copyForwards = @import("std").mem.copyForwards;
const RingBuffer = @This();
@@ -18,7 +19,7 @@ data: []u8,
read_index: usize,
write_index: usize,
pub const Error = error{Full};
pub const Error = error{ Full, ReadLengthInvalid };
/// Allocate a new `RingBuffer`; `deinit()` should be called to free the buffer.
pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
@@ -63,6 +64,7 @@ pub fn writeAssumeCapacity(self: *RingBuffer, byte: u8) void {
/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
/// buffer does not have enough space, without writing any data.
/// Uses memcpy and so `bytes` must not overlap ring buffer data.
pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
if (self.len() + bytes.len > self.data.len) return error.Full;
self.writeSliceAssumeCapacity(bytes);
@@ -70,8 +72,51 @@ pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
/// Write `bytes` into the ring buffer. If there is not enough space, older
/// bytes will be overwritten.
/// Uses memcpy and so `bytes` must not overlap ring buffer data.
pub fn writeSliceAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
for (bytes) |b| self.writeAssumeCapacity(b);
const data_start = self.mask(self.write_index);
const part1_data_end = @min(data_start + bytes.len, self.data.len);
const part1_len = part1_data_end - data_start;
@memcpy(self.data[data_start..part1_data_end], bytes[0..part1_len]);
const remaining = bytes.len - part1_len;
const to_write = @min(remaining, remaining % self.data.len + self.data.len);
const part2_bytes_start = bytes.len - to_write;
const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
const part2_len = part2_bytes_end - part2_bytes_start;
@memcpy(self.data[0..part2_len], bytes[part2_bytes_start..part2_bytes_end]);
if (part2_bytes_end != bytes.len) {
const part3_len = bytes.len - part2_bytes_end;
@memcpy(self.data[0..part3_len], bytes[part2_bytes_end..bytes.len]);
}
self.write_index = self.mask2(self.write_index + bytes.len);
}
/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
/// buffer does not have enough space, without writing any data.
/// Uses copyForwards and can write slices from this RingBuffer into itself.
pub fn writeSliceForwards(self: *RingBuffer, bytes: []const u8) Error!void {
if (self.len() + bytes.len > self.data.len) return error.Full;
self.writeSliceForwardsAssumeCapacity(bytes);
}
/// Write `bytes` into the ring buffer. If there is not enough space, older
/// bytes will be overwritten.
/// Uses copyForwards and can write slices from this RingBuffer into itself.
pub fn writeSliceForwardsAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
const data_start = self.mask(self.write_index);
const part1_data_end = @min(data_start + bytes.len, self.data.len);
const part1_len = part1_data_end - data_start;
copyForwards(u8, self.data[data_start..], bytes[0..part1_len]);
const remaining = bytes.len - part1_len;
const to_write = @min(remaining, remaining % self.data.len + self.data.len);
const part2_bytes_start = bytes.len - to_write;
const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
copyForwards(u8, self.data[0..], bytes[part2_bytes_start..part2_bytes_end]);
if (part2_bytes_end != bytes.len)
copyForwards(u8, self.data[0..], bytes[part2_bytes_end..bytes.len]);
self.write_index = self.mask2(self.write_index + bytes.len);
}
/// Consume a byte from the ring buffer and return it. Returns `null` if the
@@ -90,6 +135,50 @@ pub fn readAssumeLength(self: *RingBuffer) u8 {
return byte;
}
/// Reads the first `length` bytes written to the ring buffer into `dest`; returns
/// `error.ReadLengthInvalid` if `length` is greater than the ring buffer or `dest` length.
/// Uses memcpy and so `dest` must not overlap ring buffer data.
pub fn readFirst(self: *RingBuffer, dest: []u8, length: usize) Error!void {
if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
self.readFirstAssumeLength(dest, length);
}
/// Reads the first `length` bytes written to the ring buffer into `dest`;
/// asserts that `length` is not greater than the ring buffer or `dest` length.
/// Uses memcpy and so `dest` must not overlap ring buffer data.
pub fn readFirstAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
assert(length <= self.len() and length <= dest.len);
const data_start = self.mask(self.read_index);
const part1_data_end = @min(self.data.len, data_start + length);
const part1_len = part1_data_end - data_start;
const part2_len = length - part1_len;
@memcpy(dest[0..part1_len], self.data[data_start..part1_data_end]);
@memcpy(dest[part1_len..length], self.data[0..part2_len]);
self.read_index = self.mask2(self.read_index + length);
}
/// Reads the last `length` bytes written to the ring buffer into `dest`; returns
/// `error.ReadLengthInvalid` if `length` is greater than the ring buffer or `dest` length.
/// Uses memcpy and so `dest` must not overlap ring buffer data.
pub fn readLast(self: *RingBuffer, dest: []u8, length: usize) Error!void {
if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
self.readLastAssumeLength(dest, length);
}
/// Reads the last `length` bytes written to the ring buffer into `dest`;
/// asserts that `length` is not greater than the ring buffer or `dest` length.
/// Uses memcpy and so `dest` must not overlap ring buffer data.
pub fn readLastAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
assert(length <= self.len() and length <= dest.len);
const data_start = self.mask(self.write_index + self.data.len - length);
const part1_data_end = @min(self.data.len, data_start + length);
const part1_len = part1_data_end - data_start;
const part2_len = length - part1_len;
@memcpy(dest[0..part1_len], self.data[data_start..part1_data_end]);
@memcpy(dest[part1_len..length], self.data[0..part2_len]);
self.write_index = if (self.write_index >= self.data.len) self.write_index - length else data_start;
}
/// Returns `true` if the ring buffer is empty and `false` otherwise.
pub fn isEmpty(self: RingBuffer) bool {
return self.write_index == self.read_index;
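
To make the wrap-around handling in writeSliceAssumeCapacity and readFirst concrete, a small sketch (the capacity and byte values below are made up for illustration): with capacity 8, writing 6 bytes and then consuming 4 leaves the write index at 6, so the next 6-byte write splits into a 2-byte memcpy into data[6..8] and a 4-byte memcpy into data[0..4], and readFirst still hands the bytes back in write order.

const std = @import("std");

test "RingBuffer wrap-around sketch" {
    // Illustrative sketch, not from this commit; values chosen to force a wrap.
    var rb = try std.RingBuffer.init(std.testing.allocator, 8);
    defer rb.deinit(std.testing.allocator);

    try rb.writeSlice("abcdef"); // write_index = 6
    var skip: [4]u8 = undefined;
    try rb.readFirst(&skip, 4); // read_index = 4; "ef" is still buffered

    // This write wraps: "gh" goes to data[6..8], "ijkl" to data[0..4].
    try rb.writeSlice("ghijkl");

    var out: [8]u8 = undefined;
    try rb.readFirst(&out, 8);
    try std.testing.expectEqualSlices(u8, "efghijkl", &out);
}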


@@ -219,9 +219,7 @@ pub fn DecompressStream(
}
const size = @min(self.buffer.len(), buffer.len);
for (0..size) |i| {
buffer[i] = self.buffer.read().?;
}
self.buffer.readFirstAssumeLength(buffer, size);
if (self.state == .LastBlock and self.buffer.len() == 0) {
self.state = .NewFrame;
self.allocator.free(self.literal_fse_buffer);


@@ -311,8 +311,8 @@ pub const DecodeState = struct {
try self.decodeLiteralsRingBuffer(dest, sequence.literal_length);
const copy_start = dest.write_index + dest.data.len - sequence.offset;
const copy_slice = dest.sliceAt(copy_start, sequence.match_length);
for (copy_slice.first) |b| dest.writeAssumeCapacity(b);
for (copy_slice.second) |b| dest.writeAssumeCapacity(b);
dest.writeSliceForwardsAssumeCapacity(copy_slice.first);
dest.writeSliceForwardsAssumeCapacity(copy_slice.second);
self.written_count += sequence.match_length;
}
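
For intuition about why the match copy above uses writeSliceForwardsAssumeCapacity rather than the memcpy-based variant: an overlapping zstd match (offset smaller than the match length) must re-read bytes it has just written, which is what a forward, element-by-element copy gives you, while @memcpy forbids overlap. A standalone sketch of that behavior (illustrative, not part of this commit; it relies on copyForwards proceeding from low to high indices):

const std = @import("std");

test "forward copy of an overlapping match" {
    // Offset-1 match: each copied byte re-reads the byte written just before it,
    // so the previous byte is repeated six times.
    var buf = "ab------".*;
    std.mem.copyForwards(u8, buf[2..8], buf[1..7]);
    try std.testing.expectEqualSlices(u8, "abbbbbbb", &buf);
}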
@@ -723,9 +723,7 @@ pub fn decodeBlockRingBuffer(
},
.rle => {
if (src.len < 1) return error.MalformedRleBlock;
for (0..block_size) |_| {
dest.writeAssumeCapacity(src[0]);
}
dest.writeSliceAssumeCapacity(src[0..block_size]);
consumed_count.* += 1;
decode_state.written_count += block_size;
return block_size;