Mirror of https://codeberg.org/ziglang/zig.git (synced 2025-12-06 13:54:21 +00:00)
std: make RwLock test less intensive
This test called `yield` 80,000 times, which is nothing on a lightly loaded system, but murder on a CI system; macOS' scheduler in particular doesn't seem to cope with it well. The `yield` calls also weren't necessarily doing what they were meant to: if the optimizer could prove that `yield` doesn't clobber some memory, it could happily reorder accesses around the `yield`s anyway! The test has been simplified and made to work better, and the number of yields has been reduced. The number of overall iterations has also been reduced, because with the `yield` calls making races very likely, we don't need many iterations to be confident that the implementation is race-free.
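To illustrate the reordering hazard (a reduced sketch, not code from this commit; `checkedPair` and its test are hypothetical): with plain pointers, an optimizer that can prove `std.Thread.yield` never touches the pointee is free to hoist or sink the loads, so no real memory access need straddle the yield. Making the pointers `volatile` pins one load on each side:

    const std = @import("std");

    // Hypothetical helper: loads two fields with a yield in between.
    // Because the pointers are `volatile`, each `.*` is a real load that
    // the optimizer may not move to the other side of the yield.
    fn checkedPair(a: *const volatile usize, b: *const volatile usize) !void {
        const old_a = a.*;
        try std.Thread.yield(); // a racing writer gets a chance to run here
        const old_b = b.*;
        try std.testing.expect(old_a == old_b);
    }

    test "volatile loads straddle the yield (sketch)" {
        var a: usize = 123;
        var b: usize = 123;
        // Single-threaded here, so the pair trivially matches; the point is
        // only that both loads survive optimization, one on each side of the yield.
        try checkedPair(&a, &b);
    }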
parent 7a5d2a196f
commit b866c14328
1 changed file with 61 additions and 56 deletions
@@ -301,81 +301,86 @@ test "concurrent access" {
     const num_writers: usize = 2;
     const num_readers: usize = 4;
-    const num_writes: usize = 4096;
-    const num_reads: usize = 8192;
+    const num_writes: usize = 1000;
+    const num_reads: usize = 2000;
 
     const Runner = struct {
-        const Self = @This();
+        const Runner = @This();
 
-        rwl: RwLock = .{},
-        writes: usize = 0,
-        reads: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
+        rwl: RwLock,
+        writes: usize,
+        reads: std.atomic.Value(usize),
 
-        term1: usize = 0,
-        term2: usize = 0,
-        term_sum: usize = 0,
+        val_a: usize,
+        val_b: usize,
 
-        fn reader(self: *Self) !void {
-            while (true) {
-                self.rwl.lockShared();
-                defer self.rwl.unlockShared();
-
-                if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
-                    break;
-
-                try self.check();
-
-                _ = self.reads.fetchAdd(1, .monotonic);
-            }
-        }
-
-        fn writer(self: *Self, thread_idx: usize) !void {
+        fn reader(run: *Runner, thread_idx: usize) !void {
             var prng = std.Random.DefaultPrng.init(thread_idx);
-            var rnd = prng.random();
+            const rnd = prng.random();
             while (true) {
-                self.rwl.lock();
-                defer self.rwl.unlock();
+                run.rwl.lockShared();
+                defer run.rwl.unlockShared();
 
-                if (self.writes >= num_writes)
-                    break;
+                try testing.expect(run.writes <= num_writes);
+                if (run.reads.fetchAdd(1, .monotonic) >= num_reads) break;
 
-                try self.check();
-
-                const term1 = rnd.int(usize);
-                self.term1 = term1;
-                try std.Thread.yield();
-
-                const term2 = rnd.int(usize);
-                self.term2 = term2;
-                try std.Thread.yield();
-
-                self.term_sum = term1 +% term2;
-                self.writes += 1;
+                // We use `volatile` accesses so that we can make sure the memory is accessed either
+                // side of a yield, maximising chances of a race.
+                const a_ptr: *const volatile usize = &run.val_a;
+                const b_ptr: *const volatile usize = &run.val_b;
+
+                const old_a = a_ptr.*;
+                if (rnd.boolean()) try std.Thread.yield();
+                const old_b = b_ptr.*;
+                try testing.expect(old_a == old_b);
             }
         }
 
-        fn check(self: *const Self) !void {
-            const term_sum = self.term_sum;
-            try std.Thread.yield();
-
-            const term2 = self.term2;
-            try std.Thread.yield();
-
-            const term1 = self.term1;
-            try testing.expectEqual(term_sum, term1 +% term2);
+        fn writer(run: *Runner, thread_idx: usize) !void {
+            var prng = std.Random.DefaultPrng.init(thread_idx);
+            const rnd = prng.random();
+            while (true) {
+                run.rwl.lock();
+                defer run.rwl.unlock();
+
+                try testing.expect(run.writes <= num_writes);
+                if (run.writes == num_writes) break;
+
+                // We use `volatile` accesses so that we can make sure the memory is accessed either
+                // side of a yield, maximising chances of a race.
+                const a_ptr: *volatile usize = &run.val_a;
+                const b_ptr: *volatile usize = &run.val_b;
+
+                const new_val = rnd.int(usize);
+
+                const old_a = a_ptr.*;
+                a_ptr.* = new_val;
+                if (rnd.boolean()) try std.Thread.yield();
+                const old_b = b_ptr.*;
+                b_ptr.* = new_val;
+                try testing.expect(old_a == old_b);
+
+                run.writes += 1;
+            }
         }
     };
 
-    var runner = Runner{};
-    var threads: [num_writers + num_readers]std.Thread = undefined;
+    var run: Runner = .{
+        .rwl = .{},
+        .writes = 0,
+        .reads = .init(0),
+        .val_a = 0,
+        .val_b = 0,
+    };
+    var write_threads: [num_writers]std.Thread = undefined;
+    var read_threads: [num_readers]std.Thread = undefined;
 
-    for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
-    for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
+    for (&write_threads, 0..) |*t, i| t.* = try .spawn(.{}, Runner.writer, .{ &run, i });
+    for (&read_threads, num_writers..) |*t, i| t.* = try .spawn(.{}, Runner.reader, .{ &run, i });
 
-    for (threads) |t| t.join();
+    for (write_threads) |t| t.join();
+    for (read_threads) |t| t.join();
 
-    try testing.expectEqual(num_writes, runner.writes);
-    //std.debug.print("reads={}\n", .{ runner.reads.load(.unordered)});
+    try testing.expect(run.writes == num_writes);
+    try testing.expect(run.reads.raw >= num_reads);
 }
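The invariant the rewritten test leans on, shown in isolation (a minimal single-threaded sketch; the `Pair` type here is illustrative, not part of the commit): a writer publishes the same fresh value to both fields while holding the exclusive lock, with a yield between the two stores, so any reader holding the shared lock must observe the fields equal; a mismatch can only mean the lock failed to exclude a writer. Note also that `reads` stays an atomic counter because many readers hold the shared lock at once, whereas `writes` is only ever touched under the exclusive lock.

    const std = @import("std");

    const Pair = struct {
        rwl: std.Thread.RwLock = .{},
        val_a: usize = 0,
        val_b: usize = 0,

        fn write(p: *Pair, v: usize) !void {
            p.rwl.lock();
            defer p.rwl.unlock();
            const a_ptr: *volatile usize = &p.val_a;
            const b_ptr: *volatile usize = &p.val_b;
            a_ptr.* = v;
            try std.Thread.yield(); // a broken lock would admit a reader here
            b_ptr.* = v;
        }

        fn read(p: *Pair) !void {
            p.rwl.lockShared();
            defer p.rwl.unlockShared();
            const a_ptr: *const volatile usize = &p.val_a;
            const b_ptr: *const volatile usize = &p.val_b;
            const a = a_ptr.*;
            try std.Thread.yield();
            const b = b_ptr.*;
            try std.testing.expect(a == b); // mutual exclusion implies a == b
        }
    };

    test "paired-field invariant (sketch)" {
        var p: Pair = .{};
        try p.write(42);
        try p.read();
    }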