Support atomic operations with bools and non power of two integers #4707

Merged · 11 commits · Mar 12, 2020
doc/langref.html.in (45 changes: 10 additions & 35 deletions)

@@ -6728,17 +6728,8 @@ async fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
-{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
-an integer whose bit count meets these requirements:
-</p>
-<ul>
-<li>At least 8</li>
-<li>At most the same as usize</li>
-<li>Power of 2</li>
-</ul> or an enum with a valid integer tag type.
-<p>
-TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-we can remove this restriction
+{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+an integer or an enum.
</p>
{#header_close#}
{#header_open|@atomicRmw#}
@@ -6747,17 +6738,8 @@ async fn func(y: *i32) void {
This builtin function atomically modifies memory and then returns the previous value.
</p>
<p>
-{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
-or an integer whose bit count meets these requirements:
-</p>
-<ul>
-<li>At least 8</li>
-<li>At most the same as usize</li>
-<li>Power of 2</li>
-</ul>
-<p>
-TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-we can remove this restriction
+{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+an integer or an enum.
</p>
<p>
Supported operations:
@@ -6782,17 +6764,8 @@ async fn func(y: *i32) void {
This builtin function atomically stores a value.
</p>
<p>
-{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
-an integer whose bit count meets these requirements:
-</p>
-<ul>
-<li>At least 8</li>
-<li>At most the same as usize</li>
-<li>Power of 2</li>
-</ul> or an enum with a valid integer tag type.
-<p>
-TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-we can remove this restriction
+{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+an integer or an enum.
</p>
{#header_close#}
{#header_open|@bitCast#}
@@ -7108,7 +7081,8 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
more efficiently in machine instructions.
</p>
<p>
-{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+an integer or an enum.
</p>
<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgWeak#}
@@ -7136,7 +7110,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
</p>
<p>
-{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+an integer or an enum.
</p>
<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgStrong#}
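The net effect of the langref edits above: `@atomicLoad`, `@atomicRmw`, `@atomicStore`, and the cmpxchg builtins now accept `bool` and any integer width, not just power-of-two sizes between 8 and `usize`. A minimal sketch of what the relaxed rules permit, assuming a compiler with this PR merged (`u7` is chosen arbitrarily as a non-power-of-two width):

```zig
const expect = @import("std").testing.expect;

test "atomics on bool and a non-power-of-two integer" {
    var flag: bool = false;
    @atomicStore(bool, &flag, true, .SeqCst);
    expect(@atomicLoad(bool, &flag, .SeqCst));

    var counter: u7 = 0; // 7-bit integer: previously rejected
    _ = @atomicRmw(u7, &counter, .Add, 1, .SeqCst);
    expect(@atomicLoad(u7, &counter, .SeqCst) == 1);

    // cmpxchg returns null on success, otherwise the observed value.
    expect(@cmpxchgStrong(u7, &counter, 1, 2, .SeqCst, .SeqCst) == null);
}
```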
lib/std/atomic/int.zig (13 changes: 5 additions & 8 deletions)
@@ -1,6 +1,3 @@
-const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-
/// Thread-safe, lock-free integer
pub fn Int(comptime T: type) type {
return struct {
@@ -14,28 +11,28 @@ pub fn Int(comptime T: type) type {

/// Returns previous value
pub fn incr(self: *Self) T {
-return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+return @atomicRmw(T, &self.unprotected_value, .Add, 1, .SeqCst);
}

/// Returns previous value
pub fn decr(self: *Self) T {
-return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+return @atomicRmw(T, &self.unprotected_value, .Sub, 1, .SeqCst);
}

pub fn get(self: *Self) T {
-return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+return @atomicLoad(T, &self.unprotected_value, .SeqCst);
}

pub fn set(self: *Self, new_value: T) void {
_ = self.xchg(new_value);
}

pub fn xchg(self: *Self, new_value: T) T {
-return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst);
+return @atomicRmw(T, &self.unprotected_value, .Xchg, new_value, .SeqCst);
}

pub fn fetchAdd(self: *Self, op: T) T {
-return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, op, AtomicOrder.SeqCst);
+return @atomicRmw(T, &self.unprotected_value, .Add, op, .SeqCst);
}
};
}
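For context, a short usage sketch of this wrapper after the change; it assumes `Int.init` sets `unprotected_value`, which sits in the elided lines of this hunk:

```zig
const std = @import("std");
const Int = std.atomic.Int;

test "atomic Int counter sketch" {
    var counter = Int(usize).init(0);
    _ = counter.incr(); // returns the previous value (0)
    _ = counter.fetchAdd(2); // returns the previous value (1)
    std.testing.expect(counter.get() == 3);
    _ = counter.decr(); // back down to 2
    counter.set(0); // implemented as xchg; old value discarded
}
```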
lib/std/atomic/queue.zig (20 changes: 9 additions & 11 deletions)
@@ -1,7 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const expect = std.testing.expect;

@@ -149,7 +147,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
-puts_done: u8, // TODO make this a bool
+puts_done: bool,
};

// TODO add lazy evaluated build options and then put puts_per_thread behind
@@ -173,7 +171,7 @@ test "std.atomic.Queue" {
.queue = &queue,
.put_sum = 0,
.get_sum = 0,
-.puts_done = 0,
+.puts_done = false,
.get_count = 0,
};

@@ -186,7 +184,7 @@ test "std.atomic.Queue" {
}
}
expect(!context.queue.isEmpty());
-context.puts_done = 1;
+context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -208,7 +206,7 @@ test "std.atomic.Queue" {

for (putters) |t|
t.wait();
-@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+@atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();

@@ -235,25 +233,25 @@ fn startPuts(ctx: *Context) u8 {
std.time.sleep(1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
-node.* = Queue(i32).Node{
+node.* = .{
.prev = undefined,
.next = undefined,
.data = x,
};
ctx.queue.put(node);
-_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+_ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}

fn startGets(ctx: *Context) u8 {
while (true) {
-const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);

while (ctx.queue.get()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
-_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+_ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+_ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}

if (last) return 0;
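The queue test's shutdown handshake is now a plain `bool` flag instead of a `u8` compared against 1. Reduced to a sketch (names are illustrative, not from the diff): the getter must snapshot the flag before draining, so that an empty queue observed after `last == true` really means no more items can arrive.

```zig
var puts_done: bool = false; // illustrative flag, mirrors Context.puts_done

fn signalPutsDone() void {
    // Publish completion to the consumer threads.
    @atomicStore(bool, &puts_done, true, .SeqCst);
}

fn getterPass() bool {
    // Read the flag *before* draining: if it was already set and the
    // queue then drains to empty, the consumer can safely exit.
    const last = @atomicLoad(bool, &puts_done, .SeqCst);
    // ... drain the queue here ...
    return last; // caller exits its loop when this is true
}
```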
lib/std/atomic/stack.zig (31 changes: 15 additions & 16 deletions)
@@ -1,6 +1,5 @@
const assert = std.debug.assert;
const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
const expect = std.testing.expect;

/// Many reader, many writer, non-allocating, thread-safe
@@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type {
root: ?*Node,
lock: @TypeOf(lock_init),

-const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
+const lock_init = if (builtin.single_threaded) {} else false;

pub const Self = @This();

@@ -31,16 +30,16 @@ pub fn Stack(comptime T: type) type {
/// being the first item in the stack, returns the other item that was there.
pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
-return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
}

pub fn push(self: *Self, node: *Node) void {
if (builtin.single_threaded) {
node.next = self.root;
self.root = node;
} else {
-while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));

node.next = self.root;
self.root = node;
@@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type {
self.root = root.next;
return root;
} else {
-while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));

const root = self.root orelse return null;
self.root = root.next;
@@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type {
}

pub fn isEmpty(self: *Self) bool {
-return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
+return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
}
};
}
@@ -75,7 +74,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
-puts_done: u8, // TODO make this a bool
+puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
@@ -98,7 +97,7 @@ test "std.atomic.stack" {
.stack = &stack,
.put_sum = 0,
.get_sum = 0,
-.puts_done = 0,
+.puts_done = false,
.get_count = 0,
};

@@ -109,7 +108,7 @@ test "std.atomic.stack" {
expect(startPuts(&context) == 0);
}
}
-context.puts_done = 1;
+context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -128,7 +127,7 @@ test "std.atomic.stack" {

for (putters) |t|
t.wait();
-@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+@atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
}
@@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 {
.data = x,
};
ctx.stack.push(node);
-_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+_ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}

fn startGets(ctx: *Context) u8 {
while (true) {
-const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);

while (ctx.stack.pop()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
-_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+_ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+_ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}

if (last) return 0;
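`push` and `pop` previously spun on a `u8` with `Xchg 1`/`Xchg 0` and compared against zero; with `bool`, the exchange result is the old flag itself. The same pattern as a standalone sketch (names are illustrative, not from the diff):

```zig
const assert = @import("std").debug.assert;

var locked: bool = false;

fn acquire() void {
    // Keep exchanging `true` in; the first exchange that returns `false`
    // means the lock was free and we now hold it.
    while (@atomicRmw(bool, &locked, .Xchg, true, .SeqCst)) {}
}

fn release() void {
    // Swap `false` back in; the previous value must have been `true`.
    assert(@atomicRmw(bool, &locked, .Xchg, false, .SeqCst));
}
```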
lib/std/event/channel.zig (23 changes: 10 additions & 13 deletions)

@@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type {
putters: std.atomic.Queue(PutNode),
get_count: usize,
put_count: usize,
-dispatch_lock: u8, // TODO make this a bool
-need_dispatch: u8, // TODO make this a bool
+dispatch_lock: bool,
+need_dispatch: bool,

// simple fixed size ring buffer
buffer_nodes: []T,
@@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type {
.buffer_len = 0,
.buffer_nodes = buffer,
.buffer_index = 0,
-.dispatch_lock = 0,
-.need_dispatch = 0,
+.dispatch_lock = false,
+.need_dispatch = false,
.getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(),
.or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
@@ -165,15 +165,14 @@ pub fn Channel(comptime T: type) type {

fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
-@atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
+@atomicStore(bool, &self.need_dispatch, true, .SeqCst);

lock: while (true) {
// set the lock flag
-const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
-if (prev_lock != 0) return;
+if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return;

// clear the need_dispatch flag since we're about to do it
-@atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
+@atomicStore(bool, &self.need_dispatch, false, .SeqCst);

while (true) {
one_dispatch: {
@@ -250,14 +249,12 @@ pub fn Channel(comptime T: type) type {
}

// clear need-dispatch flag
-const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
-if (need_dispatch != 0) continue;
+if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue;

-const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
-assert(my_lock != 0);
+assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst));

// we have to check again now that we unlocked
-if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
+if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;

return;
}
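With `bool` flags, the dispatch protocol reads directly off the `Xchg` results. The shape of the logic, reduced to a sketch with the getter/putter matching elided:

```zig
var dispatch_lock: bool = false;
var need_dispatch: bool = false;

fn dispatch() void {
    // Record that work exists, then try to become the single dispatcher.
    @atomicStore(bool, &need_dispatch, true, .SeqCst);
    lock: while (true) {
        // If someone else holds the lock, they will see need_dispatch.
        if (@atomicRmw(bool, &dispatch_lock, .Xchg, true, .SeqCst)) return;
        while (true) {
            @atomicStore(bool, &need_dispatch, false, .SeqCst);

            // ... match getters with putters here ...

            // More work arrived while dispatching? Run another pass.
            if (@atomicRmw(bool, &need_dispatch, .Xchg, false, .SeqCst)) continue;

            // Unlock, then re-check so a flag set in the gap is not lost.
            _ = @atomicRmw(bool, &dispatch_lock, .Xchg, false, .SeqCst);
            if (@atomicLoad(bool, &need_dispatch, .SeqCst)) continue :lock;
            return;
        }
    }
}
```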