Commit f51bec3

Merge pull request #4707 from Vexu/small-atomics
Support atomic operations with bools and non power of two integers
2 parents aa49f97 + 71d776c commit f51bec3

File tree: 11 files changed, +415 −176 lines

doc/langref.html.in

Lines changed: 10 additions & 35 deletions
@@ -6728,17 +6728,8 @@ async fn func(y: *i32) void {
       This builtin function atomically dereferences a pointer and returns the value.
       </p>
       <p>
-      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
-      an integer whose bit count meets these requirements:
-      </p>
-      <ul>
-      <li>At least 8</li>
-      <li>At most the same as usize</li>
-      <li>Power of 2</li>
-      </ul> or an enum with a valid integer tag type.
-      <p>
-      TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-      we can remove this restriction
+      {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+      an integer or an enum.
       </p>
       {#header_close#}
       {#header_open|@atomicRmw#}
@@ -6747,17 +6738,8 @@ async fn func(y: *i32) void {
       This builtin function atomically modifies memory and then returns the previous value.
       </p>
       <p>
-      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
-      or an integer whose bit count meets these requirements:
-      </p>
-      <ul>
-      <li>At least 8</li>
-      <li>At most the same as usize</li>
-      <li>Power of 2</li>
-      </ul>
-      <p>
-      TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-      we can remove this restriction
+      {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+      an integer or an enum.
       </p>
       <p>
       Supported operations:
@@ -6782,17 +6764,8 @@ async fn func(y: *i32) void {
       This builtin function atomically stores a value.
       </p>
       <p>
-      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
-      an integer whose bit count meets these requirements:
-      </p>
-      <ul>
-      <li>At least 8</li>
-      <li>At most the same as usize</li>
-      <li>Power of 2</li>
-      </ul> or an enum with a valid integer tag type.
-      <p>
-      TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
-      we can remove this restriction
+      {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+      an integer or an enum.
       </p>
       {#header_close#}
       {#header_open|@bitCast#}
@@ -7108,7 +7081,8 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
       more efficiently in machine instructions.
       </p>
       <p>
-      {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+      {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+      an integer or an enum.
       </p>
       <p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
       {#see_also|Compile Variables|cmpxchgWeak#}
@@ -7136,7 +7110,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
       However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
       </p>
       <p>
-      {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+      {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+      an integer or an enum.
       </p>
       <p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
       {#see_also|Compile Variables|cmpxchgStrong#}
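
The new wording reflects what the commit implements: the pointer-type case and the bit-count bullet list are gone, and bool, floats, integers of any bit width, and enums are accepted uniformly across the atomic builtins. A minimal sketch of what this allows (not from the commit; the test name and values are illustrative, using the same enum-literal shorthand the diffs below adopt):

const std = @import("std");
const expect = std.testing.expect;

test "atomics on bool and non-power-of-two integers" {
    // bool no longer has to be smuggled through a u8
    var flag = false;
    @atomicStore(bool, &flag, true, .SeqCst);
    expect(@atomicRmw(bool, &flag, .Xchg, false, .SeqCst)); // previous value was true
    expect(!@atomicLoad(bool, &flag, .SeqCst));

    // the "power of 2" bit-count restriction is lifted as well
    var x: u7 = 10;
    _ = @atomicRmw(u7, &x, .Add, 1, .SeqCst);
    expect(@atomicLoad(u7, &x, .SeqCst) == 11);
}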

lib/std/atomic/int.zig

Lines changed: 5 additions & 8 deletions
@@ -1,6 +1,3 @@
-const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-
 /// Thread-safe, lock-free integer
 pub fn Int(comptime T: type) type {
     return struct {
@@ -14,28 +11,28 @@ pub fn Int(comptime T: type) type {
 
         /// Returns previous value
        pub fn incr(self: *Self) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Add, 1, .SeqCst);
        }
 
        /// Returns previous value
        pub fn decr(self: *Self) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Sub, 1, .SeqCst);
        }
 
        pub fn get(self: *Self) T {
-            return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+            return @atomicLoad(T, &self.unprotected_value, .SeqCst);
        }
 
        pub fn set(self: *Self, new_value: T) void {
            _ = self.xchg(new_value);
        }
 
        pub fn xchg(self: *Self, new_value: T) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Xchg, new_value, .SeqCst);
        }
 
        pub fn fetchAdd(self: *Self, op: T) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, op, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Add, op, .SeqCst);
        }
     };
 }
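
The wrapper's behavior is unchanged; only the spelling of the ops and orderings is shortened to enum literals. A hedged usage sketch (the diff confirms the unprotected_value field and the methods; initializing via a struct literal is an assumption):

const std = @import("std");
const expect = std.testing.expect;

test "std.atomic.Int usage" {
    // field name from the diff; direct initialization is an assumption
    var counter = std.atomic.Int(usize){ .unprotected_value = 0 };
    expect(counter.incr() == 0); // incr returns the previous value
    expect(counter.incr() == 1);
    expect(counter.decr() == 2);
    expect(counter.get() == 1);
}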

lib/std/atomic/queue.zig

Lines changed: 9 additions & 11 deletions
@@ -1,7 +1,5 @@
 const std = @import("../std.zig");
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
 const assert = std.debug.assert;
 const expect = std.testing.expect;
 
@@ -145,7 +143,7 @@ const Context = struct {
     put_sum: isize,
     get_sum: isize,
     get_count: usize,
-    puts_done: u8, // TODO make this a bool
+    puts_done: bool,
 };
 
 // TODO add lazy evaluated build options and then put puts_per_thread behind
@@ -169,7 +167,7 @@ test "std.atomic.Queue" {
         .queue = &queue,
         .put_sum = 0,
         .get_sum = 0,
-        .puts_done = 0,
+        .puts_done = false,
         .get_count = 0,
     };
 
@@ -182,7 +180,7 @@ test "std.atomic.Queue" {
            }
        }
        expect(!context.queue.isEmpty());
-        context.puts_done = 1;
+        context.puts_done = true;
        {
            var i: usize = 0;
            while (i < put_thread_count) : (i += 1) {
@@ -204,7 +202,7 @@ test "std.atomic.Queue" {
 
        for (putters) |t|
            t.wait();
-        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+        @atomicStore(bool, &context.puts_done, true, .SeqCst);
        for (getters) |t|
            t.wait();
 
@@ -231,25 +229,25 @@ fn startPuts(ctx: *Context) u8 {
        std.time.sleep(1); // let the os scheduler be our fuzz
        const x = @bitCast(i32, r.random.scalar(u32));
        const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
-        node.* = Queue(i32).Node{
+        node.* = .{
            .prev = undefined,
            .next = undefined,
            .data = x,
        };
        ctx.queue.put(node);
-        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+        _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
     }
     return 0;
 }
 
 fn startGets(ctx: *Context) u8 {
     while (true) {
-        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+        const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
 
        while (ctx.queue.get()) |node| {
            std.time.sleep(1); // let the os scheduler be our fuzz
-            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+            _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+            _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
        }
 
        if (last) return 0;
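
The test's shutdown protocol deserves a note: a getter samples puts_done before draining the queue, so any node enqueued before the flag flipped is still observed on the final pass. A simplified standalone sketch of that loop (hypothetical function name, same builtins as above):

const std = @import("std");

fn drain(queue: *std.atomic.Queue(i32), puts_done: *bool) void {
    while (true) {
        // read the flag before draining; if it was already true,
        // nothing more can be enqueued, so this pass is the last one
        const last = @atomicLoad(bool, puts_done, .SeqCst);
        while (queue.get()) |node| {
            _ = node; // consume node.data here
        }
        if (last) return;
    }
}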

lib/std/atomic/stack.zig

Lines changed: 15 additions & 16 deletions
@@ -1,6 +1,5 @@
 const assert = std.debug.assert;
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
 const expect = std.testing.expect;
 
 /// Many reader, many writer, non-allocating, thread-safe
@@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type {
         root: ?*Node,
         lock: @TypeOf(lock_init),
 
-        const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
+        const lock_init = if (builtin.single_threaded) {} else false;
 
         pub const Self = @This();
 
@@ -31,16 +30,16 @@ pub fn Stack(comptime T: type) type {
        /// being the first item in the stack, returns the other item that was there.
        pub fn pushFirst(self: *Self, node: *Node) ?*Node {
            node.next = null;
-            return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+            return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
        }
 
        pub fn push(self: *Self, node: *Node) void {
            if (builtin.single_threaded) {
                node.next = self.root;
                self.root = node;
            } else {
-                while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-                defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+                while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+                defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
 
                node.next = self.root;
                self.root = node;
@@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type {
                self.root = root.next;
                return root;
            } else {
-                while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-                defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+                while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+                defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
 
                const root = self.root orelse return null;
                self.root = root.next;
@@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type {
        }
 
        pub fn isEmpty(self: *Self) bool {
-            return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
+            return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
        }
     };
 }
@@ -75,7 +74,7 @@ const Context = struct {
     put_sum: isize,
     get_sum: isize,
     get_count: usize,
-    puts_done: u8, // TODO make this a bool
+    puts_done: bool,
 };
 // TODO add lazy evaluated build options and then put puts_per_thread behind
 // some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
@@ -98,7 +97,7 @@ test "std.atomic.stack" {
         .stack = &stack,
         .put_sum = 0,
         .get_sum = 0,
-        .puts_done = 0,
+        .puts_done = false,
         .get_count = 0,
     };
 
@@ -109,7 +108,7 @@ test "std.atomic.stack" {
            expect(startPuts(&context) == 0);
        }
    }
-    context.puts_done = 1;
+    context.puts_done = true;
    {
        var i: usize = 0;
        while (i < put_thread_count) : (i += 1) {
@@ -128,7 +127,7 @@ test "std.atomic.stack" {
 
        for (putters) |t|
            t.wait();
-        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+        @atomicStore(bool, &context.puts_done, true, .SeqCst);
        for (getters) |t|
            t.wait();
    }
@@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 {
            .data = x,
        };
        ctx.stack.push(node);
-        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+        _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
     }
     return 0;
 }
 
 fn startGets(ctx: *Context) u8 {
     while (true) {
-        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+        const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
 
        while (ctx.stack.pop()) |node| {
            std.time.sleep(1); // let the os scheduler be our fuzz
-            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+            _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+            _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
        }
 
        if (last) return 0;
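
The lock idiom reads more directly with a bool: @atomicRmw returns the previous value, so the while loop spins until this thread is the one that flipped the flag from false to true, and the deferred assert checks the flag was still true at release. The same idiom as standalone helpers (hypothetical names, a sketch rather than std API):

const std = @import("std");
const assert = std.debug.assert;

fn acquireLock(lock: *bool) void {
    // Xchg returns the previous value: false means we took the lock
    while (@atomicRmw(bool, lock, .Xchg, true, .SeqCst)) {}
}

fn releaseLock(lock: *bool) void {
    // previous value must be true, otherwise we released a lock we never held
    assert(@atomicRmw(bool, lock, .Xchg, false, .SeqCst));
}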

lib/std/event/channel.zig

Lines changed: 10 additions & 13 deletions
@@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type {
         putters: std.atomic.Queue(PutNode),
         get_count: usize,
         put_count: usize,
-        dispatch_lock: u8, // TODO make this a bool
-        need_dispatch: u8, // TODO make this a bool
+        dispatch_lock: bool,
+        need_dispatch: bool,
 
         // simple fixed size ring buffer
         buffer_nodes: []T,
@@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type {
            .buffer_len = 0,
            .buffer_nodes = buffer,
            .buffer_index = 0,
-            .dispatch_lock = 0,
-            .need_dispatch = 0,
+            .dispatch_lock = false,
+            .need_dispatch = false,
            .getters = std.atomic.Queue(GetNode).init(),
            .putters = std.atomic.Queue(PutNode).init(),
            .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
@@ -165,15 +165,14 @@ pub fn Channel(comptime T: type) type {
 
        fn dispatch(self: *SelfChannel) void {
            // set the "need dispatch" flag
-            @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
+            @atomicStore(bool, &self.need_dispatch, true, .SeqCst);
 
            lock: while (true) {
                // set the lock flag
-                const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
-                if (prev_lock != 0) return;
+                if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return;
 
                // clear the need_dispatch flag since we're about to do it
-                @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
+                @atomicStore(bool, &self.need_dispatch, false, .SeqCst);
 
                while (true) {
                    one_dispatch: {
@@ -250,14 +249,12 @@ pub fn Channel(comptime T: type) type {
                }
 
                // clear need-dispatch flag
-                const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
-                if (need_dispatch != 0) continue;
+                if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue;
 
-                const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
-                assert(my_lock != 0);
+                assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst));
 
                // we have to check again now that we unlocked
-                if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
+                if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
 
                return;
            }
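
Rewritten with bools, the dispatch protocol is easier to follow: need_dispatch records that work is pending, dispatch_lock elects one worker via try-lock, and the post-unlock re-check closes the window where a request lands after the flag is cleared but before the lock is released. A simplified standalone sketch of that shape (hypothetical names; doWork stands in for the channel's queue servicing):

const std = @import("std");
const assert = std.debug.assert;

fn doWork() void {}

fn dispatch(need_dispatch: *bool, dispatch_lock: *bool) void {
    @atomicStore(bool, need_dispatch, true, .SeqCst);
    lock: while (true) {
        // try-lock: a true previous value means another thread holds the
        // lock and will observe our need_dispatch flag
        if (@atomicRmw(bool, dispatch_lock, .Xchg, true, .SeqCst)) return;

        // we hold the lock; clear the flag since we are about to do the work
        @atomicStore(bool, need_dispatch, false, .SeqCst);
        doWork();

        // unlock, then re-check: a request may have arrived after the
        // clear above but before this release
        assert(@atomicRmw(bool, dispatch_lock, .Xchg, false, .SeqCst));
        if (@atomicLoad(bool, need_dispatch, .SeqCst)) continue :lock;
        return;
    }
}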
