Skip to content

Commit 6067d39

Browse files
std.builtin: make atomic order fields lowercase
1 parent c260b4c commit 6067d39

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed: +367 lines added, -367 lines removed

lib/build_runner.zig

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -892,10 +892,10 @@ fn workerMakeOneStep(
892892
// then we return without doing the step, relying on another worker to
893893
// queue this step up again when dependencies are met.
894894
for (s.dependencies.items) |dep| {
895-
switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) {
895+
switch (@atomicLoad(Step.State, &dep.state, .seq_cst)) {
896896
.success, .skipped => continue,
897897
.failure, .dependency_failure, .skipped_oom => {
898-
@atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst);
898+
@atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst);
899899
return;
900900
},
901901
.precheck_done, .running => {
@@ -929,7 +929,7 @@ fn workerMakeOneStep(
929929
s.state = .running;
930930
} else {
931931
// Avoid running steps twice.
932-
if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) {
932+
if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .seq_cst, .seq_cst) != null) {
933933
// Another worker got the job.
934934
return;
935935
}
@@ -956,13 +956,13 @@ fn workerMakeOneStep(
956956

957957
handle_result: {
958958
if (make_result) |_| {
959-
@atomicStore(Step.State, &s.state, .success, .SeqCst);
959+
@atomicStore(Step.State, &s.state, .success, .seq_cst);
960960
} else |err| switch (err) {
961961
error.MakeFailed => {
962-
@atomicStore(Step.State, &s.state, .failure, .SeqCst);
962+
@atomicStore(Step.State, &s.state, .failure, .seq_cst);
963963
break :handle_result;
964964
},
965-
error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst),
965+
error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
966966
}
967967

968968
// Successful completion of a step, so we queue up its dependants as well.

lib/compiler_rt/atomics.zig

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ const SpinlockTable = struct {
7474
: "memory"
7575
);
7676
} else flag: {
77-
break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .Acquire);
77+
break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .acquire);
7878
};
7979

8080
switch (flag) {
@@ -91,7 +91,7 @@ const SpinlockTable = struct {
9191
: "memory"
9292
);
9393
} else {
94-
@atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .Release);
94+
@atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .release);
9595
}
9696
}
9797
};
@@ -172,7 +172,7 @@ inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
172172
defer sl.release();
173173
return src.*;
174174
} else {
175-
return @atomicLoad(T, src, .SeqCst);
175+
return @atomicLoad(T, src, .seq_cst);
176176
}
177177
}
178178

@@ -203,7 +203,7 @@ inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
203203
defer sl.release();
204204
dst.* = value;
205205
} else {
206-
@atomicStore(T, dst, value, .SeqCst);
206+
@atomicStore(T, dst, value, .seq_cst);
207207
}
208208
}
209209

@@ -239,12 +239,12 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
239239

240240
const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift;
241241

242-
var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst);
242+
var wide_old = @atomicLoad(WideAtomic, wide_ptr, .seq_cst);
243243
while (true) {
244244
const old = @as(T, @truncate((wide_old & mask) >> inner_shift));
245245
const new = update(val, old);
246246
const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift);
247-
if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| {
247+
if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .seq_cst, .seq_cst)) |new_wide_old| {
248248
wide_old = new_wide_old;
249249
} else {
250250
return old;
@@ -270,7 +270,7 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
270270
};
271271
return wideUpdate(T, ptr, val, Updater.update);
272272
} else {
273-
return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
273+
return @atomicRmw(T, ptr, .Xchg, val, .seq_cst);
274274
}
275275
}
276276

@@ -315,7 +315,7 @@ inline fn atomic_compare_exchange_N(
315315
expected.* = value;
316316
return 0;
317317
} else {
318-
if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
318+
if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| {
319319
expected.* = old_value;
320320
return 0;
321321
}
@@ -373,7 +373,7 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
373373
return wideUpdate(T, ptr, val, Updater.update);
374374
}
375375

376-
return @atomicRmw(T, ptr, op, val, .SeqCst);
376+
return @atomicRmw(T, ptr, op, val, .seq_cst);
377377
}
378378

379379
fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {

lib/std/Progress.zig

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -95,9 +95,9 @@ pub const Node = struct {
9595
/// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe.
9696
pub fn completeOne(self: *Node) void {
9797
if (self.parent) |parent| {
98-
@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
98+
@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
9999
}
100-
_ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .Monotonic);
100+
_ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic);
101101
self.context.maybeRefresh();
102102
}
103103

@@ -108,7 +108,7 @@ pub const Node = struct {
108108
{
109109
self.context.update_mutex.lock();
110110
defer self.context.update_mutex.unlock();
111-
_ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .Monotonic, .Monotonic);
111+
_ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic);
112112
}
113113
parent.completeOne();
114114
} else {
@@ -122,7 +122,7 @@ pub const Node = struct {
122122
/// Tell the parent node that this node is actively being worked on. Thread-safe.
123123
pub fn activate(self: *Node) void {
124124
if (self.parent) |parent| {
125-
@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
125+
@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
126126
self.context.maybeRefresh();
127127
}
128128
}
@@ -134,9 +134,9 @@ pub const Node = struct {
134134
defer progress.update_mutex.unlock();
135135
self.name = name;
136136
if (self.parent) |parent| {
137-
@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
137+
@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
138138
if (parent.parent) |grand_parent| {
139-
@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
139+
@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
140140
}
141141
if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
142142
}
@@ -149,22 +149,22 @@ pub const Node = struct {
149149
defer progress.update_mutex.unlock();
150150
self.unit = unit;
151151
if (self.parent) |parent| {
152-
@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
152+
@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
153153
if (parent.parent) |grand_parent| {
154-
@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
154+
@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
155155
}
156156
if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
157157
}
158158
}
159159

160160
/// Thread-safe. 0 means unknown.
161161
pub fn setEstimatedTotalItems(self: *Node, count: usize) void {
162-
@atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic);
162+
@atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic);
163163
}
164164

165165
/// Thread-safe.
166166
pub fn setCompletedItems(self: *Node, completed_items: usize) void {
167-
@atomicStore(usize, &self.unprotected_completed_items, completed_items, .Monotonic);
167+
@atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic);
168168
}
169169
};
170170

@@ -313,8 +313,8 @@ fn refreshWithHeldLock(self: *Progress) void {
313313
self.bufWrite(&end, "... ", .{});
314314
}
315315
need_ellipse = false;
316-
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
317-
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
316+
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
317+
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
318318
const current_item = completed_items + 1;
319319
if (node.name.len != 0 or eti > 0) {
320320
if (node.name.len != 0) {
@@ -331,7 +331,7 @@ fn refreshWithHeldLock(self: *Progress) void {
331331
need_ellipse = false;
332332
}
333333
}
334-
maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .Acquire);
334+
maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire);
335335
}
336336
if (need_ellipse) {
337337
self.bufWrite(&end, "... ", .{});

lib/std/Thread.zig

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -510,7 +510,7 @@ const WindowsThreadImpl = struct {
510510

511511
fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD {
512512
const self: *@This() = @ptrCast(@alignCast(raw_ptr));
513-
defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
513+
defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
514514
.running => {},
515515
.completed => unreachable,
516516
.detached => self.thread.free(),
@@ -563,7 +563,7 @@ const WindowsThreadImpl = struct {
563563

564564
fn detach(self: Impl) void {
565565
windows.CloseHandle(self.thread.thread_handle);
566-
switch (self.thread.completion.swap(.detached, .SeqCst)) {
566+
switch (self.thread.completion.swap(.detached, .seq_cst)) {
567567
.running => {},
568568
.completed => self.thread.free(),
569569
.detached => unreachable,
@@ -573,7 +573,7 @@ const WindowsThreadImpl = struct {
573573
fn join(self: Impl) void {
574574
windows.WaitForSingleObjectEx(self.thread.thread_handle, windows.INFINITE, false) catch unreachable;
575575
windows.CloseHandle(self.thread.thread_handle);
576-
assert(self.thread.completion.load(.SeqCst) == .completed);
576+
assert(self.thread.completion.load(.seq_cst) == .completed);
577577
self.thread.free();
578578
}
579579
};
@@ -780,11 +780,11 @@ const WasiThreadImpl = struct {
780780
}
781781

782782
fn getHandle(self: Impl) ThreadHandle {
783-
return self.thread.tid.load(.SeqCst);
783+
return self.thread.tid.load(.seq_cst);
784784
}
785785

786786
fn detach(self: Impl) void {
787-
switch (self.thread.state.swap(.detached, .SeqCst)) {
787+
switch (self.thread.state.swap(.detached, .seq_cst)) {
788788
.running => {},
789789
.completed => self.join(),
790790
.detached => unreachable,
@@ -801,7 +801,7 @@ const WasiThreadImpl = struct {
801801

802802
var spin: u8 = 10;
803803
while (true) {
804-
const tid = self.thread.tid.load(.SeqCst);
804+
const tid = self.thread.tid.load(.seq_cst);
805805
if (tid == 0) {
806806
break;
807807
}
@@ -901,7 +901,7 @@ const WasiThreadImpl = struct {
901901
if (tid < 0) {
902902
return error.SystemResources;
903903
}
904-
instance.thread.tid.store(tid, .SeqCst);
904+
instance.thread.tid.store(tid, .seq_cst);
905905

906906
return .{ .thread = &instance.thread };
907907
}
@@ -914,12 +914,12 @@ const WasiThreadImpl = struct {
914914
}
915915
__set_stack_pointer(arg.thread.memory.ptr + arg.stack_offset);
916916
__wasm_init_tls(arg.thread.memory.ptr + arg.tls_offset);
917-
@atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .SeqCst);
917+
@atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .seq_cst);
918918

919919
// Finished bootstrapping, call user's procedure.
920920
arg.call_back(arg.raw_ptr);
921921

922-
switch (arg.thread.state.swap(.completed, .SeqCst)) {
922+
switch (arg.thread.state.swap(.completed, .seq_cst)) {
923923
.running => {
924924
// reset the Thread ID
925925
asm volatile (
@@ -1191,7 +1191,7 @@ const LinuxThreadImpl = struct {
11911191

11921192
fn entryFn(raw_arg: usize) callconv(.C) u8 {
11931193
const self = @as(*@This(), @ptrFromInt(raw_arg));
1194-
defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
1194+
defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
11951195
.running => {},
11961196
.completed => unreachable,
11971197
.detached => self.thread.freeAndExit(),
@@ -1311,7 +1311,7 @@ const LinuxThreadImpl = struct {
13111311
}
13121312

13131313
fn detach(self: Impl) void {
1314-
switch (self.thread.completion.swap(.detached, .SeqCst)) {
1314+
switch (self.thread.completion.swap(.detached, .seq_cst)) {
13151315
.running => {},
13161316
.completed => self.join(),
13171317
.detached => unreachable,
@@ -1323,7 +1323,7 @@ const LinuxThreadImpl = struct {
13231323

13241324
var spin: u8 = 10;
13251325
while (true) {
1326-
const tid = self.thread.child_tid.load(.SeqCst);
1326+
const tid = self.thread.child_tid.load(.seq_cst);
13271327
if (tid == 0) {
13281328
break;
13291329
}

lib/std/Thread/Condition.zig

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ const WindowsImpl = struct {
163163

164164
if (comptime builtin.mode == .Debug) {
165165
// The internal state of the DebugMutex needs to be handled here as well.
166-
mutex.impl.locking_thread.store(0, .Unordered);
166+
mutex.impl.locking_thread.store(0, .unordered);
167167
}
168168
const rc = os.windows.kernel32.SleepConditionVariableSRW(
169169
&self.condition,
@@ -173,7 +173,7 @@ const WindowsImpl = struct {
173173
);
174174
if (comptime builtin.mode == .Debug) {
175175
// The internal state of the DebugMutex needs to be handled here as well.
176-
mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .Unordered);
176+
mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .unordered);
177177
}
178178

179179
// Return error.Timeout if we know the timeout elapsed correctly.
@@ -212,8 +212,8 @@ const FutexImpl = struct {
212212
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change)
213213
//
214214
// Acquire barrier to ensure the epoch load happens before the state load.
215-
var epoch = self.epoch.load(.Acquire);
216-
var state = self.state.fetchAdd(one_waiter, .Monotonic);
215+
var epoch = self.epoch.load(.acquire);
216+
var state = self.state.fetchAdd(one_waiter, .monotonic);
217217
assert(state & waiter_mask != waiter_mask);
218218
state += one_waiter;
219219

@@ -231,30 +231,30 @@ const FutexImpl = struct {
231231
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
232232
while (state & signal_mask != 0) {
233233
const new_state = state - one_waiter - one_signal;
234-
state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
234+
state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
235235
}
236236

237237
// Remove the waiter we added and officially return timed out.
238238
const new_state = state - one_waiter;
239-
state = self.state.cmpxchgWeak(state, new_state, .Monotonic, .Monotonic) orelse return err;
239+
state = self.state.cmpxchgWeak(state, new_state, .monotonic, .monotonic) orelse return err;
240240
}
241241
},
242242
};
243243

244-
epoch = self.epoch.load(.Acquire);
245-
state = self.state.load(.Monotonic);
244+
epoch = self.epoch.load(.acquire);
245+
state = self.state.load(.monotonic);
246246

247247
// Try to wake up by consuming a signal and decremented the waiter we added previously.
248248
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
249249
while (state & signal_mask != 0) {
250250
const new_state = state - one_waiter - one_signal;
251-
state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
251+
state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
252252
}
253253
}
254254
}
255255

256256
fn wake(self: *Impl, comptime notify: Notify) void {
257-
var state = self.state.load(.Monotonic);
257+
var state = self.state.load(.monotonic);
258258
while (true) {
259259
const waiters = (state & waiter_mask) / one_waiter;
260260
const signals = (state & signal_mask) / one_signal;
@@ -275,7 +275,7 @@ const FutexImpl = struct {
275275
// Reserve the amount of waiters to wake by incrementing the signals count.
276276
// Release barrier ensures code before the wake() happens before the signal it posted and consumed by the wait() threads.
277277
const new_state = state + (one_signal * to_wake);
278-
state = self.state.cmpxchgWeak(state, new_state, .Release, .Monotonic) orelse {
278+
state = self.state.cmpxchgWeak(state, new_state, .release, .monotonic) orelse {
279279
// Wake up the waiting threads we reserved above by changing the epoch value.
280280
// NOTE: a waiting thread could miss a wake up if *exactly* ((1<<32)-1) wake()s happen between it observing the epoch and sleeping on it.
281281
// This is very unlikely due to how many precise amount of Futex.wake() calls that would be between the waiting thread's potential preemption.
@@ -288,7 +288,7 @@ const FutexImpl = struct {
288288
// - T1: s = LOAD(&state)
289289
// - T2: UPDATE(&state, signal) + FUTEX_WAKE(&epoch)
290290
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed both epoch change and state change)
291-
_ = self.epoch.fetchAdd(1, .Release);
291+
_ = self.epoch.fetchAdd(1, .release);
292292
Futex.wake(&self.epoch, to_wake);
293293
return;
294294
};

0 commit comments

Comments
 (0)