From 8dc188ebe06b5b78dcead521561858fc27e25204 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Tue, 10 Mar 2020 22:33:32 +0200
Subject: [PATCH 01/11] support atomic operations with bools
---
src/codegen.cpp | 43 ++++++++++++++++++++++++++++++++
src/ir.cpp | 6 +++++
test/stage1/behavior/atomics.zig | 10 ++++++++
3 files changed, 59 insertions(+)
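
Not part of the patch: a minimal sketch of what this change enables at the language level, mirroring the behavior test added below (Zig syntax of this era; names are illustrative):

    const expect = @import("std").testing.expect;

    test "atomic bool sketch" {
        var flag = false;
        @atomicStore(bool, &flag, true, .SeqCst);
        expect(@atomicLoad(bool, &flag, .SeqCst));
        // .Xchg is the only RMW operation permitted on bool (see the ir.cpp check below)
        expect(@atomicRmw(bool, &flag, .Xchg, false, .SeqCst) == true);
        // flag is now false, so this exchange succeeds and returns null
        expect(@cmpxchgStrong(bool, &flag, false, true, .SeqCst, .SeqCst) == null);
    }
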
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 22cb97520565..7238d5041b46 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5224,6 +5224,15 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
LLVMValueRef cmp_val = ir_llvm_value(g, instruction->cmp_value);
LLVMValueRef new_val = ir_llvm_value(g, instruction->new_value);
+ ZigType *operand_type = instruction->new_value->value->type;
+ if (operand_type->id == ZigTypeIdBool) {
+ // treat bool as u8
+ ptr_val = LLVMBuildBitCast(g->builder, ptr_val,
+ LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
+ cmp_val = LLVMConstZExt(cmp_val, g->builtin_types.entry_u8->llvm_type);
+ new_val = LLVMConstZExt(new_val, g->builtin_types.entry_u8->llvm_type);
+ }
+
LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order);
LLVMAtomicOrdering failure_order = to_LLVMAtomicOrdering(instruction->failure_order);
@@ -5236,6 +5245,9 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
if (!handle_is_ptr(g, optional_type)) {
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
+ if (operand_type->id == ZigTypeIdBool) {
+ payload_val = LLVMBuildTrunc(g->builder, payload_val, g->builtin_types.entry_bool->llvm_type, "");
+ }
LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, "");
return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(get_llvm_type(g, child_type)), payload_val, "");
}
@@ -5250,6 +5262,9 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
ir_assert(type_has_bits(g, child_type), &instruction->base);
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
+ if (operand_type->id == ZigTypeIdBool) {
+ payload_val = LLVMBuildTrunc(g->builder, payload_val, g->builtin_types.entry_bool->llvm_type, "");
+ }
LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, "");
gen_assign_raw(g, val_ptr, get_pointer_to_type(g, child_type, false), payload_val);
@@ -5827,6 +5842,16 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
+ if (operand_type->id == ZigTypeIdBool) {
+ // treat bool as u8
+ LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
+ LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
+ LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_u8->llvm_type, "");
+ LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
+ g->is_single_threaded);
+ return LLVMBuildTrunc(g->builder, uncasted_result, g->builtin_types.entry_bool->llvm_type, "");
+ }
+
if (get_codegen_ptr_type_bail(g, operand_type) == nullptr) {
return ZigLLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded);
}
@@ -5845,6 +5870,16 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutableGen *executabl
{
LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->ordering);
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
+
+ ZigType *operand_type = instruction->ptr->value->type->data.pointer.child_type;
+ if (operand_type->id == ZigTypeIdBool) {
+ // treat bool as u8
+ ptr = LLVMBuildBitCast(g->builder, ptr,
+ LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
+ LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, "");
+ LLVMSetOrdering(load_inst, ordering);
+ return LLVMBuildTrunc(g->builder, load_inst, g->builtin_types.entry_bool->llvm_type, "");
+ }
LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, "");
LLVMSetOrdering(load_inst, ordering);
return load_inst;
@@ -5856,6 +5891,14 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab
LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->ordering);
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef value = ir_llvm_value(g, instruction->value);
+
+ ZigType *operand_type = instruction->value->value->type;
+ if (operand_type->id == ZigTypeIdBool) {
+ // treat bool as u8
+ ptr = LLVMBuildBitCast(g->builder, ptr,
+ LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
+ value = LLVMConstZExt(value, g->builtin_types.entry_u8->llvm_type);
+ }
LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type);
LLVMSetOrdering(store_inst, ordering);
return nullptr;
diff --git a/src/ir.cpp b/src/ir.cpp
index e5b28f84c2fc..c6978ca0a961 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -28357,6 +28357,8 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
max_atomic_bits, (uint32_t) operand_type->data.floating.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
+ } else if (operand_type->id == ZigTypeIdBool) {
+ // will be treated as u8
} else {
Error err;
ZigType *operand_ptr_type;
@@ -28397,6 +28399,10 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
ir_add_error(ira, &instruction->op->base,
buf_sprintf("@atomicRmw on enum only works with .Xchg"));
return ira->codegen->invalid_inst_gen;
+ } else if (operand_type->id == ZigTypeIdBool && op != AtomicRmwOp_xchg) {
+ ir_add_error(ira, &instruction->op->base,
+ buf_sprintf("@atomicRmw on bool only works with .Xchg"));
+ return ira->codegen->invalid_inst_gen;
} else if (operand_type->id == ZigTypeIdFloat && op > AtomicRmwOp_sub) {
ir_add_error(ira, &instruction->op->base,
buf_sprintf("@atomicRmw with float only works with .Xchg, .Add and .Sub"));
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index 0347f6f94a28..bda0a8469ce0 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -161,3 +161,13 @@ fn testAtomicRmwFloat() void {
_ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
expect(x == 4);
}
+
+test "atomics with bool" {
+ var x = false;
+ @atomicStore(bool, &x, true, .SeqCst);
+ expect(x == true);
+ expect(@atomicLoad(bool, &x, .SeqCst) == true);
+ expect(@atomicRmw(bool, &x, .Xchg, false, .SeqCst) == true);
+ expect(@cmpxchgStrong(bool, &x, false, true, .SeqCst, .SeqCst) == null);
+ expect(@cmpxchgStrong(bool, &x, false, true, .SeqCst, .SeqCst).? == true);
+}
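
Not part of the patch: the ir.cpp check above rejects RMW operations other than .Xchg on bool. A sketch of code that now fails to compile, with the error text taken from the buf_sprintf message introduced here:

    export fn entry() void {
        var x = false;
        // error: @atomicRmw on bool only works with .Xchg
        _ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
    }
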
From ee5b00a8b90ef375d0cd4432d31e3a4ed0b6f632 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Tue, 10 Mar 2020 22:46:19 +0200
Subject: [PATCH 02/11] use atomic bools in std lib
---
lib/std/atomic/queue.zig | 20 +++++++++-----------
lib/std/atomic/stack.zig | 31 +++++++++++++++----------------
lib/std/event/channel.zig | 24 ++++++++++++------------
lib/std/event/lock.zig | 36 +++++++++++++++++-------------------
lib/std/event/rwlock.zig | 32 ++++++++++++++++----------------
src/ir.cpp | 6 +++---
test/compile_errors.zig | 13 +++++++++++--
7 files changed, 83 insertions(+), 79 deletions(-)
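
Not part of the patch: the recurring pattern replaced in these files is a u8 standing in for a boolean flag. A condensed, self-contained sketch of the spin-lock idiom from stack.zig, simplified from the hunks below:

    const std = @import("std");
    const assert = std.debug.assert;

    var lock: bool = false; // was: var lock: u8 = 0;

    fn criticalSection() void {
        // before: while (@atomicRmw(u8, &lock, .Xchg, 1, .SeqCst) != 0) {}
        while (@atomicRmw(bool, &lock, .Xchg, true, .SeqCst)) {}
        // before: defer assert(@atomicRmw(u8, &lock, .Xchg, 0, .SeqCst) == 1);
        defer assert(@atomicRmw(bool, &lock, .Xchg, false, .SeqCst));
        // ... protected work ...
    }
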
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 1969587f30a6..5c40acc3cb72 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -1,7 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const expect = std.testing.expect;
@@ -149,7 +147,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
- puts_done: u8, // TODO make this a bool
+ puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
@@ -173,7 +171,7 @@ test "std.atomic.Queue" {
.queue = &queue,
.put_sum = 0,
.get_sum = 0,
- .puts_done = 0,
+ .puts_done = false,
.get_count = 0,
};
@@ -186,7 +184,7 @@ test "std.atomic.Queue" {
}
}
expect(!context.queue.isEmpty());
- context.puts_done = 1;
+ context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -208,7 +206,7 @@ test "std.atomic.Queue" {
for (putters) |t|
t.wait();
- @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+ @atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
@@ -235,25 +233,25 @@ fn startPuts(ctx: *Context) u8 {
std.time.sleep(1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
- node.* = Queue(i32).Node{
+ node.* = .{
.prev = undefined,
.next = undefined,
.data = x,
};
ctx.queue.put(node);
- _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+ _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
- const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+ const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
while (ctx.queue.get()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
- _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
- _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+ _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+ _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}
if (last) return 0;
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 0f67a257cc71..07cb16e45013 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -1,6 +1,5 @@
const assert = std.debug.assert;
const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
const expect = std.testing.expect;
/// Many reader, many writer, non-allocating, thread-safe
@@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type {
root: ?*Node,
lock: @TypeOf(lock_init),
- const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
+ const lock_init = if (builtin.single_threaded) {} else false;
pub const Self = @This();
@@ -31,7 +30,7 @@ pub fn Stack(comptime T: type) type {
/// being the first item in the stack, returns the other item that was there.
pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
- return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+ return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
}
pub fn push(self: *Self, node: *Node) void {
@@ -39,8 +38,8 @@ pub fn Stack(comptime T: type) type {
node.next = self.root;
self.root = node;
} else {
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
+ defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
node.next = self.root;
self.root = node;
@@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type {
self.root = root.next;
return root;
} else {
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
+ defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
const root = self.root orelse return null;
self.root = root.next;
@@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type {
}
pub fn isEmpty(self: *Self) bool {
- return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
+ return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
}
};
}
@@ -75,7 +74,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
- puts_done: u8, // TODO make this a bool
+ puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
@@ -98,7 +97,7 @@ test "std.atomic.stack" {
.stack = &stack,
.put_sum = 0,
.get_sum = 0,
- .puts_done = 0,
+ .puts_done = false,
.get_count = 0,
};
@@ -109,7 +108,7 @@ test "std.atomic.stack" {
expect(startPuts(&context) == 0);
}
}
- context.puts_done = 1;
+ context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -128,7 +127,7 @@ test "std.atomic.stack" {
for (putters) |t|
t.wait();
- @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+ @atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
}
@@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 {
.data = x,
};
ctx.stack.push(node);
- _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+ _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
- const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+ const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst) == true;
while (ctx.stack.pop()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
- _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
- _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+ _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+ _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}
if (last) return 0;
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 3c5b48d04741..355bd7829225 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type {
putters: std.atomic.Queue(PutNode),
get_count: usize,
put_count: usize,
- dispatch_lock: u8, // TODO make this a bool
- need_dispatch: u8, // TODO make this a bool
+ dispatch_lock: bool,
+ need_dispatch: bool,
// simple fixed size ring buffer
buffer_nodes: []T,
@@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type {
.buffer_len = 0,
.buffer_nodes = buffer,
.buffer_index = 0,
- .dispatch_lock = 0,
- .need_dispatch = 0,
+ .dispatch_lock = false,
+ .need_dispatch = false,
.getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(),
.or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
@@ -165,15 +165,15 @@ pub fn Channel(comptime T: type) type {
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
- @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
+ @atomicStore(bool, &self.need_dispatch, true, .SeqCst);
lock: while (true) {
// set the lock flag
- const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
+ const prev_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst);
- if (prev_lock != 0) return;
+ if (prev_lock) return;
// clear the need_dispatch flag since we're about to do it
- @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
+ @atomicStore(bool, &self.need_dispatch, false, .SeqCst);
while (true) {
one_dispatch: {
@@ -250,14 +250,14 @@ pub fn Channel(comptime T: type) type {
}
// clear need-dispatch flag
- const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
- if (need_dispatch != 0) continue;
+ const need_dispatch = @atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst);
+ if (need_dispatch) continue;
- const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
- assert(my_lock != 0);
+ const my_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst);
+ assert(my_lock);
// we have to check again now that we unlocked
- if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
+ if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
return;
}
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index b9cbb5d95fd0..6b27bbd8c459 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -11,9 +11,9 @@ const Loop = std.event.Loop;
/// Allows only one actor to hold the lock.
/// TODO: make this API also work in blocking I/O mode.
pub const Lock = struct {
- shared_bit: u8, // TODO make this a bool
+ shared: bool,
queue: Queue,
- queue_empty_bit: u8, // TODO make this a bool
+ queue_empty: bool,
const Queue = std.atomic.Queue(anyframe);
@@ -31,20 +31,19 @@ pub const Lock = struct {
}
// We need to release the lock.
- @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
- @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+ @atomicStore(bool, &self.lock.shared, false, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
// But if there's a queue item, we are the actor which must loop and attempt
// to grab the lock again.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+ if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
while (true) {
- const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
- if (old_bit != 0) {
+ if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) {
// We did not obtain the lock. Great, the queue is someone else's problem.
return;
}
@@ -56,11 +55,11 @@ pub const Lock = struct {
}
// Release the lock again.
- @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
- @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+ @atomicStore(bool, &self.lock.shared, false, .SeqCst);
// Find out if we can be done.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+ if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
}
@@ -69,24 +68,24 @@ pub const Lock = struct {
pub fn init() Lock {
return Lock{
- .shared_bit = 0,
+ .shared = false,
.queue = Queue.init(),
- .queue_empty_bit = 1,
+ .queue_empty = true,
};
}
pub fn initLocked() Lock {
return Lock{
- .shared_bit = 1,
+ .shared = true,
.queue = Queue.init(),
- .queue_empty_bit = 1,
+ .queue_empty = true,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
- assert(self.shared_bit == 0);
+ assert(!self.shared);
while (self.queue.get()) |node| resume node.data;
}
@@ -99,12 +98,11 @@ pub const Lock = struct {
// At this point, we are in the queue, so we might have already been resumed.
- // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
+ // We set this bit so that later we can rely on the fact, that if queue_empty == true, some actor
// will attempt to grab the lock.
- @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.queue_empty, false, .SeqCst);
- const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
- if (old_bit == 0) {
+ if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index f4b13d008b5f..425088063f71 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -16,8 +16,8 @@ pub const RwLock = struct {
shared_state: State,
writer_queue: Queue,
reader_queue: Queue,
- writer_queue_empty_bit: u8, // TODO make this a bool
- reader_queue_empty_bit: u8, // TODO make this a bool
+ writer_queue_empty: bool,
+ reader_queue_empty: bool,
reader_lock_count: usize,
const State = enum(u8) {
@@ -40,7 +40,7 @@ pub const RwLock = struct {
return;
}
- @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(bool, &self.lock.reader_queue_empty, true, .SeqCst);
if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
@@ -62,7 +62,7 @@ pub const RwLock = struct {
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
- if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
+ if (!@atomicLoad(bool, &self.lock.reader_queue_empty, .SeqCst)) {
// Switch to a read lock.
@atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
@@ -71,7 +71,7 @@ pub const RwLock = struct {
return;
}
- @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(bool, &self.lock.writer_queue_empty, true, .SeqCst);
@atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
self.lock.commonPostUnlock();
@@ -79,12 +79,12 @@ pub const RwLock = struct {
};
pub fn init() RwLock {
- return RwLock{
+ return .{
.shared_state = .Unlocked,
.writer_queue = Queue.init(),
- .writer_queue_empty_bit = 1,
+ .writer_queue_empty = true,
.reader_queue = Queue.init(),
- .reader_queue_empty_bit = 1,
+ .reader_queue_empty = true,
.reader_lock_count = 0,
};
}
@@ -111,9 +111,9 @@ pub const RwLock = struct {
// At this point, we are in the reader_queue, so we might have already been resumed.
- // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
+ // We set this bit so that later we can rely on the fact, that if reader_queue_empty == true,
// some actor will attempt to grab the lock.
- @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.reader_queue_empty, false, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -142,9 +142,9 @@ pub const RwLock = struct {
// At this point, we are in the writer_queue, so we might have already been resumed.
- // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
+ // We set this bit so that later we can rely on the fact, that if writer_queue_empty == true,
// some actor will attempt to grab the lock.
- @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.writer_queue_empty, false, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -165,7 +165,7 @@ pub const RwLock = struct {
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
- if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
+ if (!@atomicLoad(bool, &self.writer_queue_empty, .SeqCst)) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
@@ -176,12 +176,12 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(bool, &self.writer_queue_empty, true, .SeqCst);
@atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
continue;
}
- if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
+ if (!@atomicLoad(bool, &self.reader_queue_empty, .SeqCst)) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
@@ -195,7 +195,7 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(bool, &self.reader_queue_empty, true, .SeqCst);
if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
diff --git a/src/ir.cpp b/src/ir.cpp
index c6978ca0a961..cad1d382e16e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -28397,15 +28397,15 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
if (operand_type->id == ZigTypeIdEnum && op != AtomicRmwOp_xchg) {
ir_add_error(ira, &instruction->op->base,
- buf_sprintf("@atomicRmw on enum only works with .Xchg"));
+ buf_sprintf("@atomicRmw with enum only allowed with .Xchg"));
return ira->codegen->invalid_inst_gen;
} else if (operand_type->id == ZigTypeIdBool && op != AtomicRmwOp_xchg) {
ir_add_error(ira, &instruction->op->base,
- buf_sprintf("@atomicRmw on bool only works with .Xchg"));
+ buf_sprintf("@atomicRmw with bool only allowed with .Xchg"));
return ira->codegen->invalid_inst_gen;
} else if (operand_type->id == ZigTypeIdFloat && op > AtomicRmwOp_sub) {
ir_add_error(ira, &instruction->op->base,
- buf_sprintf("@atomicRmw with float only works with .Xchg, .Add and .Sub"));
+ buf_sprintf("@atomicRmw with float only allowed with .Xchg, .Add and .Sub"));
return ira->codegen->invalid_inst_gen;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index a2d4e8ac23df..28529b3f579c 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,15 @@ const tests = @import("tests.zig");
const std = @import("std");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add("atomicrmw with bool op not .Xchg",
+ \\export fn entry() void {
+ \\ var x = false;
+ \\ _ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:3:30: error: @atomicRmw with bool only allowed with .Xchg",
+ });
+
cases.addTest("combination of noasync and async",
\\export fn entry() void {
\\ noasync {
@@ -325,7 +334,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @atomicRmw(f32, &x, .And, 2, .SeqCst);
\\}
, &[_][]const u8{
- "tmp.zig:3:29: error: @atomicRmw with float only works with .Xchg, .Add and .Sub",
+ "tmp.zig:3:29: error: @atomicRmw with float only allowed with .Xchg, .Add and .Sub",
});
cases.add("intToPtr with misaligned address",
@@ -542,7 +551,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @atomicRmw(E, &x, .Add, .b, .SeqCst);
\\}
, &[_][]const u8{
- "tmp.zig:9:27: error: @atomicRmw on enum only works with .Xchg",
+ "tmp.zig:9:27: error: @atomicRmw with enum only allowed with .Xchg",
});
cases.add("disallow coercion from non-null-terminated pointer to null-terminated pointer",
From 21809c33001cc53c8fb3b56b25264e8d9076bed9 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 11 Mar 2020 09:24:53 +0200
Subject: [PATCH 03/11] support non power of two integers in atomic ops
---
src/all_types.hpp | 8 +++++
src/codegen.cpp | 45 ++++++++++++------------
src/ir.cpp | 87 +++++++++++++++++++++++------------------------
3 files changed, 73 insertions(+), 67 deletions(-)
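
Not part of the patch: a sketch of what this makes legal. Integer (and enum tag) types whose bit count is not a power of two, or is below 8, are now accepted; ir.cpp records a widened actual_type and codegen zero-extends and truncates around the underlying atomic. Not exercised until the behavior test is extended later in this series:

    const expect = @import("std").testing.expect;

    test "non-power-of-two atomic sketch" {
        var x: u33 = 1; // performed as a 64-bit atomic under the hood
        @atomicStore(u33, &x, 2, .SeqCst);
        expect(@atomicLoad(u33, &x, .SeqCst) == 2);
        expect(@atomicRmw(u33, &x, .Xchg, 3, .SeqCst) == 2);
    }
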
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 14b99228ca1c..149b7f464706 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3567,6 +3567,8 @@ struct IrInstGenCmpxchg {
IrInstGen *cmp_value;
IrInstGen *new_value;
IrInstGen *result_loc;
+ // non null if operand needs widening and truncating
+ ZigType *actual_type;
bool is_weak;
};
@@ -4199,6 +4201,8 @@ struct IrInstGenAtomicRmw {
IrInstGen *ptr;
IrInstGen *operand;
+ // non null if operand needs widening and truncating
+ ZigType *actual_type;
AtomicRmwOp op;
AtomicOrder ordering;
};
@@ -4215,6 +4219,8 @@ struct IrInstGenAtomicLoad {
IrInstGen base;
IrInstGen *ptr;
+ // non null if operand needs widening and truncating
+ ZigType *actual_type;
AtomicOrder ordering;
};
@@ -4232,6 +4238,8 @@ struct IrInstGenAtomicStore {
IrInstGen *ptr;
IrInstGen *value;
+ // non null if operand needs widening and truncating
+ ZigType *actual_type;
AtomicOrder ordering;
};
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 7238d5041b46..cfb3de292aa1 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5225,12 +5225,12 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
LLVMValueRef new_val = ir_llvm_value(g, instruction->new_value);
ZigType *operand_type = instruction->new_value->value->type;
- if (operand_type->id == ZigTypeIdBool) {
- // treat bool as u8
+ if (instruction->actual_type != nullptr) {
+ // operand needs widening and truncating
ptr_val = LLVMBuildBitCast(g->builder, ptr_val,
- LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
- cmp_val = LLVMConstZExt(cmp_val, g->builtin_types.entry_u8->llvm_type);
- new_val = LLVMConstZExt(new_val, g->builtin_types.entry_u8->llvm_type);
+ LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
+ cmp_val = LLVMConstZExt(cmp_val, get_llvm_type(g, instruction->actual_type));
+ new_val = LLVMConstZExt(new_val, get_llvm_type(g, instruction->actual_type));
}
LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order);
@@ -5245,8 +5245,8 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
if (!handle_is_ptr(g, optional_type)) {
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
- if (operand_type->id == ZigTypeIdBool) {
- payload_val = LLVMBuildTrunc(g->builder, payload_val, g->builtin_types.entry_bool->llvm_type, "");
+ if (instruction->actual_type != nullptr) {
+ payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), "");
}
LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, "");
return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(get_llvm_type(g, child_type)), payload_val, "");
@@ -5262,8 +5262,8 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
ir_assert(type_has_bits(g, child_type), &instruction->base);
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
- if (operand_type->id == ZigTypeIdBool) {
- payload_val = LLVMBuildTrunc(g->builder, payload_val, g->builtin_types.entry_bool->llvm_type, "");
+ if (instruction->actual_type != nullptr) {
+ payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), "");
}
LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, "");
gen_assign_raw(g, val_ptr, get_pointer_to_type(g, child_type, false), payload_val);
@@ -5842,14 +5842,14 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
- if (operand_type->id == ZigTypeIdBool) {
- // treat bool as u8
+ if (instruction->actual_type != nullptr) {
+ // operand needs widening and truncating
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
- LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_u8->llvm_type, "");
+ LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
+ LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, get_llvm_type(g, instruction->actual_type), "");
LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
g->is_single_threaded);
- return LLVMBuildTrunc(g->builder, uncasted_result, g->builtin_types.entry_bool->llvm_type, "");
+ return LLVMBuildTrunc(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
}
if (get_codegen_ptr_type_bail(g, operand_type) == nullptr) {
@@ -5872,13 +5872,13 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutableGen *executabl
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
ZigType *operand_type = instruction->ptr->value->type->data.pointer.child_type;
- if (operand_type->id == ZigTypeIdBool) {
- // treat bool as u8
+ if (instruction->actual_type != nullptr) {
+ // operand needs widening and truncating
ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
+ LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, "");
LLVMSetOrdering(load_inst, ordering);
- return LLVMBuildTrunc(g->builder, load_inst, g->builtin_types.entry_bool->llvm_type, "");
+ return LLVMBuildTrunc(g->builder, load_inst, get_llvm_type(g, operand_type), "");
}
LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, "");
LLVMSetOrdering(load_inst, ordering);
@@ -5892,12 +5892,11 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef value = ir_llvm_value(g, instruction->value);
- ZigType *operand_type = instruction->value->value->type;
- if (operand_type->id == ZigTypeIdBool) {
- // treat bool as u8
+ if (instruction->actual_type != nullptr) {
+ // operand needs widening and truncating
ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(g->builtin_types.entry_u8->llvm_type, 0), "");
- value = LLVMConstZExt(value, g->builtin_types.entry_u8->llvm_type);
+ LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
+ value = LLVMConstZExt(value, get_llvm_type(g, instruction->actual_type));
}
LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type);
LLVMSetOrdering(store_inst, ordering);
diff --git a/src/ir.cpp b/src/ir.cpp
index cad1d382e16e..9465be8b0b24 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -227,7 +227,7 @@ static IrInstGen *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name
static void ir_assert(bool ok, IrInst* source_instruction);
static void ir_assert_gen(bool ok, IrInstGen *source_instruction);
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var);
-static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op);
+static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, ZigType **actual_type);
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval, ResultLoc *result_loc);
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc);
static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align);
@@ -3406,7 +3406,7 @@ static IrInstSrc *ir_build_cmpxchg_src(IrBuilderSrc *irb, Scope *scope, AstNode
static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
IrInstGen *ptr, IrInstGen *cmp_value, IrInstGen *new_value,
- AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc)
+ AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc, ZigType *actual_type)
{
IrInstGenCmpxchg *instruction = ir_build_inst_gen(&ira->new_irb,
source_instruction->scope, source_instruction->source_node);
@@ -3418,6 +3418,7 @@ static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instructio
instruction->failure_order = failure_order;
instruction->is_weak = is_weak;
instruction->result_loc = result_loc;
+ instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(cmp_value, ira->new_irb.current_basic_block);
@@ -4554,7 +4555,7 @@ static IrInstSrc *ir_build_atomic_rmw_src(IrBuilderSrc *irb, Scope *scope, AstNo
}
static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type)
+ IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type, ZigType *actual_type)
{
IrInstGenAtomicRmw *instruction = ir_build_inst_gen(&ira->new_irb, source_instr->scope, source_instr->source_node);
instruction->base.value->type = operand_type;
@@ -4562,6 +4563,7 @@ static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
instruction->op = op;
instruction->operand = operand;
instruction->ordering = ordering;
+ instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(operand, ira->new_irb.current_basic_block);
@@ -4585,13 +4587,14 @@ static IrInstSrc *ir_build_atomic_load_src(IrBuilderSrc *irb, Scope *scope, AstN
}
static IrInstGen *ir_build_atomic_load_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type)
+ IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type, ZigType *actual_type)
{
IrInstGenAtomicLoad *instruction = ir_build_inst_gen(&ira->new_irb,
source_instr->scope, source_instr->source_node);
instruction->base.value->type = operand_type;
instruction->ptr = ptr;
instruction->ordering = ordering;
+ instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
@@ -4616,13 +4619,14 @@ static IrInstSrc *ir_build_atomic_store_src(IrBuilderSrc *irb, Scope *scope, Ast
}
static IrInstGen *ir_build_atomic_store_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering)
+ IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering, ZigType *actual_type)
{
IrInstGenAtomicStore *instruction = ir_build_inst_void(&ira->new_irb,
source_instr->scope, source_instr->source_node);
instruction->ptr = ptr;
instruction->value = value;
instruction->ordering = ordering;
+ instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(value, ira->new_irb.current_basic_block);
@@ -25121,7 +25125,8 @@ static IrInstGen *ir_analyze_instruction_embed_file(IrAnalyze *ira, IrInstSrcEmb
}
static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxchg *instruction) {
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->child);
+ ZigType *actual_type;
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->child, &actual_type);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -25213,7 +25218,7 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
return ir_build_cmpxchg_gen(ira, &instruction->base.base, result_type,
casted_ptr, casted_cmp_value, casted_new_value,
- success_order, failure_order, instruction->is_weak, result_loc);
+ success_order, failure_order, instruction->is_weak, result_loc, actual_type);
}
static IrInstGen *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstSrcFence *instruction) {
@@ -28305,17 +28310,15 @@ static IrInstGen *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstSrcTagTy
}
}
-static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
+static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, ZigType **actual_type) {
ZigType *operand_type = ir_resolve_type(ira, op);
if (type_is_invalid(operand_type))
return ira->codegen->builtin_types.entry_invalid;
- if (operand_type->id == ZigTypeIdInt) {
- if (operand_type->data.integral.bit_count < 8) {
- ir_add_error(ira, &op->base,
- buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type",
- operand_type->data.integral.bit_count));
- return ira->codegen->builtin_types.entry_invalid;
+ *actual_type = nullptr;
+ if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) {
+ if (operand_type->id == ZigTypeIdEnum) {
+ operand_type = operand_type->data.enumeration.tag_int_type;
}
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
if (operand_type->data.integral.bit_count > max_atomic_bits) {
@@ -28324,30 +28327,22 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
max_atomic_bits, operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
- if (!is_power_of_2(operand_type->data.integral.bit_count)) {
- ir_add_error(ira, &op->base,
- buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count));
- return ira->codegen->builtin_types.entry_invalid;
- }
- } else if (operand_type->id == ZigTypeIdEnum) {
- ZigType *int_type = operand_type->data.enumeration.tag_int_type;
- if (int_type->data.integral.bit_count < 8) {
- ir_add_error(ira, &op->base,
- buf_sprintf("expected enum tag type 8 bits or larger, found %" PRIu32 "-bit tag type",
- int_type->data.integral.bit_count));
- return ira->codegen->builtin_types.entry_invalid;
- }
- uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
- if (int_type->data.integral.bit_count > max_atomic_bits) {
- ir_add_error(ira, &op->base,
- buf_sprintf("expected %" PRIu32 "-bit enum tag type or smaller, found %" PRIu32 "-bit tag type",
- max_atomic_bits, int_type->data.integral.bit_count));
- return ira->codegen->builtin_types.entry_invalid;
- }
- if (!is_power_of_2(int_type->data.integral.bit_count)) {
- ir_add_error(ira, &op->base,
- buf_sprintf("%" PRIu32 "-bit enum tag type is not a power of 2", int_type->data.integral.bit_count));
- return ira->codegen->builtin_types.entry_invalid;
+ auto bit_count = operand_type->data.integral.bit_count;
+ bool is_signed = operand_type->data.integral.is_signed;
+ if (bit_count < 2 || !is_power_of_2(bit_count)) {
+ if (bit_count < 8) {
+ *actual_type = get_int_type(ira->codegen, is_signed, 8);
+ } else if (bit_count < 16) {
+ *actual_type = get_int_type(ira->codegen, is_signed, 16);
+ } else if (bit_count < 32) {
+ *actual_type = get_int_type(ira->codegen, is_signed, 32);
+ } else if (bit_count < 64) {
+ *actual_type = get_int_type(ira->codegen, is_signed, 64);
+ } else if (bit_count < 128) {
+ *actual_type = get_int_type(ira->codegen, is_signed, 128);
+ } else {
+ zig_unreachable();
+ }
}
} else if (operand_type->id == ZigTypeIdFloat) {
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
@@ -28359,6 +28354,7 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
}
} else if (operand_type->id == ZigTypeIdBool) {
// will be treated as u8
+ *actual_type = ira->codegen->builtin_types.entry_u8;
} else {
Error err;
ZigType *operand_ptr_type;
@@ -28376,7 +28372,8 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
}
static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAtomicRmw *instruction) {
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
+ ZigType *actual_type;
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28434,11 +28431,12 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
}
return ir_build_atomic_rmw_gen(ira, &instruction->base.base, casted_ptr, casted_operand, op,
- ordering, operand_type);
+ ordering, operand_type, actual_type);
}
static IrInstGen *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstSrcAtomicLoad *instruction) {
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
+ ZigType *actual_type;
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28468,11 +28466,12 @@ static IrInstGen *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstSrcAt
return result;
}
- return ir_build_atomic_load_gen(ira, &instruction->base.base, casted_ptr, ordering, operand_type);
+ return ir_build_atomic_load_gen(ira, &instruction->base.base, casted_ptr, ordering, operand_type, actual_type);
}
static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcAtomicStore *instruction) {
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
+ ZigType *actual_type;
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28511,7 +28510,7 @@ static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcA
return result;
}
- return ir_build_atomic_store_gen(ira, &instruction->base.base, casted_ptr, casted_value, ordering);
+ return ir_build_atomic_store_gen(ira, &instruction->base.base, casted_ptr, casted_value, ordering, actual_type);
}
static IrInstGen *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstSrcSaveErrRetAddr *instruction) {
From 64e60d8ae2c06689a2e0533eb43a1c6a8ff01259 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 11 Mar 2020 10:29:15 +0200
Subject: [PATCH 04/11] special case atomic operations on zero bit types
---
src/ir.cpp | 37 ++++++++++++++++++++++++++------
test/stage1/behavior/atomics.zig | 27 ++++++++++++++++-------
2 files changed, 50 insertions(+), 14 deletions(-)
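
Not part of the patch: zero-bit types such as u0 have exactly one possible value, so ir.cpp now folds these operations to constants instead of reaching codegen. A sketch mirroring the testAtomicsWithType(u0, 0, 0) case added below:

    const expect = @import("std").testing.expect;

    test "zero-bit atomic sketch" {
        var x: u0 = 0;
        @atomicStore(u0, &x, 0, .SeqCst); // folded to void
        expect(@atomicLoad(u0, &x, .SeqCst) == 0); // u0 can only ever hold 0
        expect(@atomicRmw(u0, &x, .Xchg, 0, .SeqCst) == 0);
        expect(@cmpxchgStrong(u0, &x, 0, 0, .SeqCst, .SeqCst) == null); // always succeeds
    }
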
diff --git a/src/ir.cpp b/src/ir.cpp
index 9465be8b0b24..2e87bd2687ec 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -25199,12 +25199,22 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
return ira->codegen->invalid_inst_gen;
}
+ ZigType *result_type = get_optional_type(ira->codegen, operand_type);
+
+ // special case zero bit types
+ if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
+ ZigValue *val = ira->codegen->pass1_arena->allocate(1);
+ val->special = ConstValSpecialStatic;
+ val->type = result_type;
+ set_optional_value_to_null(val);
+ return ir_const_move(ira, &instruction->base.base, val);
+ }
+
if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) {
zig_panic("TODO compile-time execution of cmpxchg");
}
- ZigType *result_type = get_optional_type(ira->codegen, operand_type);
IrInstGen *result_loc;
if (handle_is_ptr(ira->codegen, result_type)) {
result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
@@ -28317,18 +28327,23 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
*actual_type = nullptr;
if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) {
+ ZigType *int_type;
if (operand_type->id == ZigTypeIdEnum) {
- operand_type = operand_type->data.enumeration.tag_int_type;
+ int_type = operand_type->data.enumeration.tag_int_type;
+ } else {
+ int_type = operand_type;
}
+ auto bit_count = int_type->data.integral.bit_count;
+ bool is_signed = int_type->data.integral.is_signed;
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
- if (operand_type->data.integral.bit_count > max_atomic_bits) {
+
+ if (bit_count > max_atomic_bits) {
ir_add_error(ira, &op->base,
buf_sprintf("expected %" PRIu32 "-bit integer type or smaller, found %" PRIu32 "-bit integer type",
- max_atomic_bits, operand_type->data.integral.bit_count));
+ max_atomic_bits, bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
- auto bit_count = operand_type->data.integral.bit_count;
- bool is_signed = operand_type->data.integral.is_signed;
+
if (bit_count < 2 || !is_power_of_2(bit_count)) {
if (bit_count < 8) {
*actual_type = get_int_type(ira->codegen, is_signed, 8);
@@ -28423,6 +28438,11 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
return ira->codegen->invalid_inst_gen;
}
+ // special case zero bit types
+ if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
+ return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type));
+ }
+
if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar)
{
ir_add_error(ira, &instruction->base.base,
@@ -28504,6 +28524,11 @@ static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcA
return ira->codegen->invalid_inst_gen;
}
+ // special case zero bit types
+ if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
+ return ir_const_void(ira, &instruction->base.base);
+ }
+
if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
IrInstGen *result = ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, value, false);
result->value->type = ira->codegen->builtin_types.entry_void;
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index bda0a8469ce0..edc712d85b88 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -162,12 +162,23 @@ fn testAtomicRmwFloat() void {
expect(x == 4);
}
-test "atomics with bool" {
- var x = false;
- @atomicStore(bool, &x, true, .SeqCst);
- expect(x == true);
- expect(@atomicLoad(bool, &x, .SeqCst) == true);
- expect(@atomicRmw(bool, &x, .Xchg, false, .SeqCst) == true);
- expect(@cmpxchgStrong(bool, &x, false, true, .SeqCst, .SeqCst) == null);
- expect(@cmpxchgStrong(bool, &x, false, true, .SeqCst, .SeqCst).? == true);
+test "atomics with different types" {
+ // testAtomicsWithType(bool, true, false);
+ // inline for (.{ u1, i5, u33 }) |T| {
+ // var x: T = 0;
+ // testAtomicsWithType(T, 0, 1);
+ // }
+ testAtomicsWithType(u0, 0, 0);
+ testAtomicsWithType(i0, 0, 0);
+}
+
+fn testAtomicsWithType(comptime T: type, a: T, b: T) void {
+ var x: T = b;
+ @atomicStore(T, &x, a, .SeqCst);
+ expect(x == a);
+ expect(@atomicLoad(T, &x, .SeqCst) == a);
+ expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a);
+ expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null);
+ if (@sizeOf(T) != 0)
+ expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a);
}
From 1f66435a6b0c5ccf6e4e96df0ed96732480ab4db Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 11 Mar 2020 12:02:05 +0200
Subject: [PATCH 05/11] support cmpxchg at comptime
---
src/ir.cpp | 32 ++++++++++++------------
test/stage1/behavior/atomics.zig | 42 +++++++++++++++++---------------
2 files changed, 39 insertions(+), 35 deletions(-)
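
Not part of the patch: the analyzer previously hit zig_panic("TODO compile-time execution of cmpxchg"); it now evaluates the exchange directly. A sketch of what becomes possible, mirroring the updated behavior test:

    const expect = @import("std").testing.expect;

    test "comptime cmpxchg sketch" {
        comptime {
            var x: i32 = 1234;
            // compare fails: the current value is returned in the optional
            expect(@cmpxchgWeak(i32, &x, 99, 5678, .SeqCst, .SeqCst).? == 1234);
            // compare succeeds: null is returned and x is updated in place
            expect(@cmpxchgStrong(i32, &x, 1234, 5678, .SeqCst, .SeqCst) == null);
            expect(x == 5678);
        }
    }
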
diff --git a/src/ir.cpp b/src/ir.cpp
index 2e87bd2687ec..338f803bbe87 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -25212,7 +25212,20 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) {
- zig_panic("TODO compile-time execution of cmpxchg");
+ IrInstGen *result = ir_get_deref(ira, &instruction->base.base, casted_ptr, nullptr);
+ ZigValue *op1_val = ir_resolve_const(ira, result, UndefBad);
+ ZigValue *op2_val = ir_resolve_const(ira, casted_cmp_value, UndefBad);
+ bool eql = const_values_equal(ira->codegen, op1_val, op2_val);
+ ZigValue *val = ira->codegen->pass1_arena->allocate(1);
+ val->special = ConstValSpecialStatic;
+ val->type = result_type;
+ if (eql) {
+ ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, casted_new_value, false);
+ set_optional_value_to_null(val);
+ } else {
+ set_optional_payload(val, op1_val);
+ }
+ return ir_const_move(ira, &instruction->base.base, val);
}
IrInstGen *result_loc;
@@ -28334,7 +28347,6 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
int_type = operand_type;
}
auto bit_count = int_type->data.integral.bit_count;
- bool is_signed = int_type->data.integral.is_signed;
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
if (bit_count > max_atomic_bits) {
@@ -28344,20 +28356,8 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
return ira->codegen->builtin_types.entry_invalid;
}
- if (bit_count < 2 || !is_power_of_2(bit_count)) {
- if (bit_count < 8) {
- *actual_type = get_int_type(ira->codegen, is_signed, 8);
- } else if (bit_count < 16) {
- *actual_type = get_int_type(ira->codegen, is_signed, 16);
- } else if (bit_count < 32) {
- *actual_type = get_int_type(ira->codegen, is_signed, 32);
- } else if (bit_count < 64) {
- *actual_type = get_int_type(ira->codegen, is_signed, 64);
- } else if (bit_count < 128) {
- *actual_type = get_int_type(ira->codegen, is_signed, 128);
- } else {
- zig_unreachable();
- }
+ if (bit_count == 1 || !is_power_of_2(bit_count)) {
+ *actual_type = get_int_type(ira->codegen, int_type->data.integral.is_signed, int_type->abi_size * 8);
}
} else if (operand_type->id == ZigTypeIdFloat) {
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index edc712d85b88..9c75afc36947 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -2,29 +2,32 @@ const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const builtin = @import("builtin");
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
test "cmpxchg" {
+ testCmpxchg();
+ comptime testCmpxchg();
+}
+
+fn testCmpxchg() void {
var x: i32 = 1234;
- if (@cmpxchgWeak(i32, &x, 99, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ if (@cmpxchgWeak(i32, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
expect(x1 == 1234);
} else {
@panic("cmpxchg should have failed");
}
- while (@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ while (@cmpxchgWeak(i32, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
expect(x1 == 1234);
}
expect(x == 5678);
- expect(@cmpxchgStrong(i32, &x, 5678, 42, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ expect(@cmpxchgStrong(i32, &x, 5678, 42, .SeqCst, .SeqCst) == null);
expect(x == 42);
}
test "fence" {
var x: i32 = 1234;
- @fence(AtomicOrder.SeqCst);
+ @fence(.SeqCst);
x = 5678;
}
@@ -36,18 +39,18 @@ test "atomicrmw and atomicload" {
}
fn testAtomicRmw(ptr: *u8) void {
- const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst);
+ const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst);
expect(prev_value == 200);
comptime {
var x: i32 = 1234;
const y: i32 = 12345;
- expect(@atomicLoad(i32, &x, AtomicOrder.SeqCst) == 1234);
- expect(@atomicLoad(i32, &y, AtomicOrder.SeqCst) == 12345);
+ expect(@atomicLoad(i32, &x, .SeqCst) == 1234);
+ expect(@atomicLoad(i32, &y, .SeqCst) == 12345);
}
}
fn testAtomicLoad(ptr: *u8) void {
- const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst);
+ const x = @atomicLoad(u8, ptr, .SeqCst);
expect(x == 42);
}
@@ -56,18 +59,18 @@ test "cmpxchg with ptr" {
var data2: i32 = 5678;
var data3: i32 = 9101;
var x: *i32 = &data1;
- if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| {
expect(x1 == &data1);
} else {
@panic("cmpxchg should have failed");
}
- while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| {
expect(x1 == &data1);
}
expect(x == &data3);
- expect(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null);
expect(x == &data2);
}
@@ -163,16 +166,17 @@ fn testAtomicRmwFloat() void {
}
test "atomics with different types" {
- // testAtomicsWithType(bool, true, false);
- // inline for (.{ u1, i5, u33 }) |T| {
- // var x: T = 0;
- // testAtomicsWithType(T, 0, 1);
- // }
+ testAtomicsWithType(bool, true, false);
+ inline for (.{ u1, i5, u33 }) |T| {
+ var x: T = 0;
+ testAtomicsWithType(T, 0, 1);
+ }
testAtomicsWithType(u0, 0, 0);
testAtomicsWithType(i0, 0, 0);
}
-fn testAtomicsWithType(comptime T: type, a: T, b: T) void {
+// a and b shouldn't need to be comptime
+fn testAtomicsWithType(comptime T: type, comptime a: T, comptime b: T) void {
var x: T = b;
@atomicStore(T, &x, a, .SeqCst);
expect(x == a);
From ec906a97712b329694e8f2e1a35b50b75d052642 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 11 Mar 2020 13:29:17 +0200
Subject: [PATCH 06/11] fix codegen, update docs
---
doc/langref.html.in | 45 +++++++-------------------------
src/codegen.cpp | 10 +++----
test/stage1/behavior/atomics.zig | 5 ++--
3 files changed, 17 insertions(+), 43 deletions(-)
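
Not part of the patch: the earlier widening used LLVMConstZExt, which only folds constant operands, so it broke once the compared or stored operands were runtime values; LLVMBuildZExt emits real instructions instead. A hedged Zig-level sketch of the kind of code that needs the instruction path (the function and names are illustrative, not from the diff):

    fn swapIfEqual(ptr: *u7, expected: u7, new: u7) ?u7 {
        // u7 is widened in codegen; expected and new are runtime values here,
        // so their zero-extensions must be emitted as instructions.
        return @cmpxchgStrong(u7, ptr, expected, new, .SeqCst, .SeqCst);
    }
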
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 447d545975f8..0dedb48b1c67 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6728,17 +6728,8 @@ async fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
- an integer whose bit count meets these requirements:
-
-
- - At least 8
- - At most the same as usize
- - Power of 2
-
- or an enum with a valid integer tag type.
-
- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
- we can remove this restriction
+ {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ an integer or an enum.
{#header_close#}
{#header_open|@atomicRmw#}
@@ -6747,17 +6738,8 @@ async fn func(y: *i32) void {
This builtin function atomically modifies memory and then returns the previous value.
- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
- or an integer whose bit count meets these requirements:
-
-
- - At least 8
- - At most the same as usize
- - Power of 2
-
-
- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
- we can remove this restriction
+ {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ an integer or an enum.
Supported operations:
@@ -6782,17 +6764,8 @@ async fn func(y: *i32) void {
This builtin function atomically stores a value.
- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float,
- an integer whose bit count meets these requirements:
-
-
- - At least 8
- - At most the same as usize
- - Power of 2
-
or an enum with a valid integer tag type.
-
- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
- we can remove this restriction
+ {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ an integer or an enum.
{#header_close#}
{#header_open|@bitCast#}
@@ -7108,7 +7081,8 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
more efficiently in machine instructions.
- {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+ {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ an integer or an enum.
{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
{#see_also|Compile Variables|cmpxchgWeak#}
@@ -7136,7 +7110,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
- {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}.
+ {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ an integer or an enum.
{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
{#see_also|Compile Variables|cmpxchgStrong#}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index cfb3de292aa1..66cf919d883b 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5229,8 +5229,8 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
// operand needs widening and truncating
ptr_val = LLVMBuildBitCast(g->builder, ptr_val,
LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- cmp_val = LLVMConstZExt(cmp_val, get_llvm_type(g, instruction->actual_type));
- new_val = LLVMConstZExt(new_val, get_llvm_type(g, instruction->actual_type));
+ cmp_val = LLVMBuildZExt(g->builder, cmp_val, get_llvm_type(g, instruction->actual_type), "");
+ new_val = LLVMBuildZExt(g->builder, new_val, get_llvm_type(g, instruction->actual_type), "");
}
LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order);
@@ -5846,7 +5846,7 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable
// operand needs widening and truncating
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, get_llvm_type(g, instruction->actual_type), "");
+ LLVMValueRef casted_operand = LLVMBuildZExt(g->builder, operand, get_llvm_type(g, instruction->actual_type), "");
LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
g->is_single_threaded);
return LLVMBuildTrunc(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
@@ -5893,10 +5893,10 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab
LLVMValueRef value = ir_llvm_value(g, instruction->value);
if (instruction->actual_type != nullptr) {
- // operand needs widening and truncating
+ // operand needs widening
ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- value = LLVMConstZExt(value, get_llvm_type(g, instruction->actual_type));
+ value = LLVMBuildZExt(g->builder, value, get_llvm_type(g, instruction->actual_type), "");
}
LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type);
LLVMSetOrdering(store_inst, ordering);
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index 9c75afc36947..c655bfe7a404 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -167,7 +167,7 @@ fn testAtomicRmwFloat() void {
test "atomics with different types" {
testAtomicsWithType(bool, true, false);
- inline for (.{ u1, i5, u33 }) |T| {
+ inline for (.{ u1, i5, u15 }) |T| {
var x: T = 0;
testAtomicsWithType(T, 0, 1);
}
@@ -175,8 +175,7 @@ test "atomics with different types" {
testAtomicsWithType(i0, 0, 0);
}
-// a and b shouldn't need to be comptime
-fn testAtomicsWithType(comptime T: type, comptime a: T, comptime b: T) void {
+fn testAtomicsWithType(comptime T: type, a: T, b: T) void {
var x: T = b;
@atomicStore(T, &x, a, .SeqCst);
expect(x == a);
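The relaxed langref wording above ("T must be a bool, a float, an integer or an enum") also covers @cmpxchgStrong and @cmpxchgWeak; a rough sketch with an enum operand (the State type and its tags are made up for illustration):

const expect = @import("std").testing.expect;

const State = enum(u8) { idle, busy };

test "cmpxchg on an enum (sketch)" {
    var state: State = .idle;
    // success: the stored value matched the expected one, so null is returned
    expect(@cmpxchgStrong(State, &state, .idle, .busy, .SeqCst, .SeqCst) == null);
    expect(state == .busy);
    // failure: the value actually stored is returned instead
    expect(@cmpxchgStrong(State, &state, .idle, .busy, .SeqCst, .SeqCst).? == .busy);
}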
From 9262f065f50db3ea72454b6c1b9b66fd911aa6ba Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 11 Mar 2020 16:46:12 +0200
Subject: [PATCH 07/11] Move abi size checking to codegen
---
lib/std/atomic/int.zig | 13 +++-----
src/all_types.hpp | 8 -----
src/codegen.cpp | 61 +++++++++++++++++++++++++++--------
src/ir.cpp | 73 ++++++++++++++++++------------------------
4 files changed, 84 insertions(+), 71 deletions(-)
diff --git a/lib/std/atomic/int.zig b/lib/std/atomic/int.zig
index 94985b914fae..446059e7ef66 100644
--- a/lib/std/atomic/int.zig
+++ b/lib/std/atomic/int.zig
@@ -1,6 +1,3 @@
-const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-
/// Thread-safe, lock-free integer
pub fn Int(comptime T: type) type {
return struct {
@@ -14,16 +11,16 @@ pub fn Int(comptime T: type) type {
/// Returns previous value
pub fn incr(self: *Self) T {
- return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ return @atomicRmw(T, &self.unprotected_value, .Add, 1, .SeqCst);
}
/// Returns previous value
pub fn decr(self: *Self) T {
- return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ return @atomicRmw(T, &self.unprotected_value, .Sub, 1, .SeqCst);
}
pub fn get(self: *Self) T {
- return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+ return @atomicLoad(T, &self.unprotected_value, .SeqCst);
}
pub fn set(self: *Self, new_value: T) void {
@@ -31,11 +28,11 @@ pub fn Int(comptime T: type) type {
}
pub fn xchg(self: *Self, new_value: T) T {
- return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst);
+ return @atomicRmw(T, &self.unprotected_value, .Xchg, new_value, .SeqCst);
}
pub fn fetchAdd(self: *Self, op: T) T {
- return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, op, AtomicOrder.SeqCst);
+ return @atomicRmw(T, &self.unprotected_value, .Add, op, .SeqCst);
}
};
}
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 149b7f464706..14b99228ca1c 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3567,8 +3567,6 @@ struct IrInstGenCmpxchg {
IrInstGen *cmp_value;
IrInstGen *new_value;
IrInstGen *result_loc;
- // non null if operand needs widening and truncating
- ZigType *actual_type;
bool is_weak;
};
@@ -4201,8 +4199,6 @@ struct IrInstGenAtomicRmw {
IrInstGen *ptr;
IrInstGen *operand;
- // non null if operand needs widening and truncating
- ZigType *actual_type;
AtomicRmwOp op;
AtomicOrder ordering;
};
@@ -4219,8 +4215,6 @@ struct IrInstGenAtomicLoad {
IrInstGen base;
IrInstGen *ptr;
- // non null if operand needs widening and truncating
- ZigType *actual_type;
AtomicOrder ordering;
};
@@ -4238,8 +4232,6 @@ struct IrInstGenAtomicStore {
IrInstGen *ptr;
IrInstGen *value;
- // non null if operand needs widening and truncating
- ZigType *actual_type;
AtomicOrder ordering;
};
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 66cf919d883b..55713c1b88c8 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5219,18 +5219,48 @@ static enum ZigLLVM_AtomicRMWBinOp to_ZigLLVMAtomicRMWBinOp(AtomicRmwOp op, bool
zig_unreachable();
}
+static LLVMTypeRef get_atomic_abi_type(CodeGen *g, IrInstGen *instruction) {
+    // If the operand type of an atomic operation is not power-of-two sized,
+    // we need to widen it before use and then truncate the result.
+
+ ir_assert(instruction->value->type->id == ZigTypeIdPointer, instruction);
+ ZigType *operand_type = instruction->value->type->data.pointer.child_type;
+ if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) {
+ if (operand_type->id == ZigTypeIdEnum) {
+ operand_type = operand_type->data.enumeration.tag_int_type;
+ }
+ auto bit_count = operand_type->data.integral.bit_count;
+ bool is_signed = operand_type->data.integral.is_signed;
+
+ ir_assert(bit_count != 0, instruction);
+ if (bit_count == 1 || !is_power_of_2(bit_count)) {
+ return get_llvm_type(g, get_int_type(g, is_signed, operand_type->abi_size * 8));
+ } else {
+ return nullptr;
+ }
+ } else if (operand_type->id == ZigTypeIdFloat) {
+ return nullptr;
+ } else if (operand_type->id == ZigTypeIdBool) {
+ return g->builtin_types.entry_u8->llvm_type;
+ } else {
+ ir_assert(get_codegen_ptr_type_bail(g, operand_type) != nullptr, instruction);
+ return nullptr;
+ }
+}
+
static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, IrInstGenCmpxchg *instruction) {
LLVMValueRef ptr_val = ir_llvm_value(g, instruction->ptr);
LLVMValueRef cmp_val = ir_llvm_value(g, instruction->cmp_value);
LLVMValueRef new_val = ir_llvm_value(g, instruction->new_value);
ZigType *operand_type = instruction->new_value->value->type;
- if (instruction->actual_type != nullptr) {
+ LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr);
+ if (actual_abi_type != nullptr) {
// operand needs widening and truncating
ptr_val = LLVMBuildBitCast(g->builder, ptr_val,
- LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- cmp_val = LLVMBuildZExt(g->builder, cmp_val, get_llvm_type(g, instruction->actual_type), "");
- new_val = LLVMBuildZExt(g->builder, new_val, get_llvm_type(g, instruction->actual_type), "");
+ LLVMPointerType(actual_abi_type, 0), "");
+ cmp_val = LLVMBuildZExt(g->builder, cmp_val, actual_abi_type, "");
+ new_val = LLVMBuildZExt(g->builder, new_val, actual_abi_type, "");
}
LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order);
@@ -5245,7 +5275,7 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
if (!handle_is_ptr(g, optional_type)) {
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
- if (instruction->actual_type != nullptr) {
+ if (actual_abi_type != nullptr) {
payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), "");
}
LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, "");
@@ -5262,7 +5292,7 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
ir_assert(type_has_bits(g, child_type), &instruction->base);
LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
- if (instruction->actual_type != nullptr) {
+ if (actual_abi_type != nullptr) {
payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), "");
}
LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, "");
@@ -5842,11 +5872,12 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
- if (instruction->actual_type != nullptr) {
+ LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr);
+ if (actual_abi_type != nullptr) {
// operand needs widening and truncating
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- LLVMValueRef casted_operand = LLVMBuildZExt(g->builder, operand, get_llvm_type(g, instruction->actual_type), "");
+ LLVMPointerType(actual_abi_type, 0), "");
+ LLVMValueRef casted_operand = LLVMBuildZExt(g->builder, operand, actual_abi_type, "");
LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
g->is_single_threaded);
return LLVMBuildTrunc(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
@@ -5872,10 +5903,11 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutableGen *executabl
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
ZigType *operand_type = instruction->ptr->value->type->data.pointer.child_type;
- if (instruction->actual_type != nullptr) {
+ LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr);
+ if (actual_abi_type != nullptr) {
// operand needs widening and truncating
ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
+ LLVMPointerType(actual_abi_type, 0), "");
LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, "");
LLVMSetOrdering(load_inst, ordering);
return LLVMBuildTrunc(g->builder, load_inst, get_llvm_type(g, operand_type), "");
@@ -5892,11 +5924,12 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab
LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
LLVMValueRef value = ir_llvm_value(g, instruction->value);
- if (instruction->actual_type != nullptr) {
+ LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr);
+ if (actual_abi_type != nullptr) {
// operand needs widening
ptr = LLVMBuildBitCast(g->builder, ptr,
- LLVMPointerType(get_llvm_type(g, instruction->actual_type), 0), "");
- value = LLVMBuildZExt(g->builder, value, get_llvm_type(g, instruction->actual_type), "");
+ LLVMPointerType(actual_abi_type, 0), "");
+ value = LLVMBuildZExt(g->builder, value, actual_abi_type, "");
}
LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type);
LLVMSetOrdering(store_inst, ordering);
diff --git a/src/ir.cpp b/src/ir.cpp
index 338f803bbe87..dc3e7941d620 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -227,7 +227,7 @@ static IrInstGen *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name
static void ir_assert(bool ok, IrInst* source_instruction);
static void ir_assert_gen(bool ok, IrInstGen *source_instruction);
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var);
-static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, ZigType **actual_type);
+static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op);
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval, ResultLoc *result_loc);
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc);
static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align);
@@ -3406,7 +3406,7 @@ static IrInstSrc *ir_build_cmpxchg_src(IrBuilderSrc *irb, Scope *scope, AstNode
static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
IrInstGen *ptr, IrInstGen *cmp_value, IrInstGen *new_value,
- AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc, ZigType *actual_type)
+ AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc)
{
IrInstGenCmpxchg *instruction = ir_build_inst_gen(&ira->new_irb,
source_instruction->scope, source_instruction->source_node);
@@ -3418,7 +3418,6 @@ static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instructio
instruction->failure_order = failure_order;
instruction->is_weak = is_weak;
instruction->result_loc = result_loc;
- instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(cmp_value, ira->new_irb.current_basic_block);
@@ -4555,7 +4554,7 @@ static IrInstSrc *ir_build_atomic_rmw_src(IrBuilderSrc *irb, Scope *scope, AstNo
}
static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type, ZigType *actual_type)
+ IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type)
{
IrInstGenAtomicRmw *instruction = ir_build_inst_gen(&ira->new_irb, source_instr->scope, source_instr->source_node);
instruction->base.value->type = operand_type;
@@ -4563,7 +4562,6 @@ static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
instruction->op = op;
instruction->operand = operand;
instruction->ordering = ordering;
- instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(operand, ira->new_irb.current_basic_block);
@@ -4587,14 +4585,13 @@ static IrInstSrc *ir_build_atomic_load_src(IrBuilderSrc *irb, Scope *scope, AstN
}
static IrInstGen *ir_build_atomic_load_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type, ZigType *actual_type)
+ IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type)
{
IrInstGenAtomicLoad *instruction = ir_build_inst_gen(&ira->new_irb,
source_instr->scope, source_instr->source_node);
instruction->base.value->type = operand_type;
instruction->ptr = ptr;
instruction->ordering = ordering;
- instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
@@ -4619,14 +4616,13 @@ static IrInstSrc *ir_build_atomic_store_src(IrBuilderSrc *irb, Scope *scope, Ast
}
static IrInstGen *ir_build_atomic_store_gen(IrAnalyze *ira, IrInst *source_instr,
- IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering, ZigType *actual_type)
+ IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering)
{
IrInstGenAtomicStore *instruction = ir_build_inst_void(&ira->new_irb,
source_instr->scope, source_instr->source_node);
instruction->ptr = ptr;
instruction->value = value;
instruction->ordering = ordering;
- instruction->actual_type = actual_type;
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(value, ira->new_irb.current_basic_block);
@@ -25125,8 +25121,7 @@ static IrInstGen *ir_analyze_instruction_embed_file(IrAnalyze *ira, IrInstSrcEmb
}
static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxchg *instruction) {
- ZigType *actual_type;
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->child, &actual_type);
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->child);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -25203,29 +25198,34 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
// special case zero bit types
if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
- ZigValue *val = ira->codegen->pass1_arena->allocate(1);
- val->special = ConstValSpecialStatic;
- val->type = result_type;
- set_optional_value_to_null(val);
- return ir_const_move(ira, &instruction->base.base, val);
+ IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
+ set_optional_value_to_null(result->value);
+ return result;
}
if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) {
- IrInstGen *result = ir_get_deref(ira, &instruction->base.base, casted_ptr, nullptr);
- ZigValue *op1_val = ir_resolve_const(ira, result, UndefBad);
+ ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
+ if (ptr_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
+ ZigValue *op1_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
+ if (op1_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
ZigValue *op2_val = ir_resolve_const(ira, casted_cmp_value, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
bool eql = const_values_equal(ira->codegen, op1_val, op2_val);
- ZigValue *val = ira->codegen->pass1_arena->allocate(1);
- val->special = ConstValSpecialStatic;
- val->type = result_type;
+ IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
if (eql) {
ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, casted_new_value, false);
- set_optional_value_to_null(val);
+ set_optional_value_to_null(result->value);
} else {
- set_optional_payload(val, op1_val);
+ set_optional_payload(result->value, op1_val);
}
- return ir_const_move(ira, &instruction->base.base, val);
+ return result;
}
IrInstGen *result_loc;
@@ -25241,7 +25241,7 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
return ir_build_cmpxchg_gen(ira, &instruction->base.base, result_type,
casted_ptr, casted_cmp_value, casted_new_value,
- success_order, failure_order, instruction->is_weak, result_loc, actual_type);
+ success_order, failure_order, instruction->is_weak, result_loc);
}
static IrInstGen *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstSrcFence *instruction) {
@@ -28333,12 +28333,11 @@ static IrInstGen *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstSrcTagTy
}
}
-static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, ZigType **actual_type) {
+static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
ZigType *operand_type = ir_resolve_type(ira, op);
if (type_is_invalid(operand_type))
return ira->codegen->builtin_types.entry_invalid;
- *actual_type = nullptr;
if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) {
ZigType *int_type;
if (operand_type->id == ZigTypeIdEnum) {
@@ -28355,10 +28354,6 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
max_atomic_bits, bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
-
- if (bit_count == 1 || !is_power_of_2(bit_count)) {
- *actual_type = get_int_type(ira->codegen, int_type->data.integral.is_signed, int_type->abi_size * 8);
- }
} else if (operand_type->id == ZigTypeIdFloat) {
uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
if (operand_type->data.floating.bit_count > max_atomic_bits) {
@@ -28369,7 +28364,6 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
}
} else if (operand_type->id == ZigTypeIdBool) {
// will be treated as u8
- *actual_type = ira->codegen->builtin_types.entry_u8;
} else {
Error err;
ZigType *operand_ptr_type;
@@ -28387,8 +28381,7 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op, Zi
}
static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAtomicRmw *instruction) {
- ZigType *actual_type;
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28451,12 +28444,11 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
}
return ir_build_atomic_rmw_gen(ira, &instruction->base.base, casted_ptr, casted_operand, op,
- ordering, operand_type, actual_type);
+ ordering, operand_type);
}
static IrInstGen *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstSrcAtomicLoad *instruction) {
- ZigType *actual_type;
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28486,12 +28478,11 @@ static IrInstGen *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstSrcAt
return result;
}
- return ir_build_atomic_load_gen(ira, &instruction->base.base, casted_ptr, ordering, operand_type, actual_type);
+ return ir_build_atomic_load_gen(ira, &instruction->base.base, casted_ptr, ordering, operand_type);
}
static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcAtomicStore *instruction) {
- ZigType *actual_type;
- ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child, &actual_type);
+ ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
if (type_is_invalid(operand_type))
return ira->codegen->invalid_inst_gen;
@@ -28535,7 +28526,7 @@ static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcA
return result;
}
- return ir_build_atomic_store_gen(ira, &instruction->base.base, casted_ptr, casted_value, ordering, actual_type);
+ return ir_build_atomic_store_gen(ira, &instruction->base.base, casted_ptr, casted_value, ordering);
}
static IrInstGen *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstSrcSaveErrRetAddr *instruction) {
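The widening performed by get_atomic_abi_type is invisible at the language level; a non-power-of-two integer can be used directly. A minimal sketch under the same test-style assumptions:

const expect = @import("std").testing.expect;

test "atomics on a non-power-of-two integer (sketch)" {
    // u33 has no power-of-two bit width, so codegen widens it to its ABI size
    // (a 64-bit integer here) before emitting the atomic op, then truncates
    // the result back to 33 bits.
    var x: u33 = 1;
    expect(@atomicRmw(u33, &x, .Add, 2, .SeqCst) == 1);
    expect(@atomicLoad(u33, &x, .SeqCst) == 3);
}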
From 710b05b15302f05f98a31635af6d654858215f34 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 12 Mar 2020 16:46:16 +0200
Subject: [PATCH 08/11] support `@atomicRmw` at comptime
---
src/ir.cpp | 82 +++++++++++++++++++++++++++++---
test/compile_errors.zig | 18 +++----
test/stage1/behavior/atomics.zig | 29 +++++++++++
3 files changed, 114 insertions(+), 15 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index dc3e7941d620..4f139895b9ac 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -28436,14 +28436,84 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type));
}
- if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar)
- {
- ir_add_error(ira, &instruction->base.base,
- buf_sprintf("compiler bug: TODO compile-time execution of @atomicRmw"));
- return ira->codegen->invalid_inst_gen;
+ IrInst *source_inst = &instruction->base.base;
+ if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar) {
+ ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
+ if (ptr_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
+ ZigValue *op1_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
+ if (op1_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
+ ZigValue *op2_val = ir_resolve_const(ira, casted_operand, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
+ if (op == AtomicRmwOp_xchg) {
+ ir_analyze_store_ptr(ira, source_inst, casted_ptr, casted_operand, false);
+ return ir_const_move(ira, source_inst, op1_val);
+ }
+
+ if (operand_type->id == ZigTypeIdPointer || operand_type->id == ZigTypeIdOptional) {
+ ir_add_error(ira, &instruction->ordering->base,
+ buf_sprintf("TODO comptime @atomicRmw with pointers other than .Xchg"));
+ return ira->codegen->invalid_inst_gen;
+ }
+
+ if (op == AtomicRmwOp_min || op == AtomicRmwOp_max) {
+ IrBinOp bin_op;
+ if (op == AtomicRmwOp_min)
+ // store op2 if op2 < op1
+ bin_op = IrBinOpCmpGreaterThan;
+ else
+ // store op2 if op2 > op1
+ bin_op = IrBinOpCmpLessThan;
+
+ IrInstGen *dummy_value = ir_const(ira, source_inst, operand_type);
+ ir_eval_bin_op_cmp_scalar(ira, source_inst, op1_val, bin_op, op2_val, dummy_value->value);
+ if (dummy_value->value->data.x_bool)
+ ir_analyze_store_ptr(ira, source_inst, casted_ptr, casted_operand, false);
+ } else {
+ IrBinOp bin_op;
+ switch (op) {
+ case AtomicRmwOp_xchg:
+ case AtomicRmwOp_max:
+ case AtomicRmwOp_min:
+ zig_unreachable();
+ case AtomicRmwOp_add:
+ if (operand_type->id == ZigTypeIdFloat)
+ bin_op = IrBinOpAdd;
+ else
+ bin_op = IrBinOpAddWrap;
+ break;
+ case AtomicRmwOp_sub:
+ if (operand_type->id == ZigTypeIdFloat)
+ bin_op = IrBinOpSub;
+ else
+ bin_op = IrBinOpSubWrap;
+ break;
+ case AtomicRmwOp_and:
+ case AtomicRmwOp_nand:
+ bin_op = IrBinOpBinAnd;
+ break;
+ case AtomicRmwOp_or:
+ bin_op = IrBinOpBinOr;
+ break;
+ case AtomicRmwOp_xor:
+ bin_op = IrBinOpBinXor;
+ break;
+ }
+ ir_eval_math_op_scalar(ira, source_inst, operand_type, op1_val, bin_op, op2_val, op1_val);
+ if (op == AtomicRmwOp_nand) {
+ bigint_not(&op1_val->data.x_bigint, &op1_val->data.x_bigint,
+ operand_type->data.integral.bit_count, operand_type->data.integral.is_signed);
+ }
+ }
+ return ir_const_move(ira, source_inst, op1_val);
}
- return ir_build_atomic_rmw_gen(ira, &instruction->base.base, casted_ptr, casted_operand, op,
+ return ir_build_atomic_rmw_gen(ira, source_inst, casted_ptr, casted_operand, op,
ordering, operand_type);
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 28529b3f579c..2881c7f65872 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,15 +2,6 @@ const tests = @import("tests.zig");
const std = @import("std");
pub fn addCases(cases: *tests.CompileErrorContext) void {
- cases.add("atomicrmw with bool op not .Xchg",
- \\export fn entry() void {
- \\ var x = false;
- \\ _ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
- \\}
- , &[_][]const u8{
- "tmp.zig:3:30: error: @atomicRmw with bool only allowed with .Xchg",
- });
-
cases.addTest("combination of noasync and async",
\\export fn entry() void {
\\ noasync {
@@ -26,6 +17,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:5:9: error: resume in noasync scope",
});
+ cases.add("atomicrmw with bool op not .Xchg",
+ \\export fn entry() void {
+ \\ var x = false;
+ \\ _ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:3:30: error: @atomicRmw with bool only allowed with .Xchg",
+ });
+
cases.addTest("@TypeOf with no arguments",
\\export fn entry() void {
\\ _ = @TypeOf();
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index c655bfe7a404..aca5593c6ed9 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -149,6 +149,7 @@ fn testAtomicStore() void {
}
test "atomicrmw with floats" {
+ comptime testAtomicRmwFloat();
if (builtin.arch == .aarch64 or builtin.arch == .arm or builtin.arch == .riscv64)
return error.SkipZigTest;
testAtomicRmwFloat();
@@ -165,6 +166,34 @@ fn testAtomicRmwFloat() void {
expect(x == 4);
}
+test "atomicrmw with ints" {
+ testAtomicRmwFloat();
+ comptime testAtomicRmwFloat();
+}
+
+fn testAtomicRmwInt() void {
+ var x: u8 = 1;
+ _ = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
+ expect(x == 3);
+ _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
+ expect(x == 6);
+ _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
+ expect(x == 5);
+ _ = @atomicRmw(u8, &x, .And, 4, .SeqCst);
+ expect(x == 4);
+ _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst);
+ expect(x == 0xfb);
+ _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst);
+ expect(x == 0xff);
+ _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
+ expect(x == 0xfd);
+ _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
+ expect(x == 0xfd);
+ _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
+ expect(x == 1);
+}
+
+
test "atomics with different types" {
testAtomicsWithType(bool, true, false);
inline for (.{ u1, i5, u15 }) |T| {
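Since @atomicRmw now also folds at compile time, the same calls work inside a comptime block; a small sketch:

const expect = @import("std").testing.expect;

test "comptime @atomicRmw (sketch)" {
    comptime {
        var x: u8 = 1;
        // evaluated entirely at compile time; returns the previous value
        expect(@atomicRmw(u8, &x, .Xchg, 3, .SeqCst) == 1);
        expect(x == 3);
    }
}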
From ce19638cd4690a8ac01a04500fcc525341d0de78 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 12 Mar 2020 17:31:10 +0200
Subject: [PATCH 09/11] disable test on mipsel
---
test/stage1/behavior/atomics.zig | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index aca5593c6ed9..36751e7d259d 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -149,10 +149,10 @@ fn testAtomicStore() void {
}
test "atomicrmw with floats" {
- comptime testAtomicRmwFloat();
if (builtin.arch == .aarch64 or builtin.arch == .arm or builtin.arch == .riscv64)
return error.SkipZigTest;
testAtomicRmwFloat();
+ comptime testAtomicRmwFloat();
}
fn testAtomicRmwFloat() void {
@@ -167,8 +167,10 @@ fn testAtomicRmwFloat() void {
}
test "atomicrmw with ints" {
- testAtomicRmwFloat();
- comptime testAtomicRmwFloat();
+ if (builtin.arch == .mipsel)
+ return error.SkipZigTest;
+ testAtomicRmwInt();
+ comptime testAtomicRmwInt();
}
fn testAtomicRmwInt() void {
From 6dde769279aaa0cc09d13dd0670b74a8dd24f547 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 12 Mar 2020 21:15:58 +0200
Subject: [PATCH 10/11] Simplify stores, use sext for signed ints
---
src/codegen.cpp | 22 ++++++++++---
src/ir.cpp | 53 +++++++++++++++++++++++---------
test/stage1/behavior/atomics.zig | 4 +--
3 files changed, 59 insertions(+), 20 deletions(-)
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 55713c1b88c8..a9c539224646 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5259,8 +5259,13 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I
// operand needs widening and truncating
ptr_val = LLVMBuildBitCast(g->builder, ptr_val,
LLVMPointerType(actual_abi_type, 0), "");
- cmp_val = LLVMBuildZExt(g->builder, cmp_val, actual_abi_type, "");
- new_val = LLVMBuildZExt(g->builder, new_val, actual_abi_type, "");
+ if (operand_type->data.integral.is_signed) {
+ cmp_val = LLVMBuildSExt(g->builder, cmp_val, actual_abi_type, "");
+ new_val = LLVMBuildSExt(g->builder, new_val, actual_abi_type, "");
+ } else {
+ cmp_val = LLVMBuildZExt(g->builder, cmp_val, actual_abi_type, "");
+ new_val = LLVMBuildZExt(g->builder, new_val, actual_abi_type, "");
+ }
}
LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order);
@@ -5877,7 +5882,12 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable
// operand needs widening and truncating
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(actual_abi_type, 0), "");
- LLVMValueRef casted_operand = LLVMBuildZExt(g->builder, operand, actual_abi_type, "");
+ LLVMValueRef casted_operand;
+ if (operand_type->data.integral.is_signed) {
+ casted_operand = LLVMBuildSExt(g->builder, operand, actual_abi_type, "");
+ } else {
+ casted_operand = LLVMBuildZExt(g->builder, operand, actual_abi_type, "");
+ }
LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
g->is_single_threaded);
return LLVMBuildTrunc(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
@@ -5929,7 +5939,11 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab
// operand needs widening
ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(actual_abi_type, 0), "");
- value = LLVMBuildZExt(g->builder, value, actual_abi_type, "");
+ if (instruction->value->value->type->data.integral.is_signed) {
+ value = LLVMBuildSExt(g->builder, value, actual_abi_type, "");
+ } else {
+ value = LLVMBuildZExt(g->builder, value, actual_abi_type, "");
+ }
}
LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type);
LLVMSetOrdering(store_inst, ordering);
diff --git a/src/ir.cpp b/src/ir.cpp
index 4f139895b9ac..ede403c722fa 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -25197,10 +25197,16 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
ZigType *result_type = get_optional_type(ira->codegen, operand_type);
// special case zero bit types
- if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
- IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
- set_optional_value_to_null(result->value);
- return result;
+ switch (type_has_one_possible_value(ira->codegen, operand_type)) {
+ case OnePossibleValueInvalid:
+ return ira->codegen->invalid_inst_gen;
+ case OnePossibleValueYes: {
+ IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
+ set_optional_value_to_null(result->value);
+ return result;
+ }
+ case OnePossibleValueNo:
+ break;
}
if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
@@ -28432,8 +28438,13 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
}
// special case zero bit types
- if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
- return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type));
+ switch (type_has_one_possible_value(ira->codegen, operand_type)) {
+ case OnePossibleValueInvalid:
+ return ira->codegen->invalid_inst_gen;
+ case OnePossibleValueYes:
+ return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type));
+ case OnePossibleValueNo:
+ break;
}
IrInst *source_inst = &instruction->base.base;
@@ -28450,9 +28461,11 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
if (op2_val == nullptr)
return ira->codegen->invalid_inst_gen;
+ IrInstGen *result = ir_const(ira, source_inst, operand_type);
+ copy_const_val(ira->codegen, result->value, op1_val);
if (op == AtomicRmwOp_xchg) {
- ir_analyze_store_ptr(ira, source_inst, casted_ptr, casted_operand, false);
- return ir_const_move(ira, source_inst, op1_val);
+ copy_const_val(ira->codegen, op1_val, op2_val);
+ return result;
}
if (operand_type->id == ZigTypeIdPointer || operand_type->id == ZigTypeIdOptional) {
@@ -28461,6 +28474,7 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
return ira->codegen->invalid_inst_gen;
}
+ ErrorMsg *msg;
if (op == AtomicRmwOp_min || op == AtomicRmwOp_max) {
IrBinOp bin_op;
if (op == AtomicRmwOp_min)
@@ -28471,9 +28485,12 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
bin_op = IrBinOpCmpLessThan;
IrInstGen *dummy_value = ir_const(ira, source_inst, operand_type);
- ir_eval_bin_op_cmp_scalar(ira, source_inst, op1_val, bin_op, op2_val, dummy_value->value);
+ msg = ir_eval_bin_op_cmp_scalar(ira, source_inst, op1_val, bin_op, op2_val, dummy_value->value);
+ if (msg != nullptr) {
+ return ira->codegen->invalid_inst_gen;
+ }
if (dummy_value->value->data.x_bool)
- ir_analyze_store_ptr(ira, source_inst, casted_ptr, casted_operand, false);
+ copy_const_val(ira->codegen, op1_val, op2_val);
} else {
IrBinOp bin_op;
switch (op) {
@@ -28504,13 +28521,16 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
bin_op = IrBinOpBinXor;
break;
}
- ir_eval_math_op_scalar(ira, source_inst, operand_type, op1_val, bin_op, op2_val, op1_val);
+ msg = ir_eval_math_op_scalar(ira, source_inst, operand_type, op1_val, bin_op, op2_val, op1_val);
+ if (msg != nullptr) {
+ return ira->codegen->invalid_inst_gen;
+ }
if (op == AtomicRmwOp_nand) {
bigint_not(&op1_val->data.x_bigint, &op1_val->data.x_bigint,
operand_type->data.integral.bit_count, operand_type->data.integral.is_signed);
}
}
- return ir_const_move(ira, source_inst, op1_val);
+ return result;
}
return ir_build_atomic_rmw_gen(ira, source_inst, casted_ptr, casted_operand, op,
@@ -28586,8 +28606,13 @@ static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcA
}
// special case zero bit types
- if (type_has_one_possible_value(ira->codegen, operand_type) == OnePossibleValueYes) {
- return ir_const_void(ira, &instruction->base.base);
+ switch (type_has_one_possible_value(ira->codegen, operand_type)) {
+ case OnePossibleValueInvalid:
+ return ira->codegen->invalid_inst_gen;
+ case OnePossibleValueYes:
+ return ir_const_void(ira, &instruction->base.base);
+ case OnePossibleValueNo:
+ break;
}
if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index 36751e7d259d..8870091d75e9 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -175,8 +175,8 @@ test "atomicrmw with ints" {
fn testAtomicRmwInt() void {
var x: u8 = 1;
- _ = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
- expect(x == 3);
+ var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
+ expect(x == 3 and res == 1);
_ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
expect(x == 6);
_ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
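The switch to SExt matters for signed operands that get widened: a zero-extended negative value would compare as a large positive number. A hedged sketch of the case this addresses, using i5 (one of the widened test types):

const expect = @import("std").testing.expect;

test "@atomicRmw .Min on a widened signed int (sketch)" {
    var x: i5 = 1;
    // -1 zero-extended to the ABI width would read as a large positive value
    // and .Min would keep 1; with sign extension the result is -1 as expected.
    _ = @atomicRmw(i5, &x, .Min, -1, .SeqCst);
    expect(x == -1);
}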
From 71d776c3be91f6b4e982b45fbfe03e3696a397f5 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 12 Mar 2020 22:42:01 +0200
Subject: [PATCH 11/11] add note to disabled tests, improve comptime cmpxchg
---
lib/std/atomic/stack.zig | 10 +++++-----
lib/std/event/channel.zig | 9 +++------
src/ir.cpp | 18 +++++++++++-------
test/stage1/behavior/atomics.zig | 6 ++++--
4 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 07cb16e45013..092dce15b07c 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -38,8 +38,8 @@ pub fn Stack(comptime T: type) type {
node.next = self.root;
self.root = node;
} else {
- while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
- defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
+ while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+ defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
node.next = self.root;
self.root = node;
@@ -52,8 +52,8 @@ pub fn Stack(comptime T: type) type {
self.root = root.next;
return root;
} else {
- while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
- defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
+ while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+ defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
const root = self.root orelse return null;
self.root = root.next;
@@ -164,7 +164,7 @@ fn startPuts(ctx: *Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
- const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst) == true;
+ const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
while (ctx.stack.pop()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 355bd7829225..83c77bcac503 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -169,8 +169,7 @@ pub fn Channel(comptime T: type) type {
lock: while (true) {
// set the lock flag
- const prev_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst);
- if (prev_lock != 0) return;
+ if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return;
// clear the need_dispatch flag since we're about to do it
@atomicStore(bool, &self.need_dispatch, false, .SeqCst);
@@ -250,11 +249,9 @@ pub fn Channel(comptime T: type) type {
}
// clear need-dispatch flag
- const need_dispatch = @atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst);
- if (need_dispatch) continue;
+ if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue;
- const my_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst);
- assert(my_lock);
+ assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst));
// we have to check again now that we unlocked
if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
diff --git a/src/ir.cpp b/src/ir.cpp
index ede403c722fa..5facc3eb49a7 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -25215,21 +25215,25 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
if (ptr_val == nullptr)
return ira->codegen->invalid_inst_gen;
- ZigValue *op1_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
- if (op1_val == nullptr)
+ ZigValue *stored_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
+ if (stored_val == nullptr)
return ira->codegen->invalid_inst_gen;
- ZigValue *op2_val = ir_resolve_const(ira, casted_cmp_value, UndefBad);
- if (op2_val == nullptr)
+ ZigValue *expected_val = ir_resolve_const(ira, casted_cmp_value, UndefBad);
+ if (expected_val == nullptr)
+ return ira->codegen->invalid_inst_gen;
+
+ ZigValue *new_val = ir_resolve_const(ira, casted_new_value, UndefBad);
+ if (new_val == nullptr)
return ira->codegen->invalid_inst_gen;
- bool eql = const_values_equal(ira->codegen, op1_val, op2_val);
+ bool eql = const_values_equal(ira->codegen, stored_val, expected_val);
IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
if (eql) {
- ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, casted_new_value, false);
+ copy_const_val(ira->codegen, stored_val, new_val);
set_optional_value_to_null(result->value);
} else {
- set_optional_payload(result->value, op1_val);
+ set_optional_payload(result->value, stored_val);
}
return result;
}
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index 8870091d75e9..3e6d0b3d0f61 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -149,6 +149,7 @@ fn testAtomicStore() void {
}
test "atomicrmw with floats" {
+ // TODO https://github.com/ziglang/zig/issues/4457
if (builtin.arch == .aarch64 or builtin.arch == .arm or builtin.arch == .riscv64)
return error.SkipZigTest;
testAtomicRmwFloat();
@@ -167,8 +168,6 @@ fn testAtomicRmwFloat() void {
}
test "atomicrmw with ints" {
- if (builtin.arch == .mipsel)
- return error.SkipZigTest;
testAtomicRmwInt();
comptime testAtomicRmwInt();
}
@@ -189,6 +188,9 @@ fn testAtomicRmwInt() void {
expect(x == 0xff);
_ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
expect(x == 0xfd);
+
+ // TODO https://github.com/ziglang/zig/issues/4724
+ if (builtin.arch == .mipsel) return;
_ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
expect(x == 0xfd);
_ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
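The standard-library changes above rely on using the bool result of @atomicRmw directly as a condition; a minimal spinlock-style sketch of that pattern (the lock variable and function names are illustrative):

var lock: bool = false;

fn acquire() void {
    // spin until the previous value was false, i.e. this call flipped it to true
    while (@atomicRmw(bool, &lock, .Xchg, true, .SeqCst)) {}
}

fn release() void {
    @atomicStore(bool, &lock, false, .SeqCst);
}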