diff --git a/doc/langref.html.in b/doc/langref.html.in
index 446a201bbec5..e7b28014bd13 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6841,6 +6841,99 @@ async fn func(y: *i32) void {
{#header_close#}
+ {#header_open|@call#}
+ {#syntax#}@call(options: std.builtin.CallOptions, function: var, args: var) var{#endsyntax#}
+
+ Calls a function, in the same way that invoking an expression with parentheses does:
+
+ {#code_begin|test|call#}
+const assert = @import("std").debug.assert;
+
+test "noinline function call" {
+ assert(@call(.{}, add, .{3, 9}) == 12);
+}
+
+fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
+ {#code_end#}
+
+ {#syntax#}@call{#endsyntax#} allows more flexibility than normal function call syntax does. The
+ {#syntax#}CallOptions{#endsyntax#} struct is reproduced here:
+
+ {#code_begin|syntax#}
+pub const CallOptions = struct {
+ modifier: Modifier = .auto,
+ stack: ?[]align(std.Target.stack_align) u8 = null,
+
+ pub const Modifier = enum {
+ /// Equivalent to function call syntax.
+ auto,
+
+ /// Prevents tail call optimization. This guarantees that the return
+ /// address will point to the callsite, as opposed to the callsite's
+ /// callsite. If the call is otherwise required to be tail-called
+ /// or inlined, a compile error is emitted instead.
+ never_tail,
+
+ /// Guarantees that the call will not be inlined. If the call is
+ /// otherwise required to be inlined, a compile error is emitted instead.
+ never_inline,
+
+ /// Asserts that the function call will not suspend. This allows a
+ /// non-async function to call an async function.
+ no_async,
+
+ /// Guarantees that the call will be generated with tail call optimization.
+ /// If this is not possible, a compile error is emitted instead.
+ always_tail,
+
+ /// Guarantees that the call will be inlined at the callsite.
+ /// If this is not possible, a compile error is emitted instead.
+ always_inline,
+
+ /// Evaluates the call at compile-time. If the call cannot be completed at
+ /// compile-time, a compile error is emitted instead.
+ compile_time,
+ };
+};
+ {#code_end#}
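+
+ For example, the {#syntax#}modifier{#endsyntax#} field can force a call to be inlined, or
+ evaluate it at compile-time. A brief illustration of two of the modifiers described above:
+
+ {#code_begin|test|call_modifiers#}
+const assert = @import("std").debug.assert;
+
+test "function call with modifiers" {
+ // Guaranteed to be inlined at the callsite.
+ assert(@call(.{ .modifier = .always_inline }, add, .{3, 9}) == 12);
+
+ // Evaluated at compile-time; the result is comptime-known.
+ const result = @call(.{ .modifier = .compile_time }, add, .{3, 9});
+ comptime assert(result == 12);
+}
+
+fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
+ {#code_end#}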
+
+ {#header_open|Calling with a New Stack#}
+
+ When the {#syntax#}stack{#endsyntax#} option is provided, instead of using the same stack as the caller, the function uses the provided stack.
+ The new stack must be aligned to {#syntax#}std.Target.stack_align{#endsyntax#} bytes, a target-specific number; {#syntax#}16{#endsyntax#} is a safe value that works on all targets.
+
+ {#code_begin|test|new_stack_call#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+var new_stack_bytes: [1024]u8 align(16) = undefined;
+
+test "calling a function with a new stack" {
+ const arg = 1234;
+
+ const a = @call(.{.stack = new_stack_bytes[0..512]}, targetFunction, .{arg});
+ const b = @call(.{.stack = new_stack_bytes[512..]}, targetFunction, .{arg});
+ _ = targetFunction(arg);
+
+ assert(arg == 1234);
+ assert(a < b);
+}
+
+fn targetFunction(x: i32) usize {
+ assert(x == 1234);
+
+ var local_variable: i32 = 42;
+ const ptr = &local_variable;
+ ptr.* += 1;
+
+ assert(local_variable == 43);
+ return @ptrToInt(ptr);
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_close#}
+
{#header_open|@cDefine#}
{#syntax#}@cDefine(comptime name: []u8, value){#endsyntax#}
@@ -7427,27 +7520,6 @@ test "@hasDecl" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
- {#header_open|@inlineCall#}
-
- {#syntax#}@inlineCall(function: X, args: ...) Y{#endsyntax#}
-
- This calls a function, in the same way that invoking an expression with parentheses does:
-
- {#code_begin|test#}
-const assert = @import("std").debug.assert;
-
-test "inline function call" {
- assert(@inlineCall(add, 3, 9) == 12);
-}
-
-fn add(a: i32, b: i32) i32 { return a + b; }
- {#code_end#}
-
- Unlike a normal function call, however, {#syntax#}@inlineCall{#endsyntax#} guarantees that the call
- will be inlined. If the call cannot be inlined, a compile error is emitted.
-
- {#see_also|@noInlineCall#}
- {#header_close#}
-
{#header_open|@intCast#}
{#syntax#}@intCast(comptime DestType: type, int: var) DestType{#endsyntax#}
@@ -7605,71 +7677,6 @@ mem.set(u8, dest, c);{#endsyntax#}
{#header_close#}
- {#header_open|@newStackCall#}
- {#syntax#}@newStackCall(new_stack: []align(target_stack_align) u8, function: var, args: ...) var{#endsyntax#}
-
- This calls a function, in the same way that invoking an expression with parentheses does. However,
- instead of using the same stack as the caller, the function uses the stack provided in the {#syntax#}new_stack{#endsyntax#}
- parameter.
-
-
- The new stack must be aligned to {#syntax#}target_stack_align{#endsyntax#} bytes. This is a target-specific
- number. A safe value that will work on all targets is {#syntax#}16{#endsyntax#}. This value can
- also be obtained by using {#link|@sizeOf#} on the {#link|@Frame#} type of {#link|Async Functions#}.
-
- {#code_begin|test#}
-const std = @import("std");
-const assert = std.debug.assert;
-
-var new_stack_bytes: [1024]u8 align(16) = undefined;
-
-test "calling a function with a new stack" {
- const arg = 1234;
-
- const a = @newStackCall(new_stack_bytes[0..512], targetFunction, arg);
- const b = @newStackCall(new_stack_bytes[512..], targetFunction, arg);
- _ = targetFunction(arg);
-
- assert(arg == 1234);
- assert(a < b);
-}
-
-fn targetFunction(x: i32) usize {
- assert(x == 1234);
-
- var local_variable: i32 = 42;
- const ptr = &local_variable;
- ptr.* += 1;
-
- assert(local_variable == 43);
- return @ptrToInt(ptr);
-}
- {#code_end#}
- {#header_close#}
-
- {#header_open|@noInlineCall#}
- {#syntax#}@noInlineCall(function: var, args: ...) var{#endsyntax#}
-
- This calls a function, in the same way that invoking an expression with parentheses does:
-
- {#code_begin|test#}
-const assert = @import("std").debug.assert;
-
-test "noinline function call" {
- assert(@noInlineCall(add, 3, 9) == 12);
-}
-
-fn add(a: i32, b: i32) i32 {
- return a + b;
-}
- {#code_end#}
-
- Unlike a normal function call, however, {#syntax#}@noInlineCall{#endsyntax#} guarantees that the call
- will not be inlined. If the call must be inlined, a compile error is emitted.
-
- {#see_also|@inlineCall#}
- {#header_close#}
-
{#header_open|@OpaqueType#}
{#syntax#}@OpaqueType() type{#endsyntax#}
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 89acb0df60dc..35188b61e305 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -372,6 +372,44 @@ pub const Version = struct {
patch: u32,
};
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const CallOptions = struct {
+ modifier: Modifier = .auto,
+ stack: ?[]align(std.Target.stack_align) u8 = null,
+
+ pub const Modifier = enum {
+ /// Equivalent to function call syntax.
+ auto,
+
+ /// Prevents tail call optimization. This guarantees that the return
+ /// address will point to the callsite, as opposed to the callsite's
+ /// callsite. If the call is otherwise required to be tail-called
+ /// or inlined, a compile error is emitted instead.
+ never_tail,
+
+ /// Guarantees that the call will not be inlined. If the call is
+ /// otherwise required to be inlined, a compile error is emitted instead.
+ never_inline,
+
+ /// Asserts that the function call will not suspend. This allows a
+ /// non-async function to call an async function.
+ no_async,
+
+ /// Guarantees that the call will be generated with tail call optimization.
+ /// If this is not possible, a compile error is emitted instead.
+ always_tail,
+
+ /// Guarantees that the call will be inlined at the callsite.
+ /// If this is not possible, a compile error is emitted instead.
+ always_inline,
+
+ /// Evaluates the call at compile-time. If the call cannot be completed at
+ /// compile-time, a compile error is emitted instead.
+ compile_time,
+ };
+};
+
/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index bd5479d0930a..07ee1d3d002f 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -92,7 +92,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
// Help the optimizer see that hashing an int is easy by inlining!
// TODO Check if the situation is better after #561 is resolved.
- .Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),
+ .Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),
.Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),
@@ -101,7 +101,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
.ErrorSet => hash(hasher, @errorToInt(key), strat),
.AnyFrame, .Fn => hash(hasher, @ptrToInt(key), strat),
- .Pointer => @inlineCall(hashPointer, hasher, key, strat),
+ .Pointer => @call(.{ .modifier = .always_inline }, hashPointer, .{ hasher, key, strat }),
.Optional => if (key) |k| hash(hasher, k, strat),
diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig
index 5038c3758eab..0f78140c9d03 100644
--- a/lib/std/hash/cityhash.zig
+++ b/lib/std/hash/cityhash.zig
@@ -197,7 +197,7 @@ pub const CityHash64 = struct {
}
fn hashLen16(u: u64, v: u64) u64 {
- return @inlineCall(hash128To64, u, v);
+ return @call(.{ .modifier = .always_inline }, hash128To64, .{ u, v });
}
fn hashLen16Mul(low: u64, high: u64, mul: u64) u64 {
@@ -210,7 +210,7 @@ pub const CityHash64 = struct {
}
fn hash128To64(low: u64, high: u64) u64 {
- return @inlineCall(hashLen16Mul, low, high, 0x9ddfea08eb382d69);
+ return @call(.{ .modifier = .always_inline }, hashLen16Mul, .{ low, high, 0x9ddfea08eb382d69 });
}
fn hashLen0To16(str: []const u8) u64 {
@@ -291,7 +291,14 @@ pub const CityHash64 = struct {
}
fn weakHashLen32WithSeeds(ptr: [*]const u8, a: u64, b: u64) WeakPair {
- return @inlineCall(weakHashLen32WithSeedsHelper, fetch64(ptr), fetch64(ptr + 8), fetch64(ptr + 16), fetch64(ptr + 24), a, b);
+ return @call(.{ .modifier = .always_inline }, weakHashLen32WithSeedsHelper, .{
+ fetch64(ptr),
+ fetch64(ptr + 8),
+ fetch64(ptr + 16),
+ fetch64(ptr + 24),
+ a,
+ b,
+ });
}
pub fn hash(str: []const u8) u64 {
@@ -339,7 +346,7 @@ pub const CityHash64 = struct {
}
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
- return @inlineCall(Self.hashWithSeeds, str, k2, seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeeds, .{ str, k2, seed });
}
pub fn hashWithSeeds(str: []const u8, seed0: u64, seed1: u64) u64 {
diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig
index d3379a81f70a..01639270101e 100644
--- a/lib/std/hash/murmur.zig
+++ b/lib/std/hash/murmur.zig
@@ -8,7 +8,7 @@ pub const Murmur2_32 = struct {
const Self = @This();
pub fn hash(str: []const u8) u32 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -44,7 +44,7 @@ pub const Murmur2_32 = struct {
}
pub fn hashUint32(v: u32) u32 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -64,7 +64,7 @@ pub const Murmur2_32 = struct {
}
pub fn hashUint64(v: u64) u32 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
@@ -93,7 +93,7 @@ pub const Murmur2_64 = struct {
const Self = @This();
pub fn hash(str: []const u8) u64 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
@@ -127,7 +127,7 @@ pub const Murmur2_64 = struct {
}
pub fn hashUint32(v: u32) u64 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u64 {
@@ -144,7 +144,7 @@ pub const Murmur2_64 = struct {
}
pub fn hashUint64(v: u64) u64 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u64 {
@@ -172,7 +172,7 @@ pub const Murmur3_32 = struct {
}
pub fn hash(str: []const u8) u32 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -220,7 +220,7 @@ pub const Murmur3_32 = struct {
}
pub fn hashUint32(v: u32) u32 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -246,7 +246,7 @@ pub const Murmur3_32 = struct {
}
pub fn hashUint64(v: u64) u32 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
diff --git a/lib/std/hash/siphash.zig b/lib/std/hash/siphash.zig
index 6b4cc2b16b52..ccef47c4b2f2 100644
--- a/lib/std/hash/siphash.zig
+++ b/lib/std/hash/siphash.zig
@@ -11,7 +11,7 @@ const testing = std.testing;
const math = std.math;
const mem = std.mem;
-const Endian = @import("builtin").Endian;
+const Endian = std.builtin.Endian;
pub fn SipHash64(comptime c_rounds: usize, comptime d_rounds: usize) type {
return SipHash(u64, c_rounds, d_rounds);
@@ -62,7 +62,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
var off: usize = 0;
while (off < b.len) : (off += 8) {
- @inlineCall(self.round, b[off .. off + 8]);
+ @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 8]});
}
self.msg_len +%= @truncate(u8, b.len);
@@ -84,9 +84,12 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
self.v2 ^= 0xff;
}
+ // TODO this is a workaround, should be able to supply the value without a separate variable
+ const inl = std.builtin.CallOptions{ .modifier = .always_inline };
+
comptime var i: usize = 0;
inline while (i < d_rounds) : (i += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
const b1 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -98,7 +101,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
comptime var j: usize = 0;
inline while (j < d_rounds) : (j += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -111,9 +114,11 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
const m = mem.readIntSliceLittle(u64, b[0..]);
self.v3 ^= m;
+ // TODO this is a workaround, should be able to supply the value without a separate variable
+ const inl = std.builtin.CallOptions{ .modifier = .always_inline };
comptime var i: usize = 0;
inline while (i < c_rounds) : (i += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
self.v0 ^= m;
@@ -140,8 +145,8 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
const aligned_len = input.len - (input.len % 8);
var c = Self.init(key);
- @inlineCall(c.update, input[0..aligned_len]);
- return @inlineCall(c.final, input[aligned_len..]);
+ @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+ return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
}
diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig
index 7e35ccc6d2f5..8d11c700cf55 100644
--- a/lib/std/hash/wyhash.zig
+++ b/lib/std/hash/wyhash.zig
@@ -65,7 +65,7 @@ const WyhashStateless = struct {
var off: usize = 0;
while (off < b.len) : (off += 32) {
- @inlineCall(self.round, b[off .. off + 32]);
+ @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
}
self.msg_len += b.len;
@@ -121,8 +121,8 @@ const WyhashStateless = struct {
const aligned_len = input.len - (input.len % 32);
var c = WyhashStateless.init(seed);
- @inlineCall(c.update, input[0..aligned_len]);
- return @inlineCall(c.final, input[aligned_len..]);
+ @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+ return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 0459b0b15828..5c84dc462b17 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -811,7 +811,7 @@ pub const Int = struct {
var j: usize = 0;
while (j < a_lo.len) : (j += 1) {
- a_lo[j] = @inlineCall(addMulLimbWithCarry, a_lo[j], y[j], xi, &carry);
+ a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry });
}
j = 0;
@@ -1214,7 +1214,11 @@ pub const Int = struct {
const dst_i = src_i + limb_shift;
const src_digit = a[src_i];
- r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
+ Limb,
+ src_digit,
+ Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ });
carry = (src_digit << interior_limb_shift);
}
@@ -1254,7 +1258,11 @@ pub const Int = struct {
const src_digit = a[src_i];
r[dst_i] = carry | (src_digit >> interior_limb_shift);
- carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ carry = @call(.{ .modifier = .always_inline }, math.shl, .{
+ Limb,
+ src_digit,
+ Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ });
}
}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index e3d84e1e63f1..907fd24db104 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -94,7 +94,7 @@ pub fn fork() usize {
/// the compiler is not aware of how vfork affects control flow and you may
/// see different results in optimized builds.
pub inline fn vfork() usize {
- return @inlineCall(syscall0, SYS_vfork);
+ return @call(.{ .modifier = .always_inline }, syscall0, .{SYS_vfork});
}
pub fn futimens(fd: i32, times: *const [2]timespec) usize {
diff --git a/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig b/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
index 33bfdabcfbac..7463c499316b 100644
--- a/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
+++ b/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
pub nakedcc fn __aeabi_dcmpeq() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Eq);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Eq});
unreachable;
}
pub nakedcc fn __aeabi_dcmplt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Lt);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Lt});
unreachable;
}
pub nakedcc fn __aeabi_dcmple() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Le);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Le});
unreachable;
}
pub nakedcc fn __aeabi_dcmpge() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Ge);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Ge});
unreachable;
}
pub nakedcc fn __aeabi_dcmpgt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Gt);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Gt});
unreachable;
}
diff --git a/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig b/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
index cc5efc64fc00..9a24641d9a6b 100644
--- a/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
+++ b/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
pub nakedcc fn __aeabi_fcmpeq() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Eq);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Eq});
unreachable;
}
pub nakedcc fn __aeabi_fcmplt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Lt);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Lt});
unreachable;
}
pub nakedcc fn __aeabi_fcmple() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Le);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Le});
unreachable;
}
pub nakedcc fn __aeabi_fcmpge() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Ge);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Ge});
unreachable;
}
pub nakedcc fn __aeabi_fcmpgt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Gt);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Gt});
unreachable;
}
diff --git a/lib/std/special/compiler_rt/divti3.zig b/lib/std/special/compiler_rt/divti3.zig
index 477ce2cb985b..fcb23a50d97b 100644
--- a/lib/std/special/compiler_rt/divti3.zig
+++ b/lib/std/special/compiler_rt/divti3.zig
@@ -17,7 +17,10 @@ pub extern fn __divti3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __divti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__divti3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
test "import divti3" {
diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/std/special/compiler_rt/extendXfYf2.zig
index 3bdc5164e220..427bd4ec24d0 100644
--- a/lib/std/special/compiler_rt/extendXfYf2.zig
+++ b/lib/std/special/compiler_rt/extendXfYf2.zig
@@ -3,19 +3,19 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
pub extern fn __extendsfdf2(a: f32) f64 {
- return @inlineCall(extendXfYf2, f64, f32, @bitCast(u32, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
}
pub extern fn __extenddftf2(a: f64) f128 {
- return @inlineCall(extendXfYf2, f128, f64, @bitCast(u64, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
}
pub extern fn __extendsftf2(a: f32) f128 {
- return @inlineCall(extendXfYf2, f128, f32, @bitCast(u32, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
}
pub extern fn __extendhfsf2(a: u16) f32 {
- return @inlineCall(extendXfYf2, f32, f16, a);
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
}
const CHAR_BIT = 8;
diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig
index 714681834d26..917dfb47fc59 100644
--- a/lib/std/special/compiler_rt/floatsiXf.zig
+++ b/lib/std/special/compiler_rt/floatsiXf.zig
@@ -55,17 +55,17 @@ fn floatsiXf(comptime T: type, a: i32) T {
pub extern fn __floatsisf(arg: i32) f32 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f32, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
}
pub extern fn __floatsidf(arg: i32) f64 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f64, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
}
pub extern fn __floatsitf(arg: i32) f128 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f128, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
}
fn test_one_floatsitf(a: i32, expected: u128) void {
diff --git a/lib/std/special/compiler_rt/modti3.zig b/lib/std/special/compiler_rt/modti3.zig
index 16f2f38ba370..d983ecba5f8e 100644
--- a/lib/std/special/compiler_rt/modti3.zig
+++ b/lib/std/special/compiler_rt/modti3.zig
@@ -22,7 +22,10 @@ pub extern fn __modti3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __modti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__modti3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __modti3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
test "import modti3" {
diff --git a/lib/std/special/compiler_rt/multi3.zig b/lib/std/special/compiler_rt/multi3.zig
index f3b74b85d908..56ff56cbb24e 100644
--- a/lib/std/special/compiler_rt/multi3.zig
+++ b/lib/std/special/compiler_rt/multi3.zig
@@ -16,7 +16,10 @@ pub extern fn __multi3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __multi3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__multi3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
fn __mulddi3(a: u64, b: u64) i128 {
diff --git a/lib/std/special/compiler_rt/stack_probe.zig b/lib/std/special/compiler_rt/stack_probe.zig
index c3e534c8ec92..6406f3977a9a 100644
--- a/lib/std/special/compiler_rt/stack_probe.zig
+++ b/lib/std/special/compiler_rt/stack_probe.zig
@@ -182,25 +182,25 @@ fn win_probe_stack_adjust_sp() void {
pub nakedcc fn _chkstk() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_adjust_sp);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk() void {
@setRuntimeSafety(false);
switch (builtin.arch) {
- .i386 => @inlineCall(win_probe_stack_adjust_sp),
- .x86_64 => @inlineCall(win_probe_stack_only),
+ .i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
+ .x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
}
}
pub nakedcc fn ___chkstk() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_adjust_sp);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk_ms() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_only);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
pub nakedcc fn ___chkstk_ms() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_only);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
diff --git a/lib/std/special/compiler_rt/umodti3.zig b/lib/std/special/compiler_rt/umodti3.zig
index 7add0b2ffef0..9d4a42147c39 100644
--- a/lib/std/special/compiler_rt/umodti3.zig
+++ b/lib/std/special/compiler_rt/umodti3.zig
@@ -11,5 +11,8 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
const v128 = @Vector(2, u64);
pub extern fn __umodti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__umodti3, @bitCast(u128, a), @bitCast(u128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __umodti3, .{
+ @bitCast(u128, a),
+ @bitCast(u128, b),
+ }));
}
diff --git a/lib/std/special/start.zig b/lib/std/special/start.zig
index a93b01c29006..d10431da2063 100644
--- a/lib/std/special/start.zig
+++ b/lib/std/special/start.zig
@@ -59,7 +59,7 @@ stdcallcc fn _DllMainCRTStartup(
extern fn wasm_freestanding_start() void {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- _ = @inlineCall(callMain);
+ _ = @call(.{ .modifier = .always_inline }, callMain, .{});
}
extern fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) usize {
@@ -89,7 +89,7 @@ nakedcc fn _start() noreturn {
if (builtin.os == builtin.Os.wasi) {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- std.os.wasi.proc_exit(@inlineCall(callMain));
+ std.os.wasi.proc_exit(@call(.{ .modifier = .always_inline }, callMain, .{}));
}
switch (builtin.arch) {
@@ -125,7 +125,7 @@ nakedcc fn _start() noreturn {
}
// If LLVM inlines stack variables into _start, they will overwrite
// the command line argument data.
- @noInlineCall(posixCallMainAndExit);
+ @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{});
}
stdcallcc fn WinMainCRTStartup() noreturn {
@@ -184,10 +184,10 @@ fn posixCallMainAndExit() noreturn {
// 0,
//) catch @panic("out of memory");
//std.os.mprotect(new_stack[0..std.mem.page_size], std.os.PROT_NONE) catch {};
- //std.os.exit(@newStackCall(new_stack, callMainWithArgs, argc, argv, envp));
+ //std.os.exit(@call(.{.stack = new_stack}, callMainWithArgs, .{argc, argv, envp}));
}
- std.os.exit(@inlineCall(callMainWithArgs, argc, argv, envp));
+ std.os.exit(@call(.{ .modifier = .always_inline }, callMainWithArgs, .{ argc, argv, envp }));
}
fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
@@ -203,7 +203,7 @@ extern fn main(c_argc: i32, c_argv: [*][*:0]u8, c_envp: [*:null]?[*:0]u8) i32 {
var env_count: usize = 0;
while (c_envp[env_count] != null) : (env_count += 1) {}
const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
- return @inlineCall(callMainWithArgs, @intCast(usize, c_argc), c_argv, envp);
+ return @call(.{ .modifier = .always_inline }, callMainWithArgs, .{ @intCast(usize, c_argc), c_argv, envp });
}
// General error message for a malformed return type
@@ -233,7 +233,7 @@ inline fn initEventLoopAndCallMain() u8 {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- return @inlineCall(callMain);
+ return @call(.{ .modifier = .always_inline }, callMain, .{});
}
async fn callMainAsync(loop: *std.event.Loop) u8 {
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 8d1c32cefd90..1d11d7969dd0 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -321,7 +321,7 @@ pub const Inst = struct {
}
const llvm_cc = llvm.CCallConv;
- const fn_inline = llvm.FnInline.Auto;
+ const call_attr = llvm.CallAttr.Auto;
return llvm.BuildCall(
ofile.builder,
@@ -329,7 +329,7 @@ pub const Inst = struct {
args.ptr,
@intCast(c_uint, args.len),
llvm_cc,
- fn_inline,
+ call_attr,
"",
) orelse error.OutOfMemory;
}
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 476637b2c2c7..040bcdc51adf 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -260,10 +260,12 @@ pub const X86StdcallCallConv = c.LLVMX86StdcallCallConv;
pub const X86FastcallCallConv = c.LLVMX86FastcallCallConv;
pub const CallConv = c.LLVMCallConv;
-pub const FnInline = extern enum {
+pub const CallAttr = extern enum {
Auto,
- Always,
- Never,
+ NeverTail,
+ NeverInline,
+ AlwaysTail,
+ AlwaysInline,
};
fn removeNullability(comptime T: type) type {
@@ -286,6 +288,6 @@ extern fn ZigLLVMTargetMachineEmitToFile(
) bool;
pub const BuildCall = ZigLLVMBuildCall;
-extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: c_uint, fn_inline: FnInline, Name: [*:0]const u8) ?*Value;
+extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: c_uint, fn_inline: CallAttr, Name: [*:0]const u8) ?*Value;
pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index b6310b02fb38..a5b080498556 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -409,6 +409,9 @@ struct ZigValue {
LLVMValueRef llvm_global;
union {
+ // populated if special == ConstValSpecialLazy
+ LazyValue *x_lazy;
+
// populated if special == ConstValSpecialStatic
BigInt x_bigint;
BigFloat x_bigfloat;
@@ -429,7 +432,6 @@ struct ZigValue {
ConstPtrValue x_ptr;
ConstArgTuple x_arg_tuple;
Buf *x_enum_literal;
- LazyValue *x_lazy;
// populated if special == ConstValSpecialRuntime
RuntimeHintErrorUnion rh_error_union;
@@ -767,11 +769,19 @@ struct AstNodeUnwrapOptional {
AstNode *expr;
};
+// Must be synchronized with std.builtin.CallOptions.Modifier
enum CallModifier {
CallModifierNone,
- CallModifierAsync,
+ CallModifierNeverTail,
+ CallModifierNeverInline,
CallModifierNoAsync,
+ CallModifierAlwaysTail,
+ CallModifierAlwaysInline,
+ CallModifierCompileTime,
+
+ // These are additional tags in the compiler, but not exposed in the std lib.
CallModifierBuiltin,
+ CallModifierAsync,
};
struct AstNodeFnCallExpr {
@@ -1692,8 +1702,6 @@ enum BuiltinFnId {
BuiltinFnIdFieldParentPtr,
BuiltinFnIdByteOffsetOf,
BuiltinFnIdBitOffsetOf,
- BuiltinFnIdInlineCall,
- BuiltinFnIdNoInlineCall,
BuiltinFnIdNewStackCall,
BuiltinFnIdAsyncCall,
BuiltinFnIdTypeId,
@@ -1717,6 +1725,7 @@ enum BuiltinFnId {
BuiltinFnIdFrameHandle,
BuiltinFnIdFrameSize,
BuiltinFnIdAs,
+ BuiltinFnIdCall,
};
struct BuiltinFnEntry {
@@ -2479,6 +2488,8 @@ enum IrInstructionId {
IrInstructionIdVarPtr,
IrInstructionIdReturnPtr,
IrInstructionIdCallSrc,
+ IrInstructionIdCallSrcArgs,
+ IrInstructionIdCallExtra,
IrInstructionIdCallGen,
IrInstructionIdConst,
IrInstructionIdReturn,
@@ -2886,15 +2897,37 @@ struct IrInstructionCallSrc {
ZigFn *fn_entry;
size_t arg_count;
IrInstruction **args;
+ IrInstruction *ret_ptr;
ResultLoc *result_loc;
IrInstruction *new_stack;
- FnInline fn_inline;
CallModifier modifier;
-
bool is_async_call_builtin;
- bool is_comptime;
+};
+
+// This is a pass1 instruction, used by @call when the args node is
+// a tuple or struct literal.
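+// For example: @call(.{}, f, .{ a, b }) (names here are illustrative).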
+struct IrInstructionCallSrcArgs {
+ IrInstruction base;
+
+ IrInstruction *options;
+ IrInstruction *fn_ref;
+ IrInstruction **args_ptr;
+ size_t args_len;
+ ResultLoc *result_loc;
+};
+
+// This is a pass1 instruction, used by @call, when the args node
+// is not a literal.
+// `args` is expected to be either a struct or a tuple.
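+// For example: @call(options, f, args), where `args` is a variable of
+// tuple or struct type rather than a literal (names here are illustrative).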
+struct IrInstructionCallExtra {
+ IrInstruction base;
+
+ IrInstruction *options;
+ IrInstruction *fn_ref;
+ IrInstruction *args;
+ ResultLoc *result_loc;
};
struct IrInstructionCallGen {
@@ -2908,7 +2941,6 @@ struct IrInstructionCallGen {
IrInstruction *frame_result_loc;
IrInstruction *new_stack;
- FnInline fn_inline;
CallModifier modifier;
bool is_async_call_builtin;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index d616148596a5..c3e24ecb46f6 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -594,8 +594,11 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
break;
}
-
- if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
+ if (inferred_struct_field != nullptr) {
+ entry->abi_size = g->builtin_types.entry_usize->abi_size;
+ entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+ entry->abi_align = g->builtin_types.entry_usize->abi_align;
+ } else if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
if (type_has_bits(child_type)) {
entry->abi_size = g->builtin_types.entry_usize->abi_size;
entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
@@ -956,10 +959,7 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
- ZigValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
- assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
-
- g->stack_trace_type = stack_trace_type_val->data.x_type;
+ g->stack_trace_type = get_builtin_type(g, "StackTrace");
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
}
return g->stack_trace_type;
@@ -2717,10 +2717,10 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
src_assert(struct_type->data.structure.fields == nullptr, decl_node);
struct_type->data.structure.fields = alloc_type_struct_fields(field_count);
} else if (decl_node->type == NodeTypeContainerInitExpr) {
- src_assert(struct_type->data.structure.is_inferred, decl_node);
- src_assert(struct_type->data.structure.fields != nullptr, decl_node);
-
field_count = struct_type->data.structure.src_field_count;
+
+ src_assert(struct_type->data.structure.is_inferred, decl_node);
+ src_assert(field_count == 0 || struct_type->data.structure.fields != nullptr, decl_node);
} else zig_unreachable();
struct_type->data.structure.fields_by_name.init(field_count);
@@ -7531,6 +7531,12 @@ ZigValue *get_builtin_value(CodeGen *codegen, const char *name) {
return var_value;
}
+ZigType *get_builtin_type(CodeGen *codegen, const char *name) {
+ ZigValue *type_val = get_builtin_value(codegen, name);
+ assert(type_val->type->id == ZigTypeIdMetaType);
+ return type_val->data.x_type;
+}
+
bool type_is_global_error_set(ZigType *err_set_type) {
assert(err_set_type->id == ZigTypeIdErrorSet);
assert(!err_set_type->data.error_set.incomplete);
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 05eb97139ead..dd22c914db79 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -207,6 +207,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, const char *symbol_name,
ZigValue *get_builtin_value(CodeGen *codegen, const char *name);
+ZigType *get_builtin_type(CodeGen *codegen, const char *name);
ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index a8fb899ec3d8..2be932ac8e39 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -702,14 +702,29 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
switch (node->data.fn_call_expr.modifier) {
case CallModifierNone:
break;
- case CallModifierBuiltin:
- fprintf(ar->f, "@");
+ case CallModifierNoAsync:
+ fprintf(ar->f, "noasync ");
break;
case CallModifierAsync:
fprintf(ar->f, "async ");
break;
- case CallModifierNoAsync:
- fprintf(ar->f, "noasync ");
+ case CallModifierNeverTail:
+ fprintf(ar->f, "notail ");
+ break;
+ case CallModifierNeverInline:
+ fprintf(ar->f, "noinline ");
+ break;
+ case CallModifierAlwaysTail:
+ fprintf(ar->f, "tail ");
+ break;
+ case CallModifierAlwaysInline:
+ fprintf(ar->f, "inline ");
+ break;
+ case CallModifierCompileTime:
+ fprintf(ar->f, "comptime ");
+ break;
+ case CallModifierBuiltin:
+ fprintf(ar->f, "@");
break;
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 324a32485eee..4428c7797c3d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -981,7 +981,7 @@ static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace
msg_arg,
stack_trace_arg,
};
- ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, ZigLLVM_FnInlineAuto, "");
+ ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, ZigLLVM_CallAttrAuto, "");
if (!stack_trace_is_llvm_alloca) {
// The stack trace argument is not in the stack of the caller, so
// we'd like to set tail call here, but because slices (the type of msg_arg) are
@@ -1201,7 +1201,8 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
LLVMValueRef args[] = { err_ret_trace_ptr, return_address };
- ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+ ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAlwaysInline, "");
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, prev_block);
@@ -1370,13 +1371,13 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
err_val,
};
call_instruction = ZigLLVMBuildCall(g->builder, safety_crash_err_fn, args, 2,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
} else {
LLVMValueRef args[] = {
err_val,
};
call_instruction = ZigLLVMBuildCall(g->builder, safety_crash_err_fn, args, 1,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
}
if (!is_llvm_alloca) {
LLVMSetTailCall(call_instruction, true);
@@ -2216,7 +2217,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
- ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+ ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAlwaysInline, "");
LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
@@ -2253,7 +2254,7 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope,
&is_llvm_alloca);
ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
@@ -2297,7 +2298,7 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
LLVMValueRef arg_val = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, resume_id, false));
LLVMValueRef args[] = {target_frame_ptr, arg_val};
- return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+ return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_CallAttrAuto, "");
}
static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
@@ -2424,7 +2425,7 @@ static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope, &is_llvm_alloca);
LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
}
}
@@ -3061,7 +3062,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
ZigType *actual_type = cast_instruction->value->value->type;
ZigType *wanted_type = cast_instruction->base.value->type;
LLVMValueRef expr_val = ir_llvm_value(g, cast_instruction->value);
- assert(expr_val);
+ ir_assert(expr_val, &cast_instruction->base);
switch (cast_instruction->cast_op) {
case CastOpNoCast:
@@ -4142,16 +4143,28 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
fn_walk.data.call.gen_param_types = &gen_param_types;
walk_function_params(g, fn_type, &fn_walk);
- ZigLLVM_FnInline fn_inline;
- switch (instruction->fn_inline) {
- case FnInlineAuto:
- fn_inline = ZigLLVM_FnInlineAuto;
+ ZigLLVM_CallAttr call_attr;
+ switch (instruction->modifier) {
+ case CallModifierBuiltin:
+ case CallModifierCompileTime:
+ zig_unreachable();
+ case CallModifierNone:
+ case CallModifierNoAsync:
+ case CallModifierAsync:
+ call_attr = ZigLLVM_CallAttrAuto;
break;
- case FnInlineAlways:
- fn_inline = (instruction->fn_entry == nullptr) ? ZigLLVM_FnInlineAuto : ZigLLVM_FnInlineAlways;
+ case CallModifierNeverTail:
+ call_attr = ZigLLVM_CallAttrNeverTail;
break;
- case FnInlineNever:
- fn_inline = ZigLLVM_FnInlineNever;
+ case CallModifierNeverInline:
+ call_attr = ZigLLVM_CallAttrNeverInline;
+ break;
+ case CallModifierAlwaysTail:
+ call_attr = ZigLLVM_CallAttrAlwaysTail;
+ break;
+ case CallModifierAlwaysInline:
+ ir_assert(instruction->fn_entry != nullptr, &instruction->base);
+ call_attr = ZigLLVM_CallAttrAlwaysInline;
break;
}
@@ -4257,7 +4270,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->new_stack == nullptr || instruction->is_async_call_builtin) {
result = ZigLLVMBuildCall(g->builder, fn_val,
- gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+ gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, call_attr, "");
} else if (instruction->modifier == CallModifierAsync) {
zig_panic("TODO @asyncCall of non-async function");
} else {
@@ -4269,7 +4282,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
gen_set_stack_pointer(g, new_stack_addr);
result = ZigLLVMBuildCall(g->builder, fn_val,
- gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+ gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, call_attr, "");
if (src_return_type->id != ZigTypeIdUnreachable) {
LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g);
LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, "");
@@ -4317,8 +4330,17 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa
return struct_ptr;
}
- ZigType *struct_type = (struct_ptr_type->id == ZigTypeIdPointer) ?
- struct_ptr_type->data.pointer.child_type : struct_ptr_type;
+ ZigType *struct_type;
+ if (struct_ptr_type->id == ZigTypeIdPointer) {
+ if (struct_ptr_type->data.pointer.inferred_struct_field != nullptr) {
+ struct_type = struct_ptr_type->data.pointer.inferred_struct_field->inferred_struct_type;
+ } else {
+ struct_type = struct_ptr_type->data.pointer.child_type;
+ }
+ } else {
+ struct_type = struct_ptr_type;
+ }
+
if ((err = type_resolve(g, struct_type, ResolveStatusLLVMFull)))
codegen_report_errors_and_exit(g);
@@ -4947,7 +4969,7 @@ static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable
LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
}
static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable,
@@ -5903,7 +5925,7 @@ static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_ins
LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, source_instr->scope, &is_llvm_alloca);
LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
}
if (non_async && type_has_bits(result_type)) {
LLVMValueRef result_ptr = (result_loc == nullptr) ? their_result_ptr : result_loc;
@@ -6137,7 +6159,9 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdLoadPtr:
case IrInstructionIdHasDecl:
case IrInstructionIdUndeclaredIdent:
+ case IrInstructionIdCallExtra:
case IrInstructionIdCallSrc:
+ case IrInstructionIdCallSrcArgs:
case IrInstructionIdAllocaSrc:
case IrInstructionIdEndExpr:
case IrInstructionIdImplicitCast:
@@ -8118,8 +8142,6 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
- create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
- create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
@@ -8146,6 +8168,7 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1);
create_builtin_fn(g, BuiltinFnIdAs, "as", 2);
+ create_builtin_fn(g, BuiltinFnIdCall, "call", 3);
}
static const char *bool_to_str(bool b) {
diff --git a/src/ir.cpp b/src/ir.cpp
index 50479c1ab509..e6978eae77d9 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -265,6 +265,7 @@ static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction
static IrInstruction *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type);
static ResultLoc *no_result_loc(void);
+static IrInstruction *ir_analyze_test_non_null(IrAnalyze *ira, IrInstruction *source_inst, IrInstruction *value);
static void destroy_instruction(IrInstruction *inst) {
#ifdef ZIG_ENABLE_MEM_PROFILE
@@ -289,6 +290,10 @@ static void destroy_instruction(IrInstruction *inst) {
return destroy(reinterpret_cast<IrInstructionReturnPtr *>(inst), name);
case IrInstructionIdCallSrc:
return destroy(reinterpret_cast<IrInstructionCallSrc *>(inst), name);
+ case IrInstructionIdCallSrcArgs:
+ return destroy(reinterpret_cast<IrInstructionCallSrcArgs *>(inst), name);
+ case IrInstructionIdCallExtra:
+ return destroy(reinterpret_cast<IrInstructionCallExtra *>(inst), name);
case IrInstructionIdCallGen:
return destroy(reinterpret_cast<IrInstructionCallGen *>(inst), name);
case IrInstructionIdUnOp:
@@ -646,6 +651,15 @@ static ZigValue *const_ptr_pointee_unchecked(CodeGen *g, ZigValue *const_val) {
assert(const_val->special == ConstValSpecialStatic);
ZigValue *result;
+ InferredStructField *isf = const_val->type->data.pointer.inferred_struct_field;
+ if (isf != nullptr) {
+ TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
+ assert(field != nullptr);
+ assert(const_val->data.x_ptr.special == ConstPtrSpecialRef);
+ ZigValue *struct_val = const_val->data.x_ptr.data.ref.pointee;
+ return struct_val->data.x_struct.fields[field->src_index];
+ }
+
switch (type_has_one_possible_value(g, const_val->type->data.pointer.child_type)) {
case OnePossibleValueInvalid:
zig_unreachable();
@@ -705,6 +719,13 @@ static bool is_opt_err_set(ZigType *ty) {
(ty->id == ZigTypeIdOptional && ty->data.maybe.child_type->id == ZigTypeIdErrorSet);
}
+static bool is_tuple(ZigType *type) {
+ return type->id == ZigTypeIdStruct && type->data.structure.decl_node != nullptr &&
+ type->data.structure.decl_node->type == NodeTypeContainerInitExpr &&
+ (type->data.structure.decl_node->data.container_init_expr.kind == ContainerInitKindArray ||
+ type->data.structure.decl_node->data.container_init_expr.entries.length == 0);
+}
+
static bool is_slice(ZigType *type) {
return type->id == ZigTypeIdStruct && type->data.structure.is_slice;
}
@@ -968,6 +989,14 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCallSrc *) {
return IrInstructionIdCallSrc;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionCallSrcArgs *) {
+ return IrInstructionIdCallSrcArgs;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionCallExtra *) {
+ return IrInstructionIdCallExtra;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionCallGen *) {
return IrInstructionIdCallGen;
}
@@ -1891,30 +1920,61 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
return &instruction->base;
}
+static IrInstruction *ir_build_call_extra(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *options, IrInstruction *fn_ref, IrInstruction *args, ResultLoc *result_loc)
+{
+ IrInstructionCallExtra *call_instruction = ir_build_instruction<IrInstructionCallExtra>(irb, scope, source_node);
+ call_instruction->options = options;
+ call_instruction->fn_ref = fn_ref;
+ call_instruction->args = args;
+ call_instruction->result_loc = result_loc;
+
+ ir_ref_instruction(options, irb->current_basic_block);
+ ir_ref_instruction(fn_ref, irb->current_basic_block);
+ ir_ref_instruction(args, irb->current_basic_block);
+
+ return &call_instruction->base;
+}
+
+static IrInstruction *ir_build_call_src_args(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *options, IrInstruction *fn_ref, IrInstruction **args_ptr, size_t args_len,
+ ResultLoc *result_loc)
+{
+ IrInstructionCallSrcArgs *call_instruction = ir_build_instruction<IrInstructionCallSrcArgs>(irb, scope, source_node);
+ call_instruction->options = options;
+ call_instruction->fn_ref = fn_ref;
+ call_instruction->args_ptr = args_ptr;
+ call_instruction->args_len = args_len;
+ call_instruction->result_loc = result_loc;
+
+ ir_ref_instruction(options, irb->current_basic_block);
+ ir_ref_instruction(fn_ref, irb->current_basic_block);
+ for (size_t i = 0; i < args_len; i += 1)
+ ir_ref_instruction(args_ptr[i], irb->current_basic_block);
+
+ return &call_instruction->base;
+}
+
static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, CallModifier modifier, bool is_async_call_builtin,
+ IrInstruction *ret_ptr, CallModifier modifier, bool is_async_call_builtin,
IrInstruction *new_stack, ResultLoc *result_loc)
{
IrInstructionCallSrc *call_instruction = ir_build_instruction<IrInstructionCallSrc>(irb, scope, source_node);
call_instruction->fn_entry = fn_entry;
call_instruction->fn_ref = fn_ref;
- call_instruction->is_comptime = is_comptime;
- call_instruction->fn_inline = fn_inline;
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->modifier = modifier;
call_instruction->is_async_call_builtin = is_async_call_builtin;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
+ call_instruction->ret_ptr = ret_ptr;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], irb->current_basic_block);
- if (modifier == CallModifierAsync && new_stack != nullptr) {
- // in this case the arg at the end is the return pointer
- ir_ref_instruction(args[arg_count], irb->current_basic_block);
- }
+ if (ret_ptr != nullptr) ir_ref_instruction(ret_ptr, irb->current_basic_block);
if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
@@ -1922,7 +1982,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- FnInline fn_inline, CallModifier modifier, IrInstruction *new_stack, bool is_async_call_builtin,
+ CallModifier modifier, IrInstruction *new_stack, bool is_async_call_builtin,
IrInstruction *result_loc, ZigType *return_type)
{
IrInstructionCallGen *call_instruction = ir_build_instruction<IrInstructionCallGen>(&ira->new_irb,
@@ -1930,7 +1990,6 @@ static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *so
call_instruction->base.value->type = return_type;
call_instruction->fn_entry = fn_entry;
call_instruction->fn_ref = fn_ref;
- call_instruction->fn_inline = fn_inline;
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->modifier = modifier;
@@ -5054,10 +5113,7 @@ static IrInstruction *ir_gen_async_call(IrBuilder *irb, Scope *scope, AstNode *a
return fn_ref;
size_t arg_count = call_node->data.fn_call_expr.params.length - arg_offset;
-
- // last "arg" is return pointer
- IrInstruction **args = allocate<IrInstruction *>(arg_count + 1);
-
+ IrInstruction **args = allocate<IrInstruction *>(arg_count);
for (size_t i = 0; i < arg_count; i += 1) {
AstNode *arg_node = call_node->data.fn_call_expr.params.at(i + arg_offset);
IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
@@ -5066,15 +5122,50 @@ static IrInstruction *ir_gen_async_call(IrBuilder *irb, Scope *scope, AstNode *a
args[i] = arg;
}
- args[arg_count] = ret_ptr;
-
CallModifier modifier = (await_node == nullptr) ? CallModifierAsync : CallModifierNone;
bool is_async_call_builtin = true;
- IrInstruction *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, modifier, is_async_call_builtin, bytes, result_loc);
+ IrInstruction *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args,
+ ret_ptr, modifier, is_async_call_builtin, bytes, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
+static IrInstruction *ir_gen_fn_call_with_args(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ AstNode *fn_ref_node, CallModifier modifier, IrInstruction *options,
+ AstNode **args_ptr, size_t args_len, LVal lval, ResultLoc *result_loc)
+{
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ IrInstruction *fn_type = ir_build_typeof(irb, scope, source_node, fn_ref);
+
+ IrInstruction **args = allocate<IrInstruction *>(args_len);
+ for (size_t i = 0; i < args_len; i += 1) {
+ AstNode *arg_node = args_ptr[i];
+
+ IrInstruction *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
+ IrInstruction *arg_type = ir_build_arg_type(irb, scope, source_node, fn_type, arg_index, true);
+ ResultLoc *no_result = no_result_loc();
+ ir_build_reset_result(irb, scope, source_node, no_result);
+ ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);
+
+ IrInstruction *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
+ if (arg == irb->codegen->invalid_instruction)
+ return arg;
+
+ args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
+ }
+
+ IrInstruction *fn_call;
+ if (options != nullptr) {
+ fn_call = ir_build_call_src_args(irb, scope, source_node, options, fn_ref, args, args_len, result_loc);
+ } else {
+ fn_call = ir_build_call_src(irb, scope, source_node, nullptr, fn_ref, args_len, args, nullptr,
+ modifier, false, nullptr, result_loc);
+ }
+ return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
+}
+
static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
ResultLoc *result_loc)
{
@@ -5993,34 +6084,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *offset_of = ir_build_bit_offset_of(irb, scope, node, arg0_value, arg1_value);
return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
}
- case BuiltinFnIdInlineCall:
- case BuiltinFnIdNoInlineCall:
- {
- if (node->data.fn_call_expr.params.length == 0) {
- add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
- return irb->codegen->invalid_instruction;
- }
-
- AstNode *fn_ref_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
- if (fn_ref == irb->codegen->invalid_instruction)
- return fn_ref;
-
- size_t arg_count = node->data.fn_call_expr.params.length - 1;
-
- IrInstruction **args = allocate<IrInstruction *>(arg_count);
- for (size_t i = 0; i < arg_count; i += 1) {
- AstNode *arg_node = node->data.fn_call_expr.params.at(i + 1);
- args[i] = ir_gen_node(irb, arg_node, scope);
- if (args[i] == irb->codegen->invalid_instruction)
- return args[i];
- }
- FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
-
- IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- fn_inline, CallModifierNone, false, nullptr, result_loc);
- return ir_lval_wrap(irb, scope, call, lval, result_loc);
- }
case BuiltinFnIdNewStackCall:
{
if (node->data.fn_call_expr.params.length < 2) {
@@ -6050,10 +6113,52 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return args[i];
}
- IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, CallModifierNone, false, new_stack, result_loc);
+ IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args,
+ nullptr, CallModifierNone, false, new_stack, result_loc);
+ return ir_lval_wrap(irb, scope, call, lval, result_loc);
+ }
+ case BuiltinFnIdCall: {
+ // Cast the options parameter to the options type
+ ZigType *options_type = get_builtin_type(irb->codegen, "CallOptions");
+ IrInstruction *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
+ ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());
+
+ AstNode *options_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *options_inner = ir_gen_node_extra(irb, options_node, scope,
+ LValNone, &result_loc_cast->base);
+ if (options_inner == irb->codegen->invalid_instruction)
+ return options_inner;
+ IrInstruction *options = ir_build_implicit_cast(irb, scope, options_node, options_inner, result_loc_cast);
+
+ AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1);
+ AstNode *args_node = node->data.fn_call_expr.params.at(2);
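+ // A tuple literal argument list is lowered to one instruction per element
+ // right here; any other args expression is forwarded as a single value and
+ // unpacked later during analysis (see ir_analyze_instruction_call_extra).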
+ if (args_node->type == NodeTypeContainerInitExpr) {
+ if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
+ args_node->data.container_init_expr.entries.length == 0)
+ {
+ return ir_gen_fn_call_with_args(irb, scope, node,
+ fn_ref_node, CallModifierNone, options,
+ args_node->data.container_init_expr.entries.items,
+ args_node->data.container_init_expr.entries.length,
+ lval, result_loc);
+ } else {
+ exec_add_error_node(irb->codegen, irb->exec, args_node,
+ buf_sprintf("TODO: @call with anon struct literal"));
+ return irb->codegen->invalid_instruction;
+ }
+ } else {
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ IrInstruction *args = ir_gen_node(irb, args_node, scope);
+ if (args == irb->codegen->invalid_instruction)
+ return args;
+
+ IrInstruction *call = ir_build_call_extra(irb, scope, node, options, fn_ref, args, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
+ }
case BuiltinFnIdAsyncCall:
return ir_gen_async_call(irb, scope, nullptr, node, lval, result_loc);
case BuiltinFnIdTypeId:
@@ -6371,33 +6476,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc);
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
- IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
- if (fn_ref == irb->codegen->invalid_instruction)
- return fn_ref;
-
- IrInstruction *fn_type = ir_build_typeof(irb, scope, node, fn_ref);
-
- size_t arg_count = node->data.fn_call_expr.params.length;
- IrInstruction **args = allocate<IrInstruction *>(arg_count);
- for (size_t i = 0; i < arg_count; i += 1) {
- AstNode *arg_node = node->data.fn_call_expr.params.at(i);
-
- IrInstruction *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
- IrInstruction *arg_type = ir_build_arg_type(irb, scope, node, fn_type, arg_index, true);
- ResultLoc *no_result = no_result_loc();
- ir_build_reset_result(irb, scope, node, no_result);
- ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);
-
- IrInstruction *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
- if (arg == irb->codegen->invalid_instruction)
- return arg;
-
- args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
- }
-
- IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, node->data.fn_call_expr.modifier, false, nullptr, result_loc);
- return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
+ return ir_gen_fn_call_with_args(irb, scope, node, fn_ref_node, node->data.fn_call_expr.modifier,
+ nullptr, node->data.fn_call_expr.params.items, node->data.fn_call_expr.params.length, lval, result_loc);
}
static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
@@ -13278,6 +13358,15 @@ static IrInstruction *ir_analyze_struct_value_field_value(IrAnalyze *ira, IrInst
return ir_get_deref(ira, source_instr, field_ptr, nullptr);
}
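+// Like ir_analyze_unwrap_optional_payload, but operates on an optional value
+// rather than a pointer to one: take a reference, unwrap the payload pointer,
+// then load the payload.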
+static IrInstruction *ir_analyze_optional_value_payload_value(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *optional_operand, bool safety_check_on)
+{
+ IrInstruction *opt_ptr = ir_get_ref(ira, source_instr, optional_operand, true, false);
+ IrInstruction *payload_ptr = ir_analyze_unwrap_optional_payload(ira, source_instr, opt_ptr,
+ safety_check_on, false);
+ return ir_get_deref(ira, source_instr, payload_ptr, nullptr);
+}
+
static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr,
ZigType *wanted_type, IrInstruction *value)
{
@@ -13911,6 +14000,20 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Zig
return ir_implicit_cast2(ira, value, value, expected_type);
}
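+// Returns the element type of a pointer. When the child type is 'var', the
+// lazy comptime value is resolved and the pointee's type is used instead.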
+static ZigType *get_ptr_elem_type(CodeGen *g, IrInstruction *ptr) {
+ ir_assert(ptr->value->type->id == ZigTypeIdPointer, ptr);
+ ZigType *elem_type = ptr->value->type->data.pointer.child_type;
+ if (elem_type != g->builtin_types.entry_var)
+ return elem_type;
+
+ if (ir_resolve_lazy(g, ptr->source_node, ptr->value))
+ return g->builtin_types.entry_invalid;
+
+ assert(value_is_comptime(ptr->value));
+ ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
+ return pointee->type;
+}
+
static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr,
ResultLoc *result_loc)
{
@@ -13927,6 +14030,8 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
}
ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (type_is_invalid(child_type))
+ return ira->codegen->invalid_instruction;
// if the child type has one possible value, the deref is comptime
switch (type_has_one_possible_value(ira->codegen, child_type)) {
case OnePossibleValueInvalid:
@@ -14102,9 +14207,7 @@ static bool ir_resolve_atomic_order(IrAnalyze *ira, IrInstruction *value, Atomic
if (type_is_invalid(value->value->type))
return false;
- ZigValue *atomic_order_val = get_builtin_value(ira->codegen, "AtomicOrder");
- assert(atomic_order_val->type->id == ZigTypeIdMetaType);
- ZigType *atomic_order_type = atomic_order_val->data.x_type;
+ ZigType *atomic_order_type = get_builtin_type(ira->codegen, "AtomicOrder");
IrInstruction *casted_value = ir_implicit_cast(ira, value, atomic_order_type);
if (type_is_invalid(casted_value->value->type))
@@ -14122,9 +14225,7 @@ static bool ir_resolve_atomic_rmw_op(IrAnalyze *ira, IrInstruction *value, Atomi
if (type_is_invalid(value->value->type))
return false;
- ZigValue *atomic_rmw_op_val = get_builtin_value(ira->codegen, "AtomicRmwOp");
- assert(atomic_rmw_op_val->type->id == ZigTypeIdMetaType);
- ZigType *atomic_rmw_op_type = atomic_rmw_op_val->data.x_type;
+ ZigType *atomic_rmw_op_type = get_builtin_type(ira->codegen, "AtomicRmwOp");
IrInstruction *casted_value = ir_implicit_cast(ira, value, atomic_rmw_op_type);
if (type_is_invalid(casted_value->value->type))
@@ -14142,9 +14243,7 @@ static bool ir_resolve_global_linkage(IrAnalyze *ira, IrInstruction *value, Glob
if (type_is_invalid(value->value->type))
return false;
- ZigValue *global_linkage_val = get_builtin_value(ira->codegen, "GlobalLinkage");
- assert(global_linkage_val->type->id == ZigTypeIdMetaType);
- ZigType *global_linkage_type = global_linkage_val->data.x_type;
+ ZigType *global_linkage_type = get_builtin_type(ira->codegen, "GlobalLinkage");
IrInstruction *casted_value = ir_implicit_cast(ira, value, global_linkage_type);
if (type_is_invalid(casted_value->value->type))
@@ -14162,9 +14261,7 @@ static bool ir_resolve_float_mode(IrAnalyze *ira, IrInstruction *value, FloatMod
if (type_is_invalid(value->value->type))
return false;
- ZigValue *float_mode_val = get_builtin_value(ira->codegen, "FloatMode");
- assert(float_mode_val->type->id == ZigTypeIdMetaType);
- ZigType *float_mode_type = float_mode_val->data.x_type;
+ ZigType *float_mode_type = get_builtin_type(ira->codegen, "FloatMode");
IrInstruction *casted_value = ir_implicit_cast(ira, value, float_mode_type);
if (type_is_invalid(casted_value->value->type))
@@ -16972,11 +17069,11 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
return ir_const_void(ira, &instruction->base);
}
-static IrInstruction *get_async_call_result_loc(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
- ZigType *fn_ret_type)
+static IrInstruction *get_async_call_result_loc(IrAnalyze *ira, IrInstruction *source_instr,
+ ZigType *fn_ret_type, bool is_async_call_builtin, IrInstruction **args_ptr, size_t args_len,
+ IrInstruction *ret_ptr_uncasted)
{
- ir_assert(call_instruction->is_async_call_builtin, &call_instruction->base);
- IrInstruction *ret_ptr_uncasted = call_instruction->args[call_instruction->arg_count]->child;
+ ir_assert(is_async_call_builtin, source_instr);
if (type_is_invalid(ret_ptr_uncasted->value->type))
return ira->codegen->invalid_instruction;
if (ret_ptr_uncasted->value->type->id == ZigTypeIdVoid) {
@@ -16986,9 +17083,10 @@ static IrInstruction *get_async_call_result_loc(IrAnalyze *ira, IrInstructionCal
return ir_implicit_cast(ira, ret_ptr_uncasted, get_pointer_to_type(ira->codegen, fn_ret_type, false));
}
-static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
+static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstruction *source_instr, ZigFn *fn_entry,
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
- IrInstruction *casted_new_stack)
+ IrInstruction *casted_new_stack, bool is_async_call_builtin, IrInstruction *ret_ptr_uncasted,
+ ResultLoc *call_result_loc)
{
if (fn_entry == nullptr) {
if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
@@ -17003,19 +17101,20 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
}
if (casted_new_stack != nullptr) {
ZigType *fn_ret_type = fn_type->data.fn.fn_type_id.return_type;
- IrInstruction *ret_ptr = get_async_call_result_loc(ira, call_instruction, fn_ret_type);
+ IrInstruction *ret_ptr = get_async_call_result_loc(ira, source_instr, fn_ret_type, is_async_call_builtin,
+ casted_args, arg_count, ret_ptr_uncasted);
if (ret_ptr != nullptr && type_is_invalid(ret_ptr->value->type))
return ira->codegen->invalid_instruction;
ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_ret_type);
- IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
- arg_count, casted_args, FnInlineAuto, CallModifierAsync, casted_new_stack,
- call_instruction->is_async_call_builtin, ret_ptr, anyframe_type);
+ IrInstructionCallGen *call_gen = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
+ arg_count, casted_args, CallModifierAsync, casted_new_stack,
+ is_async_call_builtin, ret_ptr, anyframe_type);
return &call_gen->base;
} else {
ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
- IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ IrInstruction *result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
frame_type, nullptr, true, true, false);
if (type_is_invalid(result_loc->value->type) || instr_is_unreachable(result_loc)) {
return result_loc;
@@ -17023,9 +17122,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
if (type_is_invalid(result_loc->value->type))
return ira->codegen->invalid_instruction;
- return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, CallModifierAsync, casted_new_stack,
- call_instruction->is_async_call_builtin, result_loc, frame_type)->base;
+ return &ir_build_call_gen(ira, source_instr, fn_entry, fn_ref, arg_count,
+ casted_args, CallModifierAsync, casted_new_stack,
+ is_async_call_builtin, result_loc, frame_type)->base;
}
}
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
@@ -17288,9 +17387,7 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
copy_const_val(casted_ptr->value, ptr->value);
casted_ptr->value->type = struct_ptr_type;
} else {
- casted_ptr = ir_build_cast(&ira->new_irb, source_instr->scope,
- source_instr->source_node, struct_ptr_type, ptr, CastOpNoop);
- casted_ptr->value->type = struct_ptr_type;
+ casted_ptr = ptr;
}
if (instr_is_comptime(casted_ptr)) {
ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
@@ -17371,6 +17468,12 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
}
}
+ if (ptr->value->type->data.pointer.inferred_struct_field != nullptr &&
+ child_type == ira->codegen->builtin_types.entry_var)
+ {
+ child_type = ptr->value->type->data.pointer.inferred_struct_field->inferred_struct_type;
+ }
+
switch (type_requires_comptime(ira->codegen, child_type)) {
case ReqCompTimeInvalid:
return ira->codegen->invalid_instruction;
@@ -17417,25 +17520,21 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
return &store_ptr->base;
}
-static IrInstruction *analyze_casted_new_stack(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
- ZigFn *fn_entry)
+static IrInstruction *analyze_casted_new_stack(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *new_stack, bool is_async_call_builtin, ZigFn *fn_entry)
{
- if (call_instruction->new_stack == nullptr)
+ if (new_stack == nullptr)
return nullptr;
- if (!call_instruction->is_async_call_builtin &&
+ if (!is_async_call_builtin &&
arch_stack_pointer_register_name(ira->codegen->zig_target->arch) == nullptr)
{
- ir_add_error(ira, &call_instruction->base,
- buf_sprintf("target arch '%s' does not support @newStackCall",
+ ir_add_error(ira, source_instr,
+ buf_sprintf("target arch '%s' does not support calling with a new stack",
target_arch_name(ira->codegen->zig_target->arch)));
}
- IrInstruction *new_stack = call_instruction->new_stack->child;
- if (type_is_invalid(new_stack->value->type))
- return ira->codegen->invalid_instruction;
-
- if (call_instruction->is_async_call_builtin &&
+ if (is_async_call_builtin &&
fn_entry != nullptr && new_stack->value->type->id == ZigTypeIdPointer &&
new_stack->value->type->data.pointer.child_type->id == ZigTypeIdFnFrame)
{
@@ -17451,9 +17550,11 @@ static IrInstruction *analyze_casted_new_stack(IrAnalyze *ira, IrInstructionCall
}
}
-static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
+static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstruction *source_instr,
ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref,
- IrInstruction *first_arg_ptr, bool comptime_fn_call, FnInline fn_inline)
+ IrInstruction *first_arg_ptr, CallModifier modifier,
+ IrInstruction *new_stack, bool is_async_call_builtin,
+ IrInstruction **args_ptr, size_t args_len, IrInstruction *ret_ptr, ResultLoc *call_result_loc)
{
Error err;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -17469,16 +17570,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
size_t src_param_count = fn_type_id->param_count - var_args_1_or_0;
- size_t call_param_count = call_instruction->arg_count + first_arg_1_or_0;
- for (size_t i = 0; i < call_instruction->arg_count; i += 1) {
- ZigValue *arg_tuple_value = call_instruction->args[i]->child->value;
+ size_t call_param_count = args_len + first_arg_1_or_0;
+ for (size_t i = 0; i < args_len; i += 1) {
+ ZigValue *arg_tuple_value = args_ptr[i]->value;
if (arg_tuple_value->type->id == ZigTypeIdArgTuple) {
call_param_count -= 1;
call_param_count += arg_tuple_value->data.x_arg_tuple.end_index -
arg_tuple_value->data.x_arg_tuple.start_index;
}
}
- AstNode *source_node = call_instruction->base.source_node;
+ AstNode *source_node = source_instr->source_node;
AstNode *fn_proto_node = fn_entry ? fn_entry->proto_node : nullptr;
@@ -17511,14 +17612,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
return ira->codegen->invalid_instruction;
}
- if (comptime_fn_call) {
+ if (modifier == CallModifierCompileTime) {
// No special handling is needed for compile time evaluation of generic functions.
if (!fn_entry || fn_entry->body_node == nullptr) {
ir_add_error(ira, fn_ref, buf_sprintf("unable to evaluate constant expression"));
return ira->codegen->invalid_instruction;
}
- if (!ir_emit_backward_branch(ira, &call_instruction->base))
+ if (!ir_emit_backward_branch(ira, source_instr))
return ira->codegen->invalid_instruction;
// Fork a scope of the function with known values for the parameters.
@@ -17550,16 +17651,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
if (fn_proto_node->data.fn_proto.is_var_args) {
- ir_add_error(ira, &call_instruction->base,
+ ir_add_error(ira, source_instr,
buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/ziglang/zig/issues/313"));
return ira->codegen->invalid_instruction;
}
- for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
- IrInstruction *old_arg = call_instruction->args[call_i]->child;
- if (type_is_invalid(old_arg->value->type))
- return ira->codegen->invalid_instruction;
+ for (size_t call_i = 0; call_i < args_len; call_i += 1) {
+ IrInstruction *old_arg = args_ptr[call_i];
if (!ir_analyze_fn_call_inline_arg(ira, fn_proto_node, old_arg, &exec_scope, &next_proto_i))
return ira->codegen->invalid_instruction;
@@ -17593,7 +17692,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
AstNode *body_node = fn_entry->body_node;
result = ir_eval_const_value(ira->codegen, exec_scope, body_node, return_type,
ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota, fn_entry,
- nullptr, call_instruction->base.source_node, nullptr, ira->new_irb.exec, return_type_node,
+ nullptr, source_instr->source_node, nullptr, ira->new_irb.exec, return_type_node,
UndefOk);
if (inferred_err_set_type != nullptr) {
@@ -17623,24 +17722,21 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
}
- IrInstruction *new_instruction = ir_const_move(ira, &call_instruction->base, result);
+ IrInstruction *new_instruction = ir_const_move(ira, source_instr, result);
return ir_finish_anal(ira, new_instruction);
}
if (fn_type->data.fn.is_generic) {
if (!fn_entry) {
- ir_add_error(ira, call_instruction->fn_ref,
+ ir_add_error(ira, fn_ref,
buf_sprintf("calling a generic function requires compile-time known function value"));
return ira->codegen->invalid_instruction;
}
// Count the arguments of the function type id we are creating
size_t new_fn_arg_count = first_arg_1_or_0;
- for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
- IrInstruction *arg = call_instruction->args[call_i]->child;
- if (type_is_invalid(arg->value->type))
- return ira->codegen->invalid_instruction;
-
+ for (size_t call_i = 0; call_i < args_len; call_i += 1) {
+ IrInstruction *arg = args_ptr[call_i];
if (arg->value->type->id == ZigTypeIdArgTuple) {
new_fn_arg_count += arg->value->data.x_arg_tuple.end_index - arg->value->data.x_arg_tuple.start_index;
} else {
@@ -17702,10 +17798,8 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
assert(parent_fn_entry);
- for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
- IrInstruction *arg = call_instruction->args[call_i]->child;
- if (type_is_invalid(arg->value->type))
- return ira->codegen->invalid_instruction;
+ for (size_t call_i = 0; call_i < args_len; call_i += 1) {
+ IrInstruction *arg = args_ptr[call_i];
if (arg->value->type->id == ZigTypeIdArgTuple) {
for (size_t arg_tuple_i = arg->value->data.x_arg_tuple.start_index;
@@ -17804,8 +17898,9 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
switch (type_requires_comptime(ira->codegen, specified_return_type)) {
case ReqCompTimeYes:
// Throw out our work and call the function as if it were comptime.
- return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr,
- true, FnInlineAuto);
+ return ir_analyze_fn_call(ira, source_instr, fn_entry, fn_type, fn_ref, first_arg_ptr,
+ CallModifierCompileTime, new_stack, is_async_call_builtin, args_ptr, args_len,
+ ret_ptr, call_result_loc);
case ReqCompTimeInvalid:
return ira->codegen->invalid_instruction;
case ReqCompTimeNo:
@@ -17823,9 +17918,9 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (type_is_invalid(impl_fn->type_entry))
return ira->codegen->invalid_instruction;
- impl_fn->ir_executable->source_node = call_instruction->base.source_node;
+ impl_fn->ir_executable->source_node = source_instr->source_node;
impl_fn->ir_executable->parent_exec = ira->new_irb.exec;
- impl_fn->analyzed_executable.source_node = call_instruction->base.source_node;
+ impl_fn->analyzed_executable.source_node = source_instr->source_node;
impl_fn->analyzed_executable.parent_exec = ira->new_irb.exec;
impl_fn->analyzed_executable.backward_branch_quota = ira->new_irb.exec->backward_branch_quota;
impl_fn->analyzed_executable.is_generic_instantiation = true;
@@ -17839,32 +17934,35 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
parent_fn_entry->calls_or_awaits_errorable_fn = true;
}
- IrInstruction *casted_new_stack = analyze_casted_new_stack(ira, call_instruction, impl_fn);
+ IrInstruction *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack,
+ is_async_call_builtin, impl_fn);
if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
return ira->codegen->invalid_instruction;
size_t impl_param_count = impl_fn_type_id->param_count;
- if (call_instruction->modifier == CallModifierAsync) {
- IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
- nullptr, casted_args, impl_param_count, casted_new_stack);
+ if (modifier == CallModifierAsync) {
+ IrInstruction *result = ir_analyze_async_call(ira, source_instr, impl_fn, impl_fn->type_entry,
+ nullptr, casted_args, impl_param_count, casted_new_stack, is_async_call_builtin, ret_ptr,
+ call_result_loc);
return ir_finish_anal(ira, result);
}
IrInstruction *result_loc;
if (handle_is_ptr(impl_fn_type_id->return_type)) {
- result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
impl_fn_type_id->return_type, nullptr, true, true, false);
if (result_loc != nullptr) {
if (type_is_invalid(result_loc->value->type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
if (!handle_is_ptr(result_loc->value->type->data.pointer.child_type)) {
- ir_reset_result(call_instruction->result_loc);
+ ir_reset_result(call_result_loc);
result_loc = nullptr;
}
}
- } else if (call_instruction->is_async_call_builtin) {
- result_loc = get_async_call_result_loc(ira, call_instruction, impl_fn_type_id->return_type);
+ } else if (is_async_call_builtin) {
+ result_loc = get_async_call_result_loc(ira, source_instr, impl_fn_type_id->return_type,
+ is_async_call_builtin, args_ptr, args_len, ret_ptr);
if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
return ira->codegen->invalid_instruction;
} else {
@@ -17873,18 +17971,17 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (impl_fn_type_id->cc == CallingConventionAsync &&
parent_fn_entry->inferred_async_node == nullptr &&
- call_instruction->modifier != CallModifierNoAsync)
+ modifier != CallModifierNoAsync)
{
parent_fn_entry->inferred_async_node = fn_ref->source_node;
parent_fn_entry->inferred_async_fn = impl_fn;
}
- IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
- impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- call_instruction->modifier, casted_new_stack, call_instruction->is_async_call_builtin, result_loc,
- impl_fn_type_id->return_type);
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, source_instr,
+ impl_fn, nullptr, impl_param_count, casted_args, modifier, casted_new_stack,
+ is_async_call_builtin, result_loc, impl_fn_type_id->return_type);
- if (get_scope_typeof(call_instruction->base.scope) == nullptr) {
+ if (get_scope_typeof(source_instr->scope) == nullptr) {
parent_fn_entry->call_list.append(new_call_instruction);
}
@@ -17926,8 +18023,8 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
casted_args[next_arg_index] = casted_arg;
next_arg_index += 1;
}
- for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
- IrInstruction *old_arg = call_instruction->args[call_i]->child;
+ for (size_t call_i = 0; call_i < args_len; call_i += 1) {
+ IrInstruction *old_arg = args_ptr[call_i];
if (type_is_invalid(old_arg->value->type))
return ira->codegen->invalid_instruction;
@@ -17988,25 +18085,26 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (type_is_invalid(return_type))
return ira->codegen->invalid_instruction;
- if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
- ir_add_error(ira, &call_instruction->base,
+ if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && modifier == CallModifierNeverInline) {
+ ir_add_error(ira, source_instr,
buf_sprintf("no-inline call of inline function"));
return ira->codegen->invalid_instruction;
}
- IrInstruction *casted_new_stack = analyze_casted_new_stack(ira, call_instruction, fn_entry);
+ IrInstruction *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack,
+ is_async_call_builtin, fn_entry);
if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
return ira->codegen->invalid_instruction;
- if (call_instruction->modifier == CallModifierAsync) {
- IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
- casted_args, call_param_count, casted_new_stack);
+ if (modifier == CallModifierAsync) {
+ IrInstruction *result = ir_analyze_async_call(ira, source_instr, fn_entry, fn_type, fn_ref,
+ casted_args, call_param_count, casted_new_stack, is_async_call_builtin, ret_ptr, call_result_loc);
return ir_finish_anal(ira, result);
}
if (fn_type_id->cc == CallingConventionAsync &&
parent_fn_entry->inferred_async_node == nullptr &&
- call_instruction->modifier != CallModifierNoAsync)
+ modifier != CallModifierNoAsync)
{
parent_fn_entry->inferred_async_node = fn_ref->source_node;
parent_fn_entry->inferred_async_fn = fn_entry;
@@ -18014,41 +18112,202 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *result_loc;
if (handle_is_ptr(return_type)) {
- result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
return_type, nullptr, true, true, false);
if (result_loc != nullptr) {
if (type_is_invalid(result_loc->value->type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
if (!handle_is_ptr(result_loc->value->type->data.pointer.child_type)) {
- ir_reset_result(call_instruction->result_loc);
+ ir_reset_result(call_result_loc);
result_loc = nullptr;
}
}
- } else if (call_instruction->is_async_call_builtin) {
- result_loc = get_async_call_result_loc(ira, call_instruction, return_type);
+ } else if (is_async_call_builtin) {
+ result_loc = get_async_call_result_loc(ira, source_instr, return_type, is_async_call_builtin,
+ args_ptr, args_len, ret_ptr);
if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
return ira->codegen->invalid_instruction;
} else {
result_loc = nullptr;
}
- IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
- call_param_count, casted_args, fn_inline, call_instruction->modifier, casted_new_stack,
- call_instruction->is_async_call_builtin, result_loc, return_type);
- if (get_scope_typeof(call_instruction->base.scope) == nullptr) {
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
+ call_param_count, casted_args, modifier, casted_new_stack,
+ is_async_call_builtin, result_loc, return_type);
+ if (get_scope_typeof(source_instr->scope) == nullptr) {
parent_fn_entry->call_list.append(new_call_instruction);
}
return ir_finish_anal(ira, &new_call_instruction->base);
}
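+// Adapter for IrInstructionCallSrc: resolves the analyzed children of the
+// new-stack, argument, and return-pointer operands, then forwards everything
+// to the generic ir_analyze_fn_call.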
+static IrInstruction *ir_analyze_fn_call_src(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
+ ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref,
+ IrInstruction *first_arg_ptr, CallModifier modifier)
+{
+ IrInstruction *new_stack = nullptr;
+ if (call_instruction->new_stack) {
+ new_stack = call_instruction->new_stack->child;
+ if (type_is_invalid(new_stack->value->type))
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction **args_ptr = allocate<IrInstruction *>(call_instruction->arg_count, "IrInstruction *");
+ for (size_t i = 0; i < call_instruction->arg_count; i += 1) {
+ args_ptr[i] = call_instruction->args[i]->child;
+ if (type_is_invalid(args_ptr[i]->value->type))
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *ret_ptr = nullptr;
+ if (call_instruction->ret_ptr != nullptr) {
+ ret_ptr = call_instruction->ret_ptr->child;
+ if (type_is_invalid(ret_ptr->value->type))
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *result = ir_analyze_fn_call(ira, &call_instruction->base, fn_entry, fn_type, fn_ref,
+ first_arg_ptr, modifier, new_stack, call_instruction->is_async_call_builtin,
+ args_ptr, call_instruction->arg_count, ret_ptr, call_instruction->result_loc);
+ deallocate(args_ptr, call_instruction->arg_count, "IrInstruction *");
+ return result;
+}
+
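+// Shared analysis for @call: resolves the function reference (including
+// bound functions), reads the comptime-known 'modifier' and 'stack' fields
+// out of the CallOptions value, and performs the call via ir_analyze_fn_call.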
+static IrInstruction *ir_analyze_call_extra(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *pass1_options, IrInstruction *pass1_fn_ref, IrInstruction **args_ptr, size_t args_len,
+ ResultLoc *result_loc)
+{
+ IrInstruction *options = pass1_options->child;
+ if (type_is_invalid(options->value->type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *fn_ref = pass1_fn_ref->child;
+ if (type_is_invalid(fn_ref->value->type))
+ return ira->codegen->invalid_instruction;
+ IrInstruction *first_arg_ptr = nullptr;
+ ZigFn *fn = nullptr;
+ if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
+ assert(fn_ref->value->special == ConstValSpecialStatic);
+ fn = fn_ref->value->data.x_bound_fn.fn;
+ first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
+ if (type_is_invalid(first_arg_ptr->value->type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ fn = ir_resolve_fn(ira, fn_ref);
+ }
+ ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;
+
+ TypeStructField *modifier_field = find_struct_type_field(options->value->type, buf_create_from_str("modifier"));
+ ir_assert(modifier_field != nullptr, source_instr);
+ IrInstruction *modifier_inst = ir_analyze_struct_value_field_value(ira, source_instr, options, modifier_field);
+ ZigValue *modifier_val = ir_resolve_const(ira, modifier_inst, UndefBad);
+ if (modifier_val == nullptr)
+ return ira->codegen->invalid_instruction;
+ CallModifier modifier = (CallModifier)bigint_as_u32(&modifier_val->data.x_enum_tag);
+ if (modifier == CallModifierAsync) {
+ ir_add_error(ira, source_instr, buf_sprintf("TODO: @call with async modifier"));
+ return ira->codegen->invalid_instruction;
+ }
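+ // In a scope that forces comptime, every modifier that still permits a
+ // comptime call collapses into CallModifierCompileTime.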
+ if (ir_should_inline(ira->new_irb.exec, source_instr->scope)) {
+ switch (modifier) {
+ case CallModifierBuiltin:
+ zig_unreachable();
+ case CallModifierAsync:
+ ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @call with async modifier"));
+ return ira->codegen->invalid_instruction;
+ case CallModifierCompileTime:
+ case CallModifierNone:
+ case CallModifierAlwaysInline:
+ case CallModifierAlwaysTail:
+ case CallModifierNoAsync:
+ modifier = CallModifierCompileTime;
+ break;
+ case CallModifierNeverInline:
+ ir_add_error(ira, source_instr,
+ buf_sprintf("unable to perform 'never_inline' call at compile-time"));
+ return ira->codegen->invalid_instruction;
+ case CallModifierNeverTail:
+ ir_add_error(ira, source_instr,
+ buf_sprintf("unable to perform 'never_tail' call at compile-time"));
+ return ira->codegen->invalid_instruction;
+ }
+ }
+
+ TypeStructField *stack_field = find_struct_type_field(options->value->type, buf_create_from_str("stack"));
+ ir_assert(stack_field != nullptr, source_instr);
+ IrInstruction *opt_stack = ir_analyze_struct_value_field_value(ira, source_instr, options, stack_field);
+ if (type_is_invalid(opt_stack->value->type))
+ return ira->codegen->invalid_instruction;
+ IrInstruction *stack_is_non_null_inst = ir_analyze_test_non_null(ira, source_instr, opt_stack);
+ bool stack_is_non_null;
+ if (!ir_resolve_bool(ira, stack_is_non_null_inst, &stack_is_non_null))
+ return ira->codegen->invalid_instruction;
+ IrInstruction *stack;
+ if (stack_is_non_null) {
+ stack = ir_analyze_optional_value_payload_value(ira, source_instr, opt_stack, false);
+ if (type_is_invalid(stack->value->type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ stack = nullptr;
+ }
+
+ return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr,
+ modifier, stack, false, args_ptr, args_len, nullptr, result_loc);
+}
+
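+// Analyzes @call when the argument list was not a tuple literal: the args
+// operand must itself be a tuple value whose fields become the arguments.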
+static IrInstruction *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstructionCallExtra *instruction) {
+ IrInstruction *args = instruction->args->child;
+ ZigType *args_type = args->value->type;
+ if (type_is_invalid(args_type))
+ return ira->codegen->invalid_instruction;
+
+ if (args_type->id != ZigTypeIdStruct) {
+ ir_add_error(ira, args,
+ buf_sprintf("expected tuple or struct, found '%s'", buf_ptr(&args_type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction **args_ptr = nullptr;
+ size_t args_len = 0;
+
+ if (is_tuple(args_type)) {
+ args_len = args_type->data.structure.src_field_count;
+ args_ptr = allocate<IrInstruction *>(args_len, "IrInstruction *");
+ for (size_t i = 0; i < args_len; i += 1) {
+ TypeStructField *arg_field = args_type->data.structure.fields[i];
+ args_ptr[i] = ir_analyze_struct_value_field_value(ira, &instruction->base, args, arg_field);
+ if (type_is_invalid(args_ptr[i]->value->type))
+ return ira->codegen->invalid_instruction;
+ }
+ } else {
+ ir_add_error(ira, args, buf_sprintf("TODO: struct args"));
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *result = ir_analyze_call_extra(ira, &instruction->base, instruction->options,
+ instruction->fn_ref, args_ptr, args_len, instruction->result_loc);
+ deallocate(args_ptr, args_len, "IrInstruction *");
+ return result;
+}
+
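+// Analyzes @call lowered with per-argument instructions (the tuple literal
+// case); the options operand is resolved in ir_analyze_call_extra.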
+static IrInstruction *ir_analyze_instruction_call_args(IrAnalyze *ira, IrInstructionCallSrcArgs *instruction) {
+ IrInstruction **args_ptr = allocate<IrInstruction *>(instruction->args_len, "IrInstruction *");
+ for (size_t i = 0; i < instruction->args_len; i += 1) {
+ args_ptr[i] = instruction->args_ptr[i]->child;
+ if (type_is_invalid(args_ptr[i]->value->type))
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *result = ir_analyze_call_extra(ira, &instruction->base, instruction->options,
+ instruction->fn_ref, args_ptr, instruction->args_len, instruction->result_loc);
+ deallocate(args_ptr, instruction->args_len, "IrInstruction *");
+ return result;
+}
+
static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) {
IrInstruction *fn_ref = call_instruction->fn_ref->child;
if (type_is_invalid(fn_ref->value->type))
return ira->codegen->invalid_instruction;
- bool is_comptime = call_instruction->is_comptime ||
+ bool is_comptime = (call_instruction->modifier == CallModifierCompileTime) ||
ir_should_inline(ira->new_irb.exec, call_instruction->base.scope);
+ CallModifier modifier = is_comptime ? CallModifierCompileTime : call_instruction->modifier;
if (is_comptime || instr_is_comptime(fn_ref)) {
if (fn_ref->value->type->id == ZigTypeIdMetaType) {
@@ -18063,14 +18322,16 @@ static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionC
} else if (fn_ref->value->type->id == ZigTypeIdFn) {
ZigFn *fn_table_entry = ir_resolve_fn(ira, fn_ref);
ZigType *fn_type = fn_table_entry ? fn_table_entry->type_entry : fn_ref->value->type;
- return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_type,
- fn_ref, nullptr, is_comptime, call_instruction->fn_inline);
+ CallModifier modifier = is_comptime ? CallModifierCompileTime : call_instruction->modifier;
+ return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_type,
+ fn_ref, nullptr, modifier);
} else if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
assert(fn_ref->value->special == ConstValSpecialStatic);
ZigFn *fn_table_entry = fn_ref->value->data.x_bound_fn.fn;
IrInstruction *first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
- return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
- fn_ref, first_arg_ptr, is_comptime, call_instruction->fn_inline);
+ CallModifier modifier = is_comptime ? CallModifierCompileTime : call_instruction->modifier;
+ return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
+ fn_ref, first_arg_ptr, modifier);
} else {
ir_add_error_node(ira, fn_ref->source_node,
buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
@@ -18079,8 +18340,8 @@ static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionC
}
if (fn_ref->value->type->id == ZigTypeIdFn) {
- return ir_analyze_fn_call(ira, call_instruction, nullptr, fn_ref->value->type,
- fn_ref, nullptr, false, call_instruction->fn_inline);
+ return ir_analyze_fn_call_src(ira, call_instruction, nullptr, fn_ref->value->type,
+ fn_ref, nullptr, modifier);
} else {
ir_add_error_node(ira, fn_ref->source_node,
buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
@@ -19356,8 +19617,18 @@ static IrInstruction *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_n
PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE, inferred_struct_field, nullptr);
if (instr_is_comptime(container_ptr)) {
- IrInstruction *result = ir_const(ira, source_instr, field_ptr_type);
- copy_const_val(result->value, container_ptr->value);
+ ZigValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
+ if (ptr_val == nullptr)
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *result;
+ if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
+ result = ir_build_cast(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, container_ptr_type, container_ptr, CastOpNoop);
+ } else {
+ result = ir_const(ira, source_instr, field_ptr_type);
+ }
+ copy_const_val(result->value, ptr_val);
result->value->type = field_ptr_type;
return result;
}
@@ -20374,20 +20645,6 @@ static IrInstruction *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIns
return ir_analyze_test_non_null(ira, &instruction->base, value);
}
-static ZigType *get_ptr_elem_type(CodeGen *g, IrInstruction *ptr) {
- ir_assert(ptr->value->type->id == ZigTypeIdPointer, ptr);
- ZigType *elem_type = ptr->value->type->data.pointer.child_type;
- if (elem_type != g->builtin_types.entry_var)
- return elem_type;
-
- if (ir_resolve_lazy(g, ptr->source_node, ptr->value))
- return g->builtin_types.entry_invalid;
-
- assert(value_is_comptime(ptr->value));
- ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
- return pointee->type;
-}
-
static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *base_ptr, bool safety_check_on, bool initializing)
{
@@ -21794,9 +22051,7 @@ static void ensure_field_index(ZigType *type, const char *field_name, size_t ind
static ZigType *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, ZigType *root) {
Error err;
- ZigValue *type_info_var = get_builtin_value(ira->codegen, "TypeInfo");
- assert(type_info_var->type->id == ZigTypeIdMetaType);
- ZigType *type_info_type = type_info_var->data.x_type;
+ ZigType *type_info_type = get_builtin_type(ira->codegen, "TypeInfo");
assert(type_info_type->id == ZigTypeIdUnion);
if ((err = type_resolve(ira->codegen, type_info_type, ResolveStatusSizeKnown))) {
zig_unreachable();
@@ -23026,9 +23281,7 @@ static IrInstruction *ir_analyze_instruction_type_id(IrAnalyze *ira,
if (type_is_invalid(type_entry))
return ira->codegen->invalid_instruction;
- ZigValue *var_value = get_builtin_value(ira->codegen, "TypeId");
- assert(var_value->type->id == ZigTypeIdMetaType);
- ZigType *result_type = var_value->data.x_type;
+ ZigType *result_type = get_builtin_type(ira->codegen, "TypeId");
IrInstruction *result = ir_const(ira, &instruction->base, result_type);
bigint_init_unsigned(&result->value->data.x_enum_tag, type_id_index(type_entry));
@@ -27779,6 +28032,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_field_ptr(ira, (IrInstructionFieldPtr *)instruction);
case IrInstructionIdCallSrc:
return ir_analyze_instruction_call(ira, (IrInstructionCallSrc *)instruction);
+ case IrInstructionIdCallSrcArgs:
+ return ir_analyze_instruction_call_args(ira, (IrInstructionCallSrcArgs *)instruction);
+ case IrInstructionIdCallExtra:
+ return ir_analyze_instruction_call_extra(ira, (IrInstructionCallExtra *)instruction);
case IrInstructionIdBr:
return ir_analyze_instruction_br(ira, (IrInstructionBr *)instruction);
case IrInstructionIdCondBr:
@@ -28176,7 +28433,9 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdDeclVarGen:
case IrInstructionIdStorePtr:
case IrInstructionIdVectorStoreElem:
+ case IrInstructionIdCallExtra:
case IrInstructionIdCallSrc:
+ case IrInstructionIdCallSrcArgs:
case IrInstructionIdCallGen:
case IrInstructionIdReturn:
case IrInstructionIdUnreachable:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index bacdd82251a3..d1ea15f0169b 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -92,8 +92,12 @@ const char* ir_instruction_type_str(IrInstructionId id) {
return "VarPtr";
case IrInstructionIdReturnPtr:
return "ReturnPtr";
+ case IrInstructionIdCallExtra:
+ return "CallExtra";
case IrInstructionIdCallSrc:
return "CallSrc";
+ case IrInstructionIdCallSrcArgs:
+ return "CallSrcArgs";
case IrInstructionIdCallGen:
return "CallGen";
case IrInstructionIdConst:
@@ -636,15 +640,57 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
zig_unreachable();
}
+static void ir_print_call_extra(IrPrint *irp, IrInstructionCallExtra *instruction) {
+ fprintf(irp->f, "opts=");
+ ir_print_other_instruction(irp, instruction->options);
+ fprintf(irp->f, ", fn=");
+ ir_print_other_instruction(irp, instruction->fn_ref);
+ fprintf(irp->f, ", args=");
+ ir_print_other_instruction(irp, instruction->args);
+ fprintf(irp->f, ", result=");
+ ir_print_result_loc(irp, instruction->result_loc);
+}
+
+static void ir_print_call_src_args(IrPrint *irp, IrInstructionCallSrcArgs *instruction) {
+ fprintf(irp->f, "opts=");
+ ir_print_other_instruction(irp, instruction->options);
+ fprintf(irp->f, ", fn=");
+ ir_print_other_instruction(irp, instruction->fn_ref);
+ fprintf(irp->f, ", args=(");
+ for (size_t i = 0; i < instruction->args_len; i += 1) {
+ IrInstruction *arg = instruction->args_ptr[i];
+ if (i != 0)
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, arg);
+ }
+ fprintf(irp->f, "), result=");
+ ir_print_result_loc(irp, instruction->result_loc);
+}
+
static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
switch (call_instruction->modifier) {
case CallModifierNone:
break;
+ case CallModifierNoAsync:
+ fprintf(irp->f, "noasync ");
+ break;
case CallModifierAsync:
fprintf(irp->f, "async ");
break;
- case CallModifierNoAsync:
- fprintf(irp->f, "noasync ");
+ case CallModifierNeverTail:
+ fprintf(irp->f, "notail ");
+ break;
+ case CallModifierNeverInline:
+ fprintf(irp->f, "noinline ");
+ break;
+ case CallModifierAlwaysTail:
+ fprintf(irp->f, "tail ");
+ break;
+ case CallModifierAlwaysInline:
+ fprintf(irp->f, "inline ");
+ break;
+ case CallModifierCompileTime:
+ fprintf(irp->f, "comptime ");
break;
case CallModifierBuiltin:
zig_unreachable();
@@ -670,11 +716,26 @@ static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instructi
switch (call_instruction->modifier) {
case CallModifierNone:
break;
+ case CallModifierNoAsync:
+ fprintf(irp->f, "noasync ");
+ break;
case CallModifierAsync:
fprintf(irp->f, "async ");
break;
- case CallModifierNoAsync:
- fprintf(irp->f, "noasync ");
+ case CallModifierNeverTail:
+ fprintf(irp->f, "notail ");
+ break;
+ case CallModifierNeverInline:
+ fprintf(irp->f, "noinline ");
+ break;
+ case CallModifierAlwaysTail:
+ fprintf(irp->f, "tail ");
+ break;
+ case CallModifierAlwaysInline:
+ fprintf(irp->f, "inline ");
+ break;
+ case CallModifierCompileTime:
+ fprintf(irp->f, "comptime ");
break;
case CallModifierBuiltin:
zig_unreachable();
@@ -2082,9 +2143,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
case IrInstructionIdCast:
ir_print_cast(irp, (IrInstructionCast *)instruction);
break;
+ case IrInstructionIdCallExtra:
+ ir_print_call_extra(irp, (IrInstructionCallExtra *)instruction);
+ break;
case IrInstructionIdCallSrc:
ir_print_call_src(irp, (IrInstructionCallSrc *)instruction);
break;
+ case IrInstructionIdCallSrcArgs:
+ ir_print_call_src_args(irp, (IrInstructionCallSrcArgs *)instruction);
+ break;
case IrInstructionIdCallGen:
ir_print_call_gen(irp, (IrInstructionCallGen *)instruction);
break;
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index ac17e6edfe1d..71d30e566c71 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -269,19 +269,25 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) {
}
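+// Applies the requested call-site attribute: tail-call kinds go through
+// setTailCallKind, inline hints through function-index attributes.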
LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
- unsigned NumArgs, unsigned CC, ZigLLVM_FnInline fn_inline, const char *Name)
+ unsigned NumArgs, unsigned CC, ZigLLVM_CallAttr attr, const char *Name)
{
CallInst *call_inst = CallInst::Create(unwrap(Fn), makeArrayRef(unwrap(Args), NumArgs), Name);
call_inst->setCallingConv(CC);
- switch (fn_inline) {
- case ZigLLVM_FnInlineAuto:
+ switch (attr) {
+ case ZigLLVM_CallAttrAuto:
break;
- case ZigLLVM_FnInlineAlways:
- call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::AlwaysInline);
+ case ZigLLVM_CallAttrNeverTail:
+ call_inst->setTailCallKind(CallInst::TCK_NoTail);
break;
- case ZigLLVM_FnInlineNever:
+ case ZigLLVM_CallAttrNeverInline:
call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
break;
+ case ZigLLVM_CallAttrAlwaysTail:
+ call_inst->setTailCallKind(CallInst::TCK_MustTail);
+ break;
+ case ZigLLVM_CallAttrAlwaysInline:
+ call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::AlwaysInline);
+ break;
}
return wrap(unwrap(B)->Insert(call_inst));
}
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index ebc3c1a00dd2..3376b0ec6180 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -64,13 +64,15 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co
ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);
-enum ZigLLVM_FnInline {
- ZigLLVM_FnInlineAuto,
- ZigLLVM_FnInlineAlways,
- ZigLLVM_FnInlineNever,
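+// Replaces ZigLLVM_FnInline: a single call-site attribute covering both
+// inlining hints and tail-call requirements.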
+enum ZigLLVM_CallAttr {
+ ZigLLVM_CallAttrAuto,
+ ZigLLVM_CallAttrNeverTail,
+ ZigLLVM_CallAttrNeverInline,
+ ZigLLVM_CallAttrAlwaysTail,
+ ZigLLVM_CallAttrAlwaysInline,
};
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
- unsigned NumArgs, unsigned CC, enum ZigLLVM_FnInline fn_inline, const char *Name);
+ unsigned NumArgs, unsigned CC, enum ZigLLVM_CallAttr attr, const char *Name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile);
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index d3d439aeafae..13e666f49ea7 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,36 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "bad usage of @call",
+ \\export fn entry1() void {
+ \\ @call(.{}, foo, {});
+ \\}
+ \\export fn entry2() void {
+ \\ comptime @call(.{ .modifier = .never_inline }, foo, .{});
+ \\}
+ \\export fn entry3() void {
+ \\ comptime @call(.{ .modifier = .never_tail }, foo, .{});
+ \\}
+ \\export fn entry4() void {
+ \\ @call(.{ .modifier = .never_inline }, bar, .{});
+ \\}
+ \\export fn entry5(c: bool) void {
+ \\ var baz = if (c) baz1 else baz2;
+ \\ @call(.{ .modifier = .compile_time }, baz, .{});
+ \\}
+ \\fn foo() void {}
+ \\inline fn bar() void {}
+ \\fn baz1() void {}
+ \\fn baz2() void {}
+ ,
+ "tmp.zig:2:21: error: expected tuple or struct, found 'void'",
+ "tmp.zig:5:14: error: unable to perform 'never_inline' call at compile-time",
+ "tmp.zig:8:14: error: unable to perform 'never_tail' call at compile-time",
+ "tmp.zig:11:5: error: no-inline call of inline function",
+ "tmp.zig:15:43: error: unable to evaluate constant expression",
+ );
+
cases.add(
\\export async fn foo() void {}
, "tmp.zig:1:1: error: exported function cannot be async");
@@ -14,13 +44,13 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.addCase(x: {
- var tc = cases.create("@newStackCall on unsupported target",
+ var tc = cases.create("call with new stack on unsupported target",
+ \\var buf: [10]u8 align(16) = undefined;
\\export fn entry() void {
- \\ var buf: [10]u8 align(16) = undefined;
- \\ @newStackCall(&buf, foo);
+ \\ @call(.{.stack = &buf}, foo, .{});
\\}
\\fn foo() void {}
- , "tmp.zig:3:5: error: target arch 'wasm32' does not support @newStackCall");
+ , "tmp.zig:3:5: error: target arch 'wasm32' does not support calling with a new stack");
tc.target = tests.Target{
.Cross = tests.CrossTarget{
.arch = .wasm32,
@@ -1927,17 +1957,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:2:12: error: use of undeclared identifier 'SomeNonexistentType'",
);
- cases.add(
- "@noInlineCall on an inline function",
- \\inline fn foo() void {}
- \\
- \\export fn entry() void {
- \\ @noInlineCall(foo);
- \\}
- ,
- "tmp.zig:4:5: error: no-inline call of inline function",
- );
-
cases.add(
"comptime continue inside runtime catch",
\\export fn entry(c: bool) void {
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index f99de3b14938..0ce08a1e6a9a 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -52,6 +52,7 @@ comptime {
_ = @import("behavior/bugs/920.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
+ _ = @import("behavior/call.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/defer.zig");
diff --git a/test/stage1/behavior/call.zig b/test/stage1/behavior/call.zig
new file mode 100644
index 000000000000..d68fb64d021e
--- /dev/null
+++ b/test/stage1/behavior/call.zig
@@ -0,0 +1,48 @@
+const std = @import("std");
+const expect = std.testing.expect;
+
+test "basic invocations" {
+ const foo = struct {
+ fn foo() i32 {
+ return 1234;
+ }
+ }.foo;
+ expect(@call(.{}, foo, .{}) == 1234);
+ comptime {
+ // modifiers that allow comptime calls
+ expect(@call(.{}, foo, .{}) == 1234);
+ expect(@call(.{ .modifier = .no_async }, foo, .{}) == 1234);
+ expect(@call(.{ .modifier = .always_tail }, foo, .{}) == 1234);
+ expect(@call(.{ .modifier = .always_inline }, foo, .{}) == 1234);
+ }
+ {
+ // comptime call without comptime keyword
+ const result = @call(.{ .modifier = .compile_time }, foo, .{}) == 1234;
+ comptime expect(result);
+ }
+}
+
+test "tuple parameters" {
+ const add = struct {
+ fn add(a: i32, b: i32) i32 {
+ return a + b;
+ }
+ }.add;
+ var a: i32 = 12;
+ var b: i32 = 34;
+ expect(@call(.{}, add, .{ a, 34 }) == 46);
+ expect(@call(.{}, add, .{ 12, b }) == 46);
+ expect(@call(.{}, add, .{ a, b }) == 46);
+ expect(@call(.{}, add, .{ 12, 34 }) == 46);
+ comptime expect(@call(.{}, add, .{ 12, 34 }) == 46);
+ {
+ const separate_args0 = .{ a, b };
+ //TODO const separate_args1 = .{ a, 34 };
+ const separate_args2 = .{ 12, 34 };
+ //TODO const separate_args3 = .{ 12, b };
+ expect(@call(.{ .modifier = .always_inline }, add, separate_args0) == 46);
+ // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args1) == 46);
+ expect(@call(.{ .modifier = .always_inline }, add, separate_args2) == 46);
+ // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args3) == 46);
+ }
+}
diff --git a/test/stage1/behavior/fn.zig b/test/stage1/behavior/fn.zig
index 99d145f1b1eb..0e9e379f01e0 100644
--- a/test/stage1/behavior/fn.zig
+++ b/test/stage1/behavior/fn.zig
@@ -96,14 +96,6 @@ fn fn4() u32 {
return 8;
}
-test "inline function call" {
- expect(@inlineCall(add, 3, 9) == 12);
-}
-
-fn add(a: i32, b: i32) i32 {
- return a + b;
-}
-
test "number literal as an argument" {
numberLiteralArg(3);
comptime numberLiteralArg(3);
@@ -251,7 +243,7 @@ test "discard the result of a function that returns a struct" {
test "function call with anon list literal" {
const S = struct {
fn doTheTest() void {
- consumeVec(.{9, 8, 7});
+ consumeVec(.{ 9, 8, 7 });
}
fn consumeVec(vec: [3]f32) void {
diff --git a/test/stage1/behavior/new_stack_call.zig b/test/stage1/behavior/new_stack_call.zig
index b057566d9c14..69763a52c64e 100644
--- a/test/stage1/behavior/new_stack_call.zig
+++ b/test/stage1/behavior/new_stack_call.zig
@@ -18,8 +18,8 @@ test "calling a function with a new stack" {
const arg = 1234;
- const a = @newStackCall(new_stack_bytes[0..512], targetFunction, arg);
- const b = @newStackCall(new_stack_bytes[512..], targetFunction, arg);
+ const a = @call(.{ .stack = new_stack_bytes[0..512] }, targetFunction, .{arg});
+ const b = @call(.{ .stack = new_stack_bytes[512..] }, targetFunction, .{arg});
_ = targetFunction(arg);
expect(arg == 1234);