From bb1c3e8b7e2be201221e14719d2d39e6298cc66c Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Wed, 10 Aug 2022 13:21:47 +0200
Subject: [PATCH 1/2] stage2: Handle lazy values for the % operator

---
 src/Sema.zig           | 59 ++++++++++++++++++++++++++++++++++++++++--
 src/value.zig          | 38 ---------------------------
 test/behavior/math.zig | 15 +++++++++++
 3 files changed, 72 insertions(+), 40 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index acdce0e9b097..6d95b46c7c6a 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -78,6 +78,7 @@ post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
 err: ?*Module.ErrorMsg = null,
 
 const std = @import("std");
+const math = std.math;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -11824,7 +11825,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             if (maybe_lhs_val) |lhs_val| {
-                const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target);
+                const rem_result = try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src);
                 // If this answer could possibly be different by doing `intMod`,
                 // we must emit a compile error. Otherwise, it's OK.
                 if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and
@@ -11886,6 +11887,60 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     return block.addBinOp(air_tag, casted_lhs, casted_rhs);
 }
 
+fn intRem(
+    sema: *Sema,
+    block: *Block,
+    ty: Type,
+    lhs: Value,
+    lhs_src: LazySrcLoc,
+    rhs: Value,
+    rhs_src: LazySrcLoc,
+) CompileError!Value {
+    if (ty.zigTypeTag() == .Vector) {
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        for (result_data) |*scalar, i| {
+            scalar.* = try sema.intRemScalar(block, lhs.indexVectorlike(i), lhs_src, rhs.indexVectorlike(i), rhs_src);
+        }
+        return Value.Tag.aggregate.create(sema.arena, result_data);
+    }
+    return sema.intRemScalar(block, lhs, lhs_src, rhs, rhs_src);
+}
+
+fn intRemScalar(
+    sema: *Sema,
+    block: *Block,
+    lhs: Value,
+    lhs_src: LazySrcLoc,
+    rhs: Value,
+    rhs_src: LazySrcLoc,
+) CompileError!Value {
+    const target = sema.mod.getTarget();
+    // TODO is this a performance issue? maybe we should try the operation without
+    // resorting to BigInt first.
+    var lhs_space: Value.BigIntSpace = undefined;
+    var rhs_space: Value.BigIntSpace = undefined;
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, lhs_src));
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, rhs_src));
+    const limbs_q = try sema.arena.alloc(
+        math.big.Limb,
+        lhs_bigint.limbs.len,
+    );
+    const limbs_r = try sema.arena.alloc(
+        math.big.Limb,
+        // TODO: consider reworking Sema to re-use Values rather than
+        // always producing new Value objects.
+        rhs_bigint.limbs.len,
+    );
+    const limbs_buffer = try sema.arena.alloc(
+        math.big.Limb,
+        math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+    );
+    var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+    var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+    result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
+    return Value.fromBigInt(sema.arena, result_r.toConst());
+}
+
 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -12050,7 +12105,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (maybe_lhs_val) |lhs_val| {
                 return sema.addConstant(
                     resolved_type,
-                    try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target),
+                    try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src),
                 );
             }
             break :rs lhs_src;
diff --git a/src/value.zig b/src/value.zig
index 3994040ba656..f156651eaac6 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3472,44 +3472,6 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_q.toConst());
     }
 
-    pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen());
-            for (result_data) |*scalar, i| {
-                scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
-            }
-            return Value.Tag.aggregate.create(allocator, result_data);
-        }
-        return intRemScalar(lhs, rhs, allocator, target);
-    }
-
-    pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
-        // TODO is this a performance issue? maybe we should try the operation without
-        // resorting to BigInt first.
-        var lhs_space: Value.BigIntSpace = undefined;
-        var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
-        const limbs_q = try allocator.alloc(
-            std.math.big.Limb,
-            lhs_bigint.limbs.len,
-        );
-        const limbs_r = try allocator.alloc(
-            std.math.big.Limb,
-            // TODO: consider reworking Sema to re-use Values rather than
-            // always producing new Value objects.
-            rhs_bigint.limbs.len,
-        );
-        const limbs_buffer = try allocator.alloc(
-            std.math.big.Limb,
-            std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
-        );
-        var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
-        var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
-        result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-        return fromBigInt(allocator, result_r.toConst());
-    }
-
     pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
         if (ty.zigTypeTag() == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen());
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index c8d0becbd655..ce476fea04e5 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -1721,3 +1721,18 @@ fn testAbsFloat() !void {
 fn testAbsFloatOne(in: f32, out: f32) !void {
     try expect(@fabs(@as(f32, in)) == @as(f32, out));
 }
+
+test "mod lazy values" {
+    {
+        const X = struct { x: u32 };
+        const x = @sizeOf(X);
+        const y = 1 % x;
+        _ = y;
+    }
+    {
+        const X = struct { x: u32 };
+        const x = @sizeOf(X);
+        const y = x % 1;
+        _ = y;
+    }
+}

From 0d32b73078aa4579187f7d5c67343a6036eed277 Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Mon, 8 Aug 2022 18:39:14 +0200
Subject: [PATCH 2/2] stage2: Implement explicit backing integers for packed
 structs

Now the backing integer of a packed struct type may be explicitly
specified with e.g. `packed struct(u32) { ... }`.
---
 lib/std/builtin.zig                           |   2 +
 lib/std/zig/Ast.zig                           |   2 +-
 lib/std/zig/parse.zig                         |  10 +-
 lib/std/zig/parser_test.zig                   |   7 +
 src/AstGen.zig                                |  56 ++++-
 src/Autodoc.zig                               |  11 +
 src/Module.zig                                |  21 +-
 src/Sema.zig                                  | 207 +++++++++++++++++-
 src/Zir.zig                                   |  16 +-
 src/codegen/llvm.zig                          |  15 +-
 src/print_zir.zig                             |  25 ++-
 src/stage1/all_types.hpp                      |   1 +
 src/stage1/analyze.cpp                        |   6 +
 src/stage1/ir.cpp                             |  40 +++-
 src/stage1/parser.cpp                         |  11 +-
 src/type.zig                                  |  23 +-
 test/behavior.zig                             |   1 +
 .../packed_struct_explicit_backing_int.zig    |  53 +++++
 test/behavior/type_info.zig                   |   4 +-
 .../packed_struct_backing_int_wrong.zig       |  55 +++++
 20 files changed, 493 insertions(+), 73 deletions(-)
 create mode 100644 test/behavior/packed_struct_explicit_backing_int.zig
 create mode 100644 test/cases/compile_errors/packed_struct_backing_int_wrong.zig

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index ef716c6972c4..2c2bc92c9602 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -294,6 +294,8 @@ pub const Type = union(enum) {
     /// therefore must be kept in sync with the compiler implementation.
     pub const Struct = struct {
         layout: ContainerLayout,
+        /// Only valid if layout is .Packed
+        backing_integer: ?type = null,
         fields: []const StructField,
         decls: []const Declaration,
         is_tuple: bool,
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 9bffcb3df202..016cefb25592 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -2967,7 +2967,7 @@ pub const Node = struct {
         /// Same as ContainerDeclTwo except there is known to be a trailing comma
         /// or semicolon before the rbrace.
         container_decl_two_trailing,
-        /// `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
+        /// `struct(lhs)` / `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
         container_decl_arg,
         /// Same as container_decl_arg but there is known to be a trailing
         /// comma or semicolon before the rbrace.
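[Reviewer note, not part of the patch to apply: a minimal sketch of what the
`backing_integer` field added to `std.builtin.Type.Struct` above enables from
user code. The `Flags` type and its fields are hypothetical, and the
assertions assume the semantics implemented by this patch series.]

    const std = @import("std");

    // A packed struct with an explicitly specified u8 backing integer.
    // The field widths must sum to exactly 8 bits or compilation fails.
    const Flags = packed struct(u8) {
        read: bool, // 1 bit
        write: bool, // 1 bit
        exec: bool, // 1 bit
        _reserved: u5 = 0, // remaining 5 bits
    };

    comptime {
        const info = @typeInfo(Flags).Struct;
        std.debug.assert(info.layout == .Packed);
        // The explicit backing integer is now observable via reflection.
        std.debug.assert(info.backing_integer.? == u8);
    }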
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 2a7d2623efb2..a03764a91cc6 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -3356,16 +3356,18 @@ const Parser = struct {
     }
 
     /// Caller must have already verified the first token.
+    /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+    ///
     /// ContainerDeclType
-    ///     <- KEYWORD_struct
+    ///     <- KEYWORD_struct (LPAREN Expr RPAREN)?
+    ///      / KEYWORD_opaque
     ///      / KEYWORD_enum (LPAREN Expr RPAREN)?
     ///      / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
-    ///      / KEYWORD_opaque
     fn parseContainerDeclAuto(p: *Parser) !Node.Index {
         const main_token = p.nextToken();
         const arg_expr = switch (p.token_tags[main_token]) {
-            .keyword_struct, .keyword_opaque => null_node,
-            .keyword_enum => blk: {
+            .keyword_opaque => null_node,
+            .keyword_struct, .keyword_enum => blk: {
                 if (p.eatToken(.l_paren)) |_| {
                     const expr = try p.expectExpr();
                     _ = try p.expectToken(.r_paren);
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index a74d53f21c0d..bee9375b5ae1 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -3064,6 +3064,13 @@ test "zig fmt: struct declaration" {
         \\    c: u8,
         \\};
         \\
+        \\const Ps = packed struct(u32) {
+        \\    a: u1,
+        \\    b: u2,
+        \\
+        \\    c: u29,
+        \\};
+        \\
         \\const Es = extern struct {
         \\    a: u8,
         \\    b: u8,
diff --git a/src/AstGen.zig b/src/AstGen.zig
index efa8690b55ce..af77cdacc466 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -152,6 +152,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
         0,
         tree.containerDeclRoot(),
         .Auto,
+        0,
     )) |struct_decl_ref| {
         assert(refToIndex(struct_decl_ref).? == 0);
     } else |err| switch (err) {
@@ -4223,15 +4224,18 @@ fn structDeclInner(
     node: Ast.Node.Index,
     container_decl: Ast.full.ContainerDecl,
     layout: std.builtin.Type.ContainerLayout,
+    backing_int_node: Ast.Node.Index,
 ) InnerError!Zir.Inst.Ref {
     const decl_inst = try gz.reserveInstructionIndex();
 
-    if (container_decl.ast.members.len == 0) {
+    if (container_decl.ast.members.len == 0 and backing_int_node == 0) {
         try gz.setStruct(decl_inst, .{
             .src_node = node,
             .layout = layout,
             .fields_len = 0,
             .decls_len = 0,
+            .backing_int_ref = .none,
+            .backing_int_body_len = 0,
             .known_non_opv = false,
             .known_comptime_only = false,
         });
@@ -4266,6 +4270,35 @@ fn structDeclInner(
     };
     defer block_scope.unstack();
 
+    const scratch_top = astgen.scratch.items.len;
+    defer astgen.scratch.items.len = scratch_top;
+
+    var backing_int_body_len: usize = 0;
+    const backing_int_ref: Zir.Inst.Ref = blk: {
+        if (backing_int_node != 0) {
+            if (layout != .Packed) {
+                return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{});
+            } else {
+                const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
+                if (!block_scope.isEmpty()) {
+                    if (!block_scope.endsWithNoReturn()) {
+                        _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref);
+                    }
+
+                    const body = block_scope.instructionsSlice();
+                    const old_scratch_len = astgen.scratch.items.len;
+                    try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
+                    appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+                    backing_int_body_len = astgen.scratch.items.len - old_scratch_len;
+                    block_scope.instructions.items.len = block_scope.instructions_top;
+                }
+                break :blk backing_int_ref;
+            }
+        } else {
+            break :blk .none;
+        }
+    };
+
     const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
     const field_count = @intCast(u32, container_decl.ast.members.len - decl_count);
 
@@ -4378,6 +4411,8 @@ fn structDeclInner(
         .layout = layout,
         .fields_len = field_count,
         .decls_len = decl_count,
+        .backing_int_ref = backing_int_ref,
+        .backing_int_body_len = @intCast(u32, backing_int_body_len),
         .known_non_opv = known_non_opv,
         .known_comptime_only = known_comptime_only,
     });
@@ -4386,7 +4421,9 @@ fn structDeclInner(
     const decls_slice = wip_members.declsSlice();
     const fields_slice = wip_members.fieldsSlice();
     const bodies_slice = astgen.scratch.items[bodies_start..];
-    try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + fields_slice.len + bodies_slice.len);
+    try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
+        decls_slice.len + fields_slice.len + bodies_slice.len);
+    astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
     astgen.extra.appendSliceAssumeCapacity(decls_slice);
     astgen.extra.appendSliceAssumeCapacity(fields_slice);
     astgen.extra.appendSliceAssumeCapacity(bodies_slice);
@@ -4582,9 +4619,7 @@ fn containerDecl(
                 else => unreachable,
             } else std.builtin.Type.ContainerLayout.Auto;
 
-            assert(container_decl.ast.arg == 0);
-
-            const result = try structDeclInner(gz, scope, node, container_decl, layout);
+            const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg);
             return rvalue(gz, rl, result, node);
         },
         .keyword_union => {
@@ -11254,6 +11289,8 @@ const GenZir = struct {
         src_node: Ast.Node.Index,
         fields_len: u32,
         decls_len: u32,
+        backing_int_ref: Zir.Inst.Ref,
+        backing_int_body_len: u32,
         layout: std.builtin.Type.ContainerLayout,
         known_non_opv: bool,
         known_comptime_only: bool,
@@ -11261,7 +11298,7 @@ const GenZir = struct {
         const astgen = gz.astgen;
         const gpa = astgen.gpa;
 
-        try astgen.extra.ensureUnusedCapacity(gpa, 4);
+        try astgen.extra.ensureUnusedCapacity(gpa, 6);
         const payload_index = @intCast(u32, astgen.extra.items.len);
 
         if (args.src_node != 0) {
@@ -11274,6 +11311,12 @@ const GenZir = struct {
         if (args.decls_len != 0) {
             astgen.extra.appendAssumeCapacity(args.decls_len);
         }
+        if (args.backing_int_ref != .none) {
+            astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
+            if (args.backing_int_body_len == 0) {
+                astgen.extra.appendAssumeCapacity(@enumToInt(args.backing_int_ref));
+            }
+        }
         astgen.instructions.set(inst, .{
             .tag = .extended,
             .data = .{ .extended = .{
@@ -11282,6 +11325,7 @@ const GenZir = struct {
                 .has_src_node = args.src_node != 0,
                 .has_fields_len = args.fields_len != 0,
                 .has_decls_len = args.decls_len != 0,
+                .has_backing_int = args.backing_int_ref != .none,
                 .known_non_opv = args.known_non_opv,
                 .known_comptime_only = args.known_comptime_only,
                 .name_strategy = gz.anon_name_strategy,
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 3fcc28d74274..ef2579a8e418 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -2536,6 +2536,17 @@ fn walkInstruction(
                 break :blk decls_len;
             } else 0;
 
+            // TODO: Expose explicit backing integer types in some way.
+            if (small.has_backing_int) {
+                const backing_int_body_len = file.zir.extra[extra_index];
+                extra_index += 1; // backing_int_body_len
+                if (backing_int_body_len == 0) {
+                    extra_index += 1; // backing_int_ref
+                } else {
+                    extra_index += backing_int_body_len; // backing_int_body_inst
+                }
+            }
+
             var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
             var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
 
diff --git a/src/Module.zig b/src/Module.zig
index 8b195eff2de0..7e877a2f4ad6 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -895,6 +895,11 @@ pub const Struct = struct {
     zir_index: Zir.Inst.Index,
 
     layout: std.builtin.Type.ContainerLayout,
+    /// If the layout is not packed, this is the noreturn type.
+    /// If the layout is packed, this is the backing integer type of the packed struct.
+    /// Whether zig chooses this type or the user specifies it, it is stored here.
+    /// This will be set to the noreturn type until status is `have_layout`.
+    backing_int_ty: Type = Type.initTag(.noreturn),
     status: enum {
         none,
         field_types_wip,
@@ -1025,7 +1030,7 @@ pub const Struct = struct {
 
     pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
         assert(s.layout == .Packed);
-        assert(s.haveFieldTypes());
+        assert(s.haveLayout());
         var bit_sum: u64 = 0;
         for (s.fields.values()) |field, i| {
             if (i == index) {
@@ -1033,19 +1038,7 @@ pub const Struct = struct {
             }
             bit_sum += field.ty.bitSize(target);
         }
-        return @intCast(u16, bit_sum);
-    }
-
-    pub fn packedIntegerBits(s: Struct, target: Target) u16 {
-        return s.packedFieldBitOffset(target, s.fields.count());
-    }
-
-    pub fn packedIntegerType(s: Struct, target: Target, buf: *Type.Payload.Bits) Type {
-        buf.* = .{
-            .base = .{ .tag = .int_unsigned },
-            .data = s.packedIntegerBits(target),
-        };
-        return Type.initPayload(&buf.base);
+        unreachable; // index out of bounds
     }
 };
 
diff --git a/src/Sema.zig b/src/Sema.zig
index 6d95b46c7c6a..faf16926539f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2239,6 +2239,16 @@ pub fn analyzeStructDecl(
         break :blk decls_len;
     } else 0;
 
+    if (small.has_backing_int) {
+        const backing_int_body_len = sema.code.extra[extra_index];
+        extra_index += 1; // backing_int_body_len
+        if (backing_int_body_len == 0) {
+            extra_index += 1; // backing_int_ref
+        } else {
+            extra_index += backing_int_body_len; // backing_int_body_inst
+        }
+    }
+
     _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl);
 }
 
@@ -14285,13 +14295,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace());
 
-            const field_values = try sema.arena.create([4]Value);
+            const backing_integer_val = blk: {
+                if (layout == .Packed) {
+                    const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                    assert(struct_obj.haveLayout());
+                    assert(struct_obj.backing_int_ty.isInt());
+                    const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
+                    break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
+                } else {
+                    break :blk Value.initTag(.null_value);
+                }
+            };
+
+            const field_values = try sema.arena.create([5]Value);
             field_values.* = .{
                 // layout: ContainerLayout,
                 try Value.Tag.enum_field_index.create(
                     sema.arena,
                     @enumToInt(layout),
                 ),
+                // backing_integer: ?type,
+                backing_integer_val,
                 // fields: []const StructField,
                 fields_val,
                 // decls: []const Declaration,
@@ -16308,7 +16332,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
            if (!try sema.intFitsInType(block, src, alignment_val, Type.u32, null)) {
                return sema.fail(block, src, "alignment must fit in 'u32'", .{});
            }
-           const abi_align = @intCast(u29, alignment_val.toUnsignedInt(target));
+           const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?);
 
            var buffer: Value.ToTypeBuffer = undefined;
            const unresolved_elem_ty = child_val.toType(&buffer);
@@ -16473,22 +16497,31 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
            const struct_val = union_val.val.castTag(.aggregate).?.data;
            // layout: containerlayout,
            const layout_val = struct_val[0];
+           // backing_int: ?type,
+           const backing_int_val = struct_val[1];
            // fields: []const enumfield,
-           const fields_val = struct_val[1];
+           const fields_val = struct_val[2];
            // decls: []const declaration,
-           const decls_val = struct_val[2];
+           const decls_val = struct_val[3];
            // is_tuple: bool,
-           const is_tuple_val = struct_val[3];
+           const is_tuple_val = struct_val[4];
+
+           assert(struct_val.len == 5);
+
+           const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
 
            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified structs must have no decls", .{});
            }
 
+           if (layout != .Packed and !backing_int_val.isNull()) {
+               return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
+           }
+
            return if (is_tuple_val.toBool())
                try sema.reifyTuple(block, src, fields_val)
            else
-               try sema.reifyStruct(block, inst, src, layout_val, fields_val, name_strategy);
+               try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy);
        },
        .Enum => {
            const struct_val = union_val.val.castTag(.aggregate).?.data;
@@ -16981,7 +17014,8 @@ fn reifyStruct(
    block: *Block,
    inst: Zir.Inst.Index,
    src: LazySrcLoc,
-   layout_val: Value,
+   layout: std.builtin.Type.ContainerLayout,
+   backing_int_val: Value,
    fields_val: Value,
    name_strategy: Zir.Inst.NameStrategy,
 ) CompileError!Air.Inst.Ref {
@@ -17004,7 +17038,7 @@ fn reifyStruct(
        .owner_decl = new_decl_index,
        .fields = .{},
        .zir_index = inst,
-       .layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
+       .layout = layout,
        .status = .have_field_types,
        .known_non_opv = false,
        .namespace = .{
@@ -17070,6 +17104,41 @@ fn reifyStruct(
        };
    }
 
+   if (layout == .Packed) {
+       struct_obj.status = .layout_wip;
+
+       for (struct_obj.fields.values()) |field, index| {
+           sema.resolveTypeLayout(block, src, field.ty) catch |err| switch (err) {
+               error.AnalysisFail => {
+                   const msg = sema.err orelse return err;
+                   try sema.addFieldErrNote(block, struct_ty, index, msg, "while checking this field", .{});
+                   return err;
+               },
+               else => return err,
+           };
+       }
+
+       var fields_bit_sum: u64 = 0;
+       for (struct_obj.fields.values()) |field| {
+           fields_bit_sum += field.ty.bitSize(target);
+       }
+
+       if (backing_int_val.optionalValue()) |payload| {
+           var buf: Value.ToTypeBuffer = undefined;
+           const backing_int_ty = payload.toType(&buf);
+           try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
+           struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+       } else {
+           var buf: Type.Payload.Bits = .{
+               .base = .{ .tag = .int_unsigned },
+               .data = @intCast(u16, fields_bit_sum),
+           };
+           struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+       }
+
+       struct_obj.status = .have_layout;
+   }
+
    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl_index);
 }
@@ -27154,6 +27223,11 @@ fn resolveStructLayout(
                else => return err,
            };
        }
+
+       if (struct_obj.layout == .Packed) {
+           try semaBackingIntType(sema.mod, struct_obj);
+       }
+
        struct_obj.status = .have_layout;
 
        // In case of querying the ABI alignment of this struct, we will ask
@@ -27173,6 +27247,109 @@ fn resolveStructLayout(
    // otherwise it's a tuple; no need to resolve anything
 }
 
+fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
+    const gpa = mod.gpa;
+    const target = mod.getTarget();
+
+    var fields_bit_sum: u64 = 0;
+    for (struct_obj.fields.values()) |field| {
+        fields_bit_sum += field.ty.bitSize(target);
+    }
+
+    const decl_index = struct_obj.owner_decl;
+    const decl = mod.declPtr(decl_index);
+    var decl_arena = decl.value_arena.?.promote(gpa);
+    defer decl.value_arena.?.* = decl_arena.state;
+    const decl_arena_allocator = decl_arena.allocator();
+
+    const zir = struct_obj.namespace.file_scope.zir;
+    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
+    assert(extended.opcode == .struct_decl);
+    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+
+    if (small.has_backing_int) {
+        var extra_index: usize = extended.operand;
+        extra_index += @boolToInt(small.has_src_node);
+        extra_index += @boolToInt(small.has_fields_len);
+        extra_index += @boolToInt(small.has_decls_len);
+
+        const backing_int_body_len = zir.extra[extra_index];
+        extra_index += 1;
+
+        var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+        defer analysis_arena.deinit();
+
+        var sema: Sema = .{
+            .mod = mod,
+            .gpa = gpa,
+            .arena = analysis_arena.allocator(),
+            .perm_arena = decl_arena_allocator,
+            .code = zir,
+            .owner_decl = decl,
+            .owner_decl_index = decl_index,
+            .func = null,
+            .fn_ret_ty = Type.void,
+            .owner_func = null,
+        };
+        defer sema.deinit();
+
+        var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
+        defer wip_captures.deinit();
+
+        var block: Block = .{
+            .parent = null,
+            .sema = &sema,
+            .src_decl = decl_index,
+            .namespace = &struct_obj.namespace,
+            .wip_capture_scope = wip_captures.scope,
+            .instructions = .{},
+            .inlining = null,
+            .is_comptime = true,
+        };
+        defer {
+            assert(block.instructions.items.len == 0);
+            block.params.deinit(gpa);
+        }
+
+        const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
+        const backing_int_ty = blk: {
+            if (backing_int_body_len == 0) {
+                const backing_int_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+                break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
+            } else {
+                const body = zir.extra[extra_index..][0..backing_int_body_len];
+                const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
+                break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
+            }
+        };
+
+        try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
+        struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
+    } else {
+        var buf: Type.Payload.Bits = .{
+            .base = .{ .tag = .int_unsigned },
+            .data = @intCast(u16, fields_bit_sum),
+        };
+        struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+    }
+}
+
+fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
+    const target = sema.mod.getTarget();
+
+    if (!backing_int_ty.isInt()) {
+        return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
+    }
+    if (backing_int_ty.bitSize(target) != fields_bit_sum) {
+        return sema.fail(
+            block,
+            src,
+            "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
+            .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum },
+        );
+    }
+}
+
 fn resolveUnionLayout(
     sema: *Sema,
     block: *Block,
@@ -27495,12 +27672,26 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         break :decls_len decls_len;
     } else 0;
 
+    // The backing integer cannot be handled until `resolveStructLayout()`.
+    if (small.has_backing_int) {
+        const backing_int_body_len = zir.extra[extra_index];
+        extra_index += 1; // backing_int_body_len
+        if (backing_int_body_len == 0) {
+            extra_index += 1; // backing_int_ref
+        } else {
+            extra_index += backing_int_body_len; // backing_int_body_inst
+        }
+    }
+
     // Skip over decls.
     var decls_it = zir.declIteratorInner(extra_index, decls_len);
     while (decls_it.next()) |_| {}
     extra_index = decls_it.extra_index;
 
     if (fields_len == 0) {
+        if (struct_obj.layout == .Packed) {
+            try semaBackingIntType(mod, struct_obj);
+        }
         struct_obj.status = .have_layout;
         return;
     }
diff --git a/src/Zir.zig b/src/Zir.zig
index c62e6d02bb55..538ef6aaf8c3 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -3085,13 +3085,16 @@ pub const Inst = struct {
     /// 0. src_node: i32, // if has_src_node
     /// 1. fields_len: u32, // if has_fields_len
     /// 2. decls_len: u32, // if has_decls_len
-    /// 3. decl_bits: u32 // for every 8 decls
+    /// 3. backing_int_body_len: u32, // if has_backing_int
+    /// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
+    /// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
+    /// 6. decl_bits: u32 // for every 8 decls
     ///    - sets of 4 bits:
     ///      0b000X: whether corresponding decl is pub
     ///      0b00X0: whether corresponding decl is exported
     ///      0b0X00: whether corresponding decl has an align expression
     ///      0bX000: whether corresponding decl has a linksection or an address space expression
-    /// 4. decl: { // for every decls_len
+    /// 7. decl: { // for every decls_len
     ///    src_hash: [4]u32, // hash of source bytes
     ///    line: u32, // line number of decl, relative to parent
     ///    name: u32, // null terminated string index
@@ -3109,13 +3112,13 @@ pub const Inst = struct {
     ///        address_space: Ref,
     ///      }
     ///    }
-    /// 5. flags: u32 // for every 8 fields
+    /// 8. flags: u32 // for every 8 fields
     ///    - sets of 4 bits:
     ///      0b000X: whether corresponding field has an align expression
     ///      0b00X0: whether corresponding field has a default expression
     ///      0b0X00: whether corresponding field is comptime
     ///      0bX000: whether corresponding field has a type expression
-    /// 6. fields: { // for every fields_len
+    /// 9. fields: { // for every fields_len
     ///    field_name: u32,
     ///    doc_comment: u32, // 0 if no doc comment
     ///    field_type: Ref, // if corresponding bit is not set. none means anytype.
@@ -3123,7 +3126,7 @@ pub const Inst = struct {
     ///    align_body_len: u32, // if corresponding bit is set
     ///    init_body_len: u32, // if corresponding bit is set
     ///  }
-    /// 7. bodies: { // for every fields_len
+    /// 10. bodies: { // for every fields_len
     ///      field_type_body_inst: Inst, // for each field_type_body_len
     ///      align_body_inst: Inst, // for each align_body_len
     ///      init_body_inst: Inst, // for each init_body_len
@@ -3133,11 +3136,12 @@ pub const Inst = struct {
             has_src_node: bool,
             has_fields_len: bool,
             has_decls_len: bool,
+            has_backing_int: bool,
             known_non_opv: bool,
             known_comptime_only: bool,
             name_strategy: NameStrategy,
             layout: std.builtin.Type.ContainerLayout,
-            _: u7 = undefined,
+            _: u6 = undefined,
         };
     };
 
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 9c3efa18cd9f..6d2922ea3fe5 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1683,8 +1683,7 @@ pub const Object = struct {
             if (ty.castTag(.@"struct")) |payload| {
                 const struct_obj = payload.data;
                 if (struct_obj.layout == .Packed) {
-                    var buf: Type.Payload.Bits = undefined;
-                    const info = struct_obj.packedIntegerType(target, &buf).intInfo(target);
+                    const info = struct_obj.backing_int_ty.intInfo(target);
                     const dwarf_encoding: c_uint = switch (info.signedness) {
                         .signed => DW.ATE.signed,
                         .unsigned => DW.ATE.unsigned,
@@ -2679,9 +2678,7 @@ pub const DeclGen = struct {
             const struct_obj = t.castTag(.@"struct").?.data;
 
             if (struct_obj.layout == .Packed) {
-                var buf: Type.Payload.Bits = undefined;
-                const int_ty = struct_obj.packedIntegerType(target, &buf);
-                const int_llvm_ty = try dg.lowerType(int_ty);
+                const int_llvm_ty = try dg.lowerType(struct_obj.backing_int_ty);
                 gop.value_ptr.* = int_llvm_ty;
                 return int_llvm_ty;
             }
@@ -3330,8 +3327,8 @@ pub const DeclGen = struct {
             const struct_obj = tv.ty.castTag(.@"struct").?.data;
 
             if (struct_obj.layout == .Packed) {
-                const big_bits = struct_obj.packedIntegerBits(target);
-                const int_llvm_ty = dg.context.intType(big_bits);
+                const big_bits = struct_obj.backing_int_ty.bitSize(target);
+                const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
                 const fields = struct_obj.fields.values();
                 comptime assert(Type.packed_struct_layout_version == 2);
                 var running_int: *const llvm.Value = int_llvm_ty.constNull();
@@ -8243,8 +8240,8 @@ pub const FuncGen = struct {
             .Struct => {
                 if (result_ty.containerLayout() == .Packed) {
                     const struct_obj = result_ty.castTag(.@"struct").?.data;
-                    const big_bits = struct_obj.packedIntegerBits(target);
-                    const int_llvm_ty = self.dg.context.intType(big_bits);
+                    const big_bits = struct_obj.backing_int_ty.bitSize(target);
+                    const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
                     const fields = struct_obj.fields.values();
                     comptime assert(Type.packed_struct_layout_version == 2);
                     var running_int: *const llvm.Value = int_llvm_ty.constNull();
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 2b70799c367b..4bc96c42591c 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -1245,9 +1245,28 @@ const Writer = struct {
 
         try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
         try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
-        try stream.print("{s}, {s}, ", .{
-            @tagName(small.name_strategy), @tagName(small.layout),
-        });
+
+        try stream.print("{s}, ", .{@tagName(small.name_strategy)});
+
+        if (small.layout == .Packed and small.has_backing_int) {
+            const backing_int_body_len = self.code.extra[extra_index];
+            extra_index += 1;
+            try stream.writeAll("Packed(");
+            if (backing_int_body_len == 0) {
+                const backing_int_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+                extra_index += 1;
+                try self.writeInstRef(stream, backing_int_ref);
+            } else {
+                const body = self.code.extra[extra_index..][0..backing_int_body_len];
+                extra_index += backing_int_body_len;
+                self.indent += 2;
+                try self.writeBracedDecl(stream, body);
+                self.indent -= 2;
+            }
+            try stream.writeAll("), ");
+        } else {
+            try stream.print("{s}, ", .{@tagName(small.layout)});
+        }
 
         if (decls_len == 0) {
             try stream.writeAll("{}, ");
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index 4028c3872d08..9f9a6151b8d5 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -1116,6 +1116,7 @@ struct AstNodeContainerDecl {
     ContainerLayout layout;
 
     bool auto_enum, is_root; // union(enum)
+    bool unsupported_explicit_backing_int;
 };
 
 struct AstNodeErrorSetField {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 08aa8bbf062f..90173f384e9e 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -3034,6 +3034,12 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
 
     AstNode *decl_node = struct_type->data.structure.decl_node;
 
+    if (decl_node->data.container_decl.unsupported_explicit_backing_int) {
+        add_node_error(g, decl_node, buf_create_from_str(
+            "the stage1 compiler does not support explicit backing integer types on packed structs"));
+        return ErrorSemanticAnalyzeFail;
+    }
+
    if (struct_type->data.structure.resolve_loop_flag_zero_bits) {
        if (struct_type->data.structure.resolve_status != ResolveStatusInvalid) {
            struct_type->data.structure.resolve_status = ResolveStatusInvalid;
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index e31715030cec..a5428945a99d 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -18640,7 +18640,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
            result->special = ConstValSpecialStatic;
            result->type = ir_type_info_get_type(ira, "Struct", nullptr);
 
-           ZigValue **fields = alloc_const_vals_ptrs(g, 4);
+           ZigValue **fields = alloc_const_vals_ptrs(g, 5);
            result->data.x_struct.fields = fields;
 
            // layout: ContainerLayout
@@ -18648,8 +18648,17 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
            fields[0]->special = ConstValSpecialStatic;
            fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
            bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.structure.layout);
+
+           // backing_integer: ?type
+           ensure_field_index(result->type, "backing_integer", 1);
+           fields[1]->special = ConstValSpecialStatic;
+           fields[1]->type = get_optional_type(g, g->builtin_types.entry_type);
+           // This is always null in stage1, as stage1 does not support explicit backing integers
+           // for packed structs.
+           fields[1]->data.x_optional = nullptr;
+
            // fields: []Type.StructField
-           ensure_field_index(result->type, "fields", 1);
+           ensure_field_index(result->type, "fields", 2);
            ZigType *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
            if ((err = type_resolve(g, type_info_struct_field_type, ResolveStatusSizeKnown))) {
@@ -18663,7 +18672,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
            struct_field_array->data.x_array.special = ConstArraySpecialNone;
            struct_field_array->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(struct_field_count);
 
-           init_const_slice(g, fields[1], struct_field_array, 0, struct_field_count, false, nullptr);
+           init_const_slice(g, fields[2], struct_field_array, 0, struct_field_count, false, nullptr);
 
            for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
                TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];
@@ -18710,18 +18719,18 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
                struct_field_val->parent.data.p_array.elem_index = struct_field_index;
            }
            // decls: []Type.Declaration
-           ensure_field_index(result->type, "decls", 2);
-           if ((err = ir_make_type_info_decls(ira, source_node, fields[2],
+           ensure_field_index(result->type, "decls", 3);
+           if ((err = ir_make_type_info_decls(ira, source_node, fields[3],
                            type_entry->data.structure.decls_scope, false)))
            {
                return err;
            }
 
            // is_tuple: bool
-           ensure_field_index(result->type, "is_tuple", 3);
-           fields[3]->special = ConstValSpecialStatic;
-           fields[3]->type = g->builtin_types.entry_bool;
-           fields[3]->data.x_bool = is_tuple(type_entry);
+           ensure_field_index(result->type, "is_tuple", 4);
+           fields[4]->special = ConstValSpecialStatic;
+           fields[4]->type = g->builtin_types.entry_bool;
+           fields[4]->data.x_bool = is_tuple(type_entry);
 
            break;
        }
@@ -19313,7 +19322,14 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
            assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
            ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
 
-           ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 1);
+           ZigType *tag_type = get_const_field_meta_type_optional(ira, source_node, payload, "backing_integer", 1);
+           if (tag_type != nullptr) {
+               ir_add_error_node(ira, source_node, buf_create_from_str(
+                   "the stage1 compiler does not support explicit backing integer types on packed structs"));
+               return ira->codegen->invalid_inst_gen->value->type;
+           }
+
+           ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 2);
            if (fields_value == nullptr)
                return ira->codegen->invalid_inst_gen->value->type;
            assert(fields_value->special == ConstValSpecialStatic);
@@ -19322,7 +19338,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
            ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
            size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
 
-           ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 2);
+           ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 3);
            if (decls_value == nullptr)
                return ira->codegen->invalid_inst_gen->value->type;
            assert(decls_value->special == ConstValSpecialStatic);
@@ -19335,7 +19351,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
            }
 
            bool is_tuple;
-           if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 3, &is_tuple)))
+           if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 4, &is_tuple)))
                return ira->codegen->invalid_inst_gen->value->type;
 
            ZigType *entry = new_type_table_entry(ZigTypeIdStruct);
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index fdc0777aff1c..bd778484cb9f 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -2902,16 +2902,25 @@ static AstNode *ast_parse_container_decl_auto(ParseContext *pc) {
 }
 
 // ContainerDeclType
-//     <- KEYWORD_struct
+//     <- KEYWORD_struct (LPAREN Expr RPAREN)?
 //      / KEYWORD_enum (LPAREN Expr RPAREN)?
 //      / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
 //      / KEYWORD_opaque
 static AstNode *ast_parse_container_decl_type(ParseContext *pc) {
     TokenIndex first = eat_token_if(pc, TokenIdKeywordStruct);
     if (first != 0) {
+        bool explicit_backing_int = false;
+        if (eat_token_if(pc, TokenIdLParen) != 0) {
+            explicit_backing_int = true;
+            ast_expect(pc, ast_parse_expr);
+            expect_token(pc, TokenIdRParen);
+        }
         AstNode *res = ast_create_node(pc, NodeTypeContainerDecl, first);
         res->data.container_decl.init_arg_expr = nullptr;
         res->data.container_decl.kind = ContainerKindStruct;
+        // We want this to be an error in semantic analysis not parsing to make sharing
+        // the test suite between stage1 and self hosted easier.
+        res->data.container_decl.unsupported_explicit_backing_int = explicit_backing_int;
         return res;
     }
 
diff --git a/src/type.zig b/src/type.zig
index 85d77303c9e7..1b71f4e9b1d9 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3000,9 +3000,17 @@ pub const Type = extern union {
                     .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
                 };
                 if (struct_obj.layout == .Packed) {
-                    var buf: Type.Payload.Bits = undefined;
-                    const int_ty = struct_obj.packedIntegerType(target, &buf);
-                    return AbiAlignmentAdvanced{ .scalar = int_ty.abiAlignment(target) };
+                    switch (strat) {
+                        .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
+                        .lazy => |arena| {
+                            if (!struct_obj.haveLayout()) {
+                                return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
+                            }
+                        },
+                        .eager => {},
+                    }
+                    assert(struct_obj.haveLayout());
+                    return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
                 }
 
                 const fields = ty.structFields();
@@ -3192,17 +3200,16 @@ pub const Type = extern union {
             .Packed => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
                 switch (strat) {
-                    .sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
+                    .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
                     .lazy => |arena| {
-                        if (!struct_obj.haveFieldTypes()) {
+                        if (!struct_obj.haveLayout()) {
                             return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
                         }
                     },
                     .eager => {},
                 }
-                var buf: Type.Payload.Bits = undefined;
-                const int_ty = struct_obj.packedIntegerType(target, &buf);
-                return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
+                assert(struct_obj.haveLayout());
+                return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) };
             },
             else => {
                 switch (strat) {
diff --git a/test/behavior.zig b/test/behavior.zig
index fee61f5e0921..e9c4ec779bb5 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -165,6 +165,7 @@ test {
 
     if (builtin.zig_backend != .stage1) {
         _ = @import("behavior/decltest.zig");
+        _ = @import("behavior/packed_struct_explicit_backing_int.zig");
     }
 
     if (builtin.os.tag != .wasi) {
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
new file mode 100644
index 000000000000..165e94fd4e73
--- /dev/null
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -0,0 +1,53 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const expectEqual = std.testing.expectEqual;
+const native_endian = builtin.cpu.arch.endian();
+
+test "packed struct explicit backing integer" {
+    assert(builtin.zig_backend != .stage1);
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+    const S1 = packed struct { a: u8, b: u8, c: u8 };
+
+    const S2 = packed struct(i24) { d: u8, e: u8, f: u8 };
+
+    const S3 = packed struct { x: S1, y: S2 };
+    const S3Padded = packed struct(u64) { s3: S3, pad: u16 };
+
+    try expectEqual(48, @bitSizeOf(S3));
+    try expectEqual(@sizeOf(u48), @sizeOf(S3));
+
+    try expectEqual(3, @offsetOf(S3, "y"));
+    try expectEqual(24, @bitOffsetOf(S3, "y"));
+
+    if (native_endian == .Little) {
+        const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3;
+        try expectEqual(@as(u8, 0xf4), s3.x.a);
+        try expectEqual(@as(u8, 0x1f), s3.x.b);
+        try expectEqual(@as(u8, 0xc7), s3.x.c);
+        try expectEqual(@as(u8, 0xd5), s3.y.d);
+        try expectEqual(@as(u8, 0x52), s3.y.e);
+        try expectEqual(@as(u8, 0xe9), s3.y.f);
+    }
+
+    const S4 = packed struct { a: i32, b: i8 };
+    const S5 = packed struct(u80) { a: i32, b: i8, c: S4 };
+    const S6 = packed struct(i80) { a: i32, b: S4, c: i8 };
+
+    const expectedBitSize = 80;
+    const expectedByteSize = @sizeOf(u80);
+    try expectEqual(expectedBitSize, @bitSizeOf(S5));
+    try expectEqual(expectedByteSize, @sizeOf(S5));
+    try expectEqual(expectedBitSize, @bitSizeOf(S6));
+    try expectEqual(expectedByteSize, @sizeOf(S6));
+
+    try expectEqual(5, @offsetOf(S5, "c"));
+    try expectEqual(40, @bitOffsetOf(S5, "c"));
+    try expectEqual(9, @offsetOf(S6, "c"));
+    try expectEqual(72, @bitOffsetOf(S6, "c"));
+}
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index b1012e69c8b7..968c3e7490b7 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -293,6 +293,7 @@ test "type info: struct info" {
 fn testStruct() !void {
     const unpacked_struct_info = @typeInfo(TestStruct);
     try expect(unpacked_struct_info.Struct.is_tuple == false);
+    try expect(unpacked_struct_info.Struct.backing_integer == null);
     try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32));
     try expect(@ptrCast(*const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4);
     try expect(mem.eql(u8, "foobar", @ptrCast(*const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*));
@@ -315,6 +316,7 @@ fn testPackedStruct() !void {
     try expect(struct_info == .Struct);
     try expect(struct_info.Struct.is_tuple == false);
     try expect(struct_info.Struct.layout == .Packed);
+    try expect(struct_info.Struct.backing_integer == u128);
     try expect(struct_info.Struct.fields.len == 4);
     try expect(struct_info.Struct.fields[0].alignment == 0);
     try expect(struct_info.Struct.fields[2].field_type == f32);
@@ -326,7 +328,7 @@ fn testPackedStruct() !void {
 }
 
 const TestPackedStruct = packed struct {
-    fieldA: usize,
+    fieldA: u64,
     fieldB: void,
     fieldC: f32,
     fieldD: u32 = 4,
diff --git a/test/cases/compile_errors/packed_struct_backing_int_wrong.zig b/test/cases/compile_errors/packed_struct_backing_int_wrong.zig
new file mode 100644
index 000000000000..cd1b4ec11cb7
--- /dev/null
+++ b/test/cases/compile_errors/packed_struct_backing_int_wrong.zig
@@ -0,0 +1,55 @@
+export fn entry1() void {
+    _ = @sizeOf(packed struct(u32) {
+        x: u1,
+        y: u24,
+        z: u4,
+    });
+}
+export fn entry2() void {
+    _ = @sizeOf(packed struct(i31) {
+        x: u4,
+        y: u24,
+        z: u4,
+    });
+}
+
+export fn entry3() void {
+    _ = @sizeOf(packed struct(void) {
+        x: void,
+    });
+}
+
+export fn entry4() void {
+    _ = @sizeOf(packed struct(void) {});
+}
+
+export fn entry5() void {
+    _ = @sizeOf(packed struct(noreturn) {});
+}
+
+export fn entry6() void {
+    _ = @sizeOf(packed struct(f64) {
+        x: u32,
+        y: f32,
+    });
+}
+
+export fn entry7() void {
+    _ = @sizeOf(packed struct(*u32) {
+        x: u4,
+        y: u24,
+        z: u4,
+    });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:31: error: backing integer type 'u32' has bit size 32 but the struct fields have a total bit size of 29
+// :9:31: error: backing integer type 'i31' has bit size 31 but the struct fields have a total bit size of 32
+// :17:31: error: expected backing integer type, found 'void'
+// :23:31: error: expected backing integer type, found 'void'
+// :27:31: error: expected backing integer type, found 'noreturn'
+// :31:31: error: expected backing integer type, found 'f64'
+// :38:31: error: expected backing integer type, found '*u32'
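
[Reviewer note, not part of the patches to apply: a usage sketch of the
feature as landed above. The `Header` type and field names are hypothetical;
the two-argument `@bitCast` form matches the behavior tests in this series.]

    const std = @import("std");

    // The explicit backing integer fixes the struct's total bit width
    // independently of the target, so integer round-trips are portable.
    const Header = packed struct(u16) {
        version: u4,
        flags: u4,
        length: u8,
    };

    test "round-trip through the explicit backing integer" {
        const h = Header{ .version = 1, .flags = 0xA, .length = 0xFF };
        // The struct occupies exactly 16 bits, so the cast is lossless.
        const bits = @bitCast(u16, h);
        try std.testing.expectEqual(h, @bitCast(Header, bits));
    }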