diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index ef71f56e3f509..aa3e99990bf21 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -128,11 +128,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
     return a >= b;
 }
 
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+    return ((__uint128_t)a) >= ((__uint128_t)b);
+}
+
 static inline bool int128_lt(Int128 a, Int128 b)
 {
     return a < b;
 }
 
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+    return (__uint128_t)a < (__uint128_t)b;
+}
+
 static inline bool int128_le(Int128 a, Int128 b)
 {
     return a <= b;
@@ -373,11 +383,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
     return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo);
 }
 
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+    return (uint64_t)a.hi > (uint64_t)b.hi || (a.hi == b.hi && a.lo >= b.lo);
+}
+
 static inline bool int128_lt(Int128 a, Int128 b)
 {
     return !int128_ge(a, b);
 }
 
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+    return !int128_uge(a, b);
+}
+
 static inline bool int128_le(Int128 a, Int128 b)
 {
     return int128_ge(b, a);
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 6233e28d853f0..c3fadd34fd1ba 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -196,14 +196,14 @@ DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
 DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
 DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
 DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_3(vadduqm, TCG_CALL_NO_RWG, void, avr, avr, avr)
-DEF_HELPER_FLAGS_4(vaddecuq, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
-DEF_HELPER_FLAGS_4(vaddeuqm, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
-DEF_HELPER_FLAGS_3(vaddcuq, TCG_CALL_NO_RWG, void, avr, avr, avr)
-DEF_HELPER_FLAGS_3(vsubuqm, TCG_CALL_NO_RWG, void, avr, avr, avr)
-DEF_HELPER_FLAGS_4(vsubecuq, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
-DEF_HELPER_FLAGS_4(vsubeuqm, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
-DEF_HELPER_FLAGS_3(vsubcuq, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VADDUQM, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VADDECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VADDEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VADDCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VSUBUQM, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VSUBECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VSUBEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VSUBCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
 DEF_HELPER_FLAGS_4(vsldoi, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
 DEF_HELPER_FLAGS_3(vextractub, TCG_CALL_NO_RWG, void, avr, avr, i32)
 DEF_HELPER_FLAGS_3(vextractuh, TCG_CALL_NO_RWG, void, avr, avr, i32)
@@ -310,7 +310,7 @@ DEF_HELPER_FLAGS_3(vbpermq, TCG_CALL_NO_RWG, void, avr, avr, avr)
 DEF_HELPER_FLAGS_3(vpmsumb, TCG_CALL_NO_RWG, void, avr, avr, avr)
 DEF_HELPER_FLAGS_3(vpmsumh, TCG_CALL_NO_RWG, void, avr, avr, avr)
 DEF_HELPER_FLAGS_3(vpmsumw, TCG_CALL_NO_RWG, void, avr, avr, avr)
-DEF_HELPER_FLAGS_3(vpmsumd, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VPMSUMD, TCG_CALL_NO_RWG, void, avr, avr, avr)
 DEF_HELPER_FLAGS_2(vextublx, TCG_CALL_NO_RWG, tl, tl, avr)
 DEF_HELPER_FLAGS_2(vextuhlx, TCG_CALL_NO_RWG, tl, tl, avr)
 DEF_HELPER_FLAGS_2(vextuwlx, TCG_CALL_NO_RWG, tl, tl, avr)
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index 18a94fa3b52ec..7f6ac992cd97d 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -426,6 +426,10 @@ DSCLIQ          111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
 DSCRI           111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
 DSCRIQ          111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
 
+## Vector Exclusive-OR-based Instructions
+
+VPMSUMD         000100 ..... ..... ..... 10011001000    @VX
+
 ## Vector Integer Instructions
 
 VCMPEQUB        000100 ..... ..... ..... . 0000000110   @VC
@@ -546,6 +550,18 @@ VRLQNM          000100 ..... ..... ..... 00101000101    @VX
 
 ## Vector Integer Arithmetic Instructions
 
+VADDCUQ         000100 ..... ..... ..... 00101000000    @VX
+VADDUQM         000100 ..... ..... ..... 00100000000    @VX
+
+VADDEUQM        000100 ..... ..... ..... ..... 111100   @VA
+VADDECUQ        000100 ..... ..... ..... ..... 111101   @VA
+
+VSUBCUQ         000100 ..... ..... ..... 10101000000    @VX
+VSUBUQM         000100 ..... ..... ..... 10100000000    @VX
+
+VSUBECUQ        000100 ..... ..... ..... ..... 111111   @VA
+VSUBEUQM        000100 ..... ..... ..... ..... 111110   @VA
+
 VEXTSB2W        000100 ..... 10000 ..... 11000000010    @VX_tb
 VEXTSH2W        000100 ..... 10001 ..... 11000000010    @VX_tb
 VEXTSB2D        000100 ..... 11000 ..... 11000000010    @VX_tb
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 105b626d1b5a3..6e5293e1be4a0 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1378,52 +1378,24 @@ PMSUM(vpmsumb, u8, u16, uint16_t)
 PMSUM(vpmsumh, u16, u32, uint32_t)
 PMSUM(vpmsumw, u32, u64, uint64_t)
 
-void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-
-#ifdef CONFIG_INT128
-    int i, j;
-    __uint128_t prod[2];
-
-    VECTOR_FOR_INORDER_I(i, u64) {
-        prod[i] = 0;
-        for (j = 0; j < 64; j++) {
-            if (a->u64[i] & (1ull << j)) {
-                prod[i] ^= (((__uint128_t)b->u64[i]) << j);
-            }
-        }
-    }
-
-    r->u128 = prod[0] ^ prod[1];
-
-#else
     int i, j;
-    ppc_avr_t prod[2];
-
-    VECTOR_FOR_INORDER_I(i, u64) {
-        prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
-        for (j = 0; j < 64; j++) {
-            if (a->u64[i] & (1ull << j)) {
-                ppc_avr_t bshift;
-                if (j == 0) {
-                    bshift.VsrD(0) = 0;
-                    bshift.VsrD(1) = b->u64[i];
-                } else {
-                    bshift.VsrD(0) = b->u64[i] >> (64 - j);
-                    bshift.VsrD(1) = b->u64[i] << j;
-                }
-                prod[i].VsrD(1) ^= bshift.VsrD(1);
-                prod[i].VsrD(0) ^= bshift.VsrD(0);
+    Int128 tmp, prod[2] = {int128_zero(), int128_zero()};
+
+    for (j = 0; j < 64; j++) {
+        for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+            if (a->VsrD(i) & (1ull << j)) {
+                tmp = int128_make64(b->VsrD(i));
+                tmp = int128_lshift(tmp, j);
+                prod[i] = int128_xor(prod[i], tmp);
             }
         }
     }
 
-    r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1);
-    r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0);
-#endif
+    r->s128 = int128_xor(prod[0], prod[1]);
 }
 
-
 #if HOST_BIG_ENDIAN
 #define PKBIG 1
 #else
@@ -2098,189 +2070,66 @@ VGENERIC_DO(popcntd, u64)
 
 #undef VGENERIC_DO
 
-#if HOST_BIG_ENDIAN
-#define QW_ONE { .u64 = { 0, 1 } }
-#else
-#define QW_ONE { .u64 = { 1, 0 } }
-#endif
-
-#ifndef CONFIG_INT128
-
-static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
-{
-    t->u64[0] = ~a.u64[0];
-    t->u64[1] = ~a.u64[1];
-}
-
-static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
-{
-    if (a.VsrD(0) < b.VsrD(0)) {
-        return -1;
-    } else if (a.VsrD(0) > b.VsrD(0)) {
-        return 1;
-    } else if (a.VsrD(1) < b.VsrD(1)) {
-        return -1;
-    } else if (a.VsrD(1) > b.VsrD(1)) {
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
-{
-    t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
-    t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
-                    (~a.VsrD(1) < b.VsrD(1));
-}
-
-static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
-{
-    ppc_avr_t not_a;
-    t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
-    t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
-                    (~a.VsrD(1) < b.VsrD(1));
-    avr_qw_not(&not_a, a);
-    return avr_qw_cmpu(not_a, b) < 0;
-}
-
-#endif
-
-void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VADDUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-#ifdef CONFIG_INT128
-    r->u128 = a->u128 + b->u128;
-#else
-    avr_qw_add(r, *a, *b);
-#endif
+    r->s128 = int128_add(a->s128, b->s128);
 }
 
-void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VADDEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
 {
-#ifdef CONFIG_INT128
-    r->u128 = a->u128 + b->u128 + (c->u128 & 1);
-#else
-
-    if (c->VsrD(1) & 1) {
-        ppc_avr_t tmp;
-
-        tmp.VsrD(0) = 0;
-        tmp.VsrD(1) = c->VsrD(1) & 1;
-        avr_qw_add(&tmp, *a, tmp);
-        avr_qw_add(r, tmp, *b);
-    } else {
-        avr_qw_add(r, *a, *b);
-    }
-#endif
+    r->s128 = int128_add(int128_add(a->s128, b->s128),
+                         int128_make64(int128_getlo(c->s128) & 1));
 }
 
-void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VADDCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-#ifdef CONFIG_INT128
-    r->u128 = (~a->u128 < b->u128);
-#else
-    ppc_avr_t not_a;
-
-    avr_qw_not(&not_a, *a);
-
+    r->VsrD(1) = int128_ult(int128_not(a->s128), b->s128);
     r->VsrD(0) = 0;
-    r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0);
-#endif
 }
 
-void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VADDECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
 {
-#ifdef CONFIG_INT128
-    int carry_out = (~a->u128 < b->u128);
-    if (!carry_out && (c->u128 & 1)) {
-        carry_out = ((a->u128 + b->u128 + 1) == 0) &&
-                    ((a->u128 != 0) || (b->u128 != 0));
-    }
-    r->u128 = carry_out;
-#else
-
-    int carry_in = c->VsrD(1) & 1;
-    int carry_out = 0;
-    ppc_avr_t tmp;
-
-    carry_out = avr_qw_addc(&tmp, *a, *b);
+    bool carry_out = int128_ult(int128_not(a->s128), b->s128),
+         carry_in = int128_getlo(c->s128) & 1;
 
     if (!carry_out && carry_in) {
-        ppc_avr_t one = QW_ONE;
-        carry_out = avr_qw_addc(&tmp, tmp, one);
+        carry_out = (int128_nz(a->s128) || int128_nz(b->s128)) &&
+                    int128_eq(int128_add(a->s128, b->s128), int128_makes64(-1));
     }
+
     r->VsrD(0) = 0;
     r->VsrD(1) = carry_out;
-#endif
 }
 
-void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VSUBUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-#ifdef CONFIG_INT128
-    r->u128 = a->u128 - b->u128;
-#else
-    ppc_avr_t tmp;
-    ppc_avr_t one = QW_ONE;
-
-    avr_qw_not(&tmp, *b);
-    avr_qw_add(&tmp, *a, tmp);
-    avr_qw_add(r, tmp, one);
-#endif
+    r->s128 = int128_sub(a->s128, b->s128);
 }
 
-void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VSUBEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
 {
-#ifdef CONFIG_INT128
-    r->u128 = a->u128 + ~b->u128 + (c->u128 & 1);
-#else
-    ppc_avr_t tmp, sum;
-
-    avr_qw_not(&tmp, *b);
-    avr_qw_add(&sum, *a, tmp);
-
-    tmp.VsrD(0) = 0;
-    tmp.VsrD(1) = c->VsrD(1) & 1;
-    avr_qw_add(r, sum, tmp);
-#endif
+    r->s128 = int128_add(int128_add(a->s128, int128_not(b->s128)),
+                         int128_make64(int128_getlo(c->s128) & 1));
 }
 
-void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VSUBCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-#ifdef CONFIG_INT128
-    r->u128 = (~a->u128 < ~b->u128) ||
-                 (a->u128 + ~b->u128 == (__uint128_t)-1);
-#else
-    int carry = (avr_qw_cmpu(*a, *b) > 0);
-    if (!carry) {
-        ppc_avr_t tmp;
-        avr_qw_not(&tmp, *b);
-        avr_qw_add(&tmp, *a, tmp);
-        carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull));
-    }
+    Int128 tmp = int128_not(b->s128);
+
+    r->VsrD(1) = int128_ult(int128_not(a->s128), tmp) ||
+                 int128_eq(int128_add(a->s128, tmp), int128_makes64(-1));
     r->VsrD(0) = 0;
-    r->VsrD(1) = carry;
-#endif
 }
 
-void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VSUBECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
 {
-#ifdef CONFIG_INT128
-    r->u128 =
-        (~a->u128 < ~b->u128) ||
-        ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
-#else
-    int carry_in = c->VsrD(1) & 1;
-    int carry_out = (avr_qw_cmpu(*a, *b) > 0);
-    if (!carry_out && carry_in) {
-        ppc_avr_t tmp;
-        avr_qw_not(&tmp, *b);
-        avr_qw_add(&tmp, *a, tmp);
-        carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull));
-    }
+    Int128 tmp = int128_not(b->s128);
+    bool carry_out = int128_ult(int128_not(a->s128), tmp),
+         carry_in = int128_getlo(c->s128) & 1;
+
+    r->VsrD(1) = carry_out || (carry_in && int128_eq(int128_add(a->s128, tmp),
+                                                     int128_makes64(-1)));
     r->VsrD(0) = 0;
-    r->VsrD(1) = carry_out;
-#endif
 }
 
 #define BCD_PLUS_PREF_1 0xC
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index d7524c3204615..a0f7befffee6f 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -1234,18 +1234,6 @@ GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
 GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
 GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
 GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
-GEN_VXFORM(vadduqm, 0, 4);
-GEN_VXFORM(vaddcuq, 0, 5);
-GEN_VXFORM3(vaddeuqm, 30, 0);
-GEN_VXFORM3(vaddecuq, 30, 0);
-GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
-                vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vsubuqm, 0, 20);
-GEN_VXFORM(vsubcuq, 0, 21);
-GEN_VXFORM3(vsubeuqm, 31, 0);
-GEN_VXFORM3(vsubecuq, 31, 0);
-GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
-                vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
 GEN_VXFORM_TRANS(vsl, 2, 7);
 GEN_VXFORM_TRANS(vsr, 2, 11);
 GEN_VXFORM_ENV(vpkuhum, 7, 0);
@@ -2572,6 +2560,12 @@ static bool do_va_helper(DisasContext *ctx, arg_VA *a,
     return true;
 }
 
+TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
+TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
+
+TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
+TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
+
 TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
 TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
 
@@ -2717,7 +2711,6 @@ GEN_VXFORM_TRANS(vgbbd, 6, 20);
 GEN_VXFORM(vpmsumb, 4, 16)
 GEN_VXFORM(vpmsumh, 4, 17)
 GEN_VXFORM(vpmsumw, 4, 18)
-GEN_VXFORM(vpmsumd, 4, 19)
 
 #define GEN_BCD(op)                                \
 static void gen_##op(DisasContext *ctx)            \
@@ -2862,11 +2855,6 @@ GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                 bcdus, PPC_NONE, PPC2_ISA300)
 GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                 bcdtrunc, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
-                bcdtrunc, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
-                bcdutrunc, PPC_NONE, PPC2_ISA300)
-
 
 static void gen_vsbox(DisasContext *ctx)
 {
@@ -3101,6 +3089,14 @@ static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
     return true;
 }
 
+TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
+TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)
+
+TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)
+
+TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
+TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)
+
 static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
                          void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
 {
diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc
index d7cc57868eced..a3a0fd0650054 100644
--- a/target/ppc/translate/vmx-ops.c.inc
+++ b/target/ppc/translate/vmx-ops.c.inc
@@ -126,12 +126,8 @@ GEN_VXFORM(vsubuws, 0, 26),
 GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300),
 GEN_VXFORM(vsubshs, 0, 29),
 GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_207(vadduqm, 0, 4),
-GEN_VXFORM_207(vaddcuq, 0, 5),
-GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300),
-GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300),
-GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_300(bcdtrunc, 0, 20),
+GEN_VXFORM_300(bcdutrunc, 0, 21),
 GEN_VXFORM(vsl, 2, 7),
 GEN_VXFORM(vsr, 2, 11),
 GEN_VXFORM(vpkuhum, 7, 0),
@@ -237,7 +233,6 @@ GEN_VXFORM_207(vgbbd, 6, 20),
 GEN_VXFORM_207(vpmsumb, 4, 16),
 GEN_VXFORM_207(vpmsumh, 4, 17),
 GEN_VXFORM_207(vpmsumw, 4, 18),
-GEN_VXFORM_207(vpmsumd, 4, 19),
 GEN_VXFORM_207(vsbox, 4, 23),
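
Note (illustrative, not part of the patch): the rewritten helpers detect the carry-out of a 128-bit unsigned addition with the identity "a + b carries out iff ~a < b (unsigned)", which is exactly what int128_ult(int128_not(a->s128), b->s128) computes in helper_VADDCUQ/helper_VADDECUQ above. A minimal standalone C sketch of that identity, checked on 64-bit operands; the helper names below (carry_by_identity, carry_by_widening) are hypothetical, and the reference result assumes a GCC/Clang host that provides __uint128_t:

/* Illustrative sketch only -- not part of the patch. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool carry_by_identity(uint64_t a, uint64_t b)
{
    return ~a < b;                            /* the int128_ult(int128_not(a), b) test */
}

static bool carry_by_widening(uint64_t a, uint64_t b)
{
    return (((__uint128_t)a + b) >> 64) != 0; /* true carry, computed by widening */
}

int main(void)
{
    const uint64_t samples[] = { 0, 1, 42, 1ull << 63, UINT64_MAX - 1, UINT64_MAX };
    const size_t n = sizeof(samples) / sizeof(samples[0]);

    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < n; j++) {
            assert(carry_by_identity(samples[i], samples[j]) ==
                   carry_by_widening(samples[i], samples[j]));
        }
    }
    printf("carry-out identity holds for all sampled pairs\n");
    return 0;
}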