From 1b988ebe997799ac66fd6aaa079286ecb879482e Mon Sep 17 00:00:00 2001
From: SparrowLii
Date: Mon, 1 Mar 2021 09:41:57 +0800
Subject: [PATCH 1/5] use generic const in neon

---
 crates/core_arch/src/aarch64/neon/mod.rs             | 320 +++++++++---------
 .../core_arch/src/arm/neon/shift_and_insert_tests.rs |   4 +-
 2 files changed, 162 insertions(+), 162 deletions(-)

diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs
index 227d227f4e..a0c2f3b9cb 100644
--- a/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/crates/core_arch/src/aarch64/neon/mod.rs
@@ -2318,362 +2318,362 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    vsli_n_s8_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    vsli_n_s8_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    vsliq_n_s8_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    vsliq_n_s8_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    vsli_n_s16_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    vsli_n_s16_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    vsliq_n_s16_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    vsliq_n_s16_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    vsli_n_s32_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    vsli_n_s32_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    vsliq_n_s32_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    vsliq_n_s32_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
-    vsli_n_s64_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
+    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    vsli_n_s64_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
-    vsliq_n_s64_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    vsliq_n_s64_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    transmute(vsli_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    transmute(vsli_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    transmute(vsliq_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    transmute(vsli_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    transmute(vsli_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    transmute(vsliq_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    transmute(vsli_n_s32_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    transmute(vsli_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    transmute(vsliq_n_s32_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    transmute(vsliq_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
-    transmute(vsli_n_s64_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    transmute(vsli_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
-    transmute(vsliq_n_s64_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    transmute(vsli_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    transmute(vsli_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    transmute(vsliq_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    transmute(vsli_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    transmute(vsli_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sli, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    transmute(vsliq_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
 }

 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    vsri_n_s8_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    vsri_n_s8_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    vsriq_n_s8_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    vsriq_n_s8_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    vsri_n_s16_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    vsri_n_s16_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    vsriq_n_s16_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    vsriq_n_s16_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
-    vsri_n_s32_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    vsri_n_s32_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
-    vsriq_n_s32_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    vsriq_n_s32_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
-    vsri_n_s64_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
+    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    vsri_n_s64_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
-    vsriq_n_s64_(a, b, n)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    vsriq_n_s64_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    transmute(vsri_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    transmute(vsriq_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    transmute(vsri_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    transmute(vsriq_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
-    transmute(vsri_n_s32_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    transmute(vsri_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
-    transmute(vsriq_n_s32_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    transmute(vsriq_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
-    transmute(vsri_n_s64_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    transmute(vsri_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
-    transmute(vsriq_n_s64_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    transmute(vsri_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    transmute(vsriq_n_s8_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    transmute(vsri_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(sri, n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    transmute(vsriq_n_s16_(transmute(a), transmute(b), n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
 }

 #[cfg(test)]
diff --git a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
index a556789245..07ca893c81 100644
--- a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
+++ b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
@@ -23,7 +23,7 @@ macro_rules! test_vsli {
             let b = [$($b as $t),*];
             let n_bit_mask: $t = (1 << $n) - 1;
             let e = [$(($a as $t & n_bit_mask) | ($b as $t << $n)),*];
-            let r = $fn_id(transmute(a), transmute(b), $n);
+            let r = $fn_id::<$n>(transmute(a), transmute(b));
             let mut d = e;
             d = transmute(r);
             assert_eq!(d, e);
@@ -60,7 +60,7 @@ macro_rules! test_vsri {
             let b = [$($b as $t),*];
             let n_bit_mask = ((1 as $t << $n) - 1).rotate_right($n);
             let e = [$(($a as $t & n_bit_mask) | (($b as $t >> $n) & !n_bit_mask)),*];
-            let r = $fn_id(transmute(a), transmute(b), $n);
+            let r = $fn_id::<$n>(transmute(a), transmute(b));
             let mut d = e;
             d = transmute(r);
             assert_eq!(d, e);

From 2a1d6b8d42926115493642d5180b5692cdd9a6fd Mon Sep 17 00:00:00 2001
From: SparrowLii
Date: Mon, 1 Mar 2021 13:06:20 +0800
Subject: [PATCH 2/5] use static_assert! instead of assert!; use generic const
 in vld1_lane_* and other methods

---
 crates/core_arch/src/aarch64/neon/mod.rs             |  80 +--
 crates/core_arch/src/arm/neon/mod.rs                 | 748 ++++++++----------
 .../core_arch/src/arm/neon/shift_and_insert_tests.rs |   4 +-
 3 files changed, 372 insertions(+), 460 deletions(-)

diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs
index a0c2f3b9cb..c265676542 100644
--- a/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/crates/core_arch/src/aarch64/neon/mod.rs
@@ -2320,7 +2320,7 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     vsli_n_s8_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2329,7 +2329,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     vsliq_n_s8_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2338,7 +2338,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     vsli_n_s16_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2347,7 +2347,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     vsliq_n_s16_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2356,7 +2356,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     vsli_n_s32_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2365,7 +2365,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     vsliq_n_s32_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2374,7 +2374,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 63);
     vsli_n_s64_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2383,7 +2383,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 63);
     vsliq_n_s64_(a, b, N)
 }
 /// Shift Left and Insert (immediate)
@@ -2392,7 +2392,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     transmute(vsli_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2401,7 +2401,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2410,7 +2410,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     transmute(vsli_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2419,7 +2419,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2428,7 +2428,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     transmute(vsli_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2437,7 +2437,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    assert!(0 <= N && N <= 31, "must have 0 ≤ N ≤ 31, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     transmute(vsliq_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2446,7 +2446,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 63);
     transmute(vsli_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2455,7 +2455,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    assert!(0 <= N && N <= 63, "must have 0 ≤ N ≤ 63, but N = {}", N);
+    static_assert!(N: i32 where N >= 0 && N <= 63);
     transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2464,7 +2464,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     transmute(vsli_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2473,7 +2473,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
-    assert!(0 <= N && N <= 7, "must have 0 ≤ N ≤ 7, but N = {}", N);
+    static_assert_imm3!(N);
     transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2482,7 +2482,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     transmute(vsli_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Left and Insert (immediate)
@@ -2491,7 +2491,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 #[cfg_attr(test, assert_instr(sli, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
-    assert!(0 <= N && N <= 15, "must have 0 ≤ N ≤ 15, but N = {}", N);
+    static_assert_imm4!(N);
     transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
 }

@@ -2501,7 +2501,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     vsri_n_s8_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2510,7 +2510,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     vsriq_n_s8_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2519,7 +2519,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     vsri_n_s16_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2528,7 +2528,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     vsriq_n_s16_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2537,7 +2537,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 32);
     vsri_n_s32_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2546,7 +2546,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 32);
     vsriq_n_s32_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2555,7 +2555,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 64);
     vsri_n_s64_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2564,7 +2564,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 64);
     vsriq_n_s64_(a, b, N)
 }
 /// Shift Right and Insert (immediate)
@@ -2573,7 +2573,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     transmute(vsri_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2582,7 +2582,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2591,7 +2591,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     transmute(vsri_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2600,7 +2600,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2609,7 +2609,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 32);
     transmute(vsri_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2618,7 +2618,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    assert!(1 <= N && N <= 32, "must have 1 ≤ N ≤ 32, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 32);
     transmute(vsriq_n_s32_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2627,7 +2627,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 64);
     transmute(vsri_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2636,7 +2636,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    assert!(1 <= N && N <= 64, "must have 1 ≤ N ≤ 64, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 64);
     transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2645,7 +2645,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     transmute(vsri_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2654,7 +2654,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
-    assert!(1 <= N && N <= 8, "must have 1 ≤ N ≤ 8, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 8);
     transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2663,7 +2663,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     transmute(vsri_n_s16_(transmute(a), transmute(b), N))
 }
 /// Shift Right and Insert (immediate)
@@ -2672,7 +2672,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 #[cfg_attr(test, assert_instr(sri, n = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
-    assert!(1 <= N && N <= 16, "must have 1 ≤ N ≤ 16, but N = {}", N);
+    static_assert!(N: i32 where N >= 1 && N <= 16);
     transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
 }

diff --git a/crates/core_arch/src/arm/neon/mod.rs b/crates/core_arch/src/arm/neon/mod.rs
index b4ce393cdc..cc8f4f6203 100644
--- a/crates/core_arch/src/arm/neon/mod.rs
+++ b/crates/core_arch/src/arm/neon/mod.rs
b/crates/core_arch/src/arm/neon/mod.rs @@ -589,352 +589,264 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t, lane: i32) -> int8x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))] -pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t, lane: i32) -> int8x16_t { - assert!( - 0 <= lane && lane <= 15, - "must have 0 ≤ lane ≤ 15, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { + static_assert_imm4!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))] -pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t, lane: i32) -> int16x4_t { - assert!( - 0 <= lane && lane <= 3, - "must have 0 ≤ lane ≤ 3, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { + static_assert_imm2!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t, lane: i32) -> int16x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))] -pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t, lane: i32) -> int32x2_t { - assert!( - 0 <= lane && lane <= 1, - "must have 0 ≤ lane ≤ 1, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))] -pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t, lane: i32) -> int32x4_t { - assert!( - 0 <= lane && lane <= 3, - "must have 0 ≤ lane ≤ 3, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { + static_assert_imm2!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))] -pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t, lane: i32) -> int64x1_t { - assert!( - 0 <= lane && lane <= 0, - "must have 0 ≤ lane ≤ 0, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))] -pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t, lane: i32) -> int64x2_t { - assert!( - 0 <= lane && lane <= 1, - "must have 0 ≤ lane ≤ 1, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t, lane: i32) -> uint8x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))] -pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t, lane: i32) -> uint8x16_t { - assert!( - 0 <= lane && lane <= 15, - "must have 0 ≤ lane ≤ 15, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { + static_assert_imm4!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))] -pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t, lane: i32) -> uint16x4_t { - assert!( - 0 <= lane && lane <= 3, - "must have 0 ≤ lane ≤ 3, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { + static_assert_imm2!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t, lane: i32) -> uint16x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))] -pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t, lane: i32) -> uint32x2_t { - assert!( - 0 <= lane && lane <= 1, - "must have 0 ≤ lane ≤ 1, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))] -pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t, lane: i32) -> uint32x4_t { - assert!( - 0 <= lane && lane <= 3, - "must have 0 ≤ lane ≤ 3, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { + static_assert_imm2!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))] -pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t, lane: i32) -> uint64x1_t { - assert!( - 0 <= lane && lane <= 0, - "must have 0 ≤ lane ≤ 0, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))] -pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t, lane: i32) -> uint64x2_t { - assert!( - 0 <= lane && lane <= 1, - "must have 0 ≤ lane ≤ 1, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { + static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t, lane: i32) -> poly8x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))] -pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t, lane: i32) -> poly8x16_t { - assert!( - 0 <= lane && lane <= 15, - "must have 0 ≤ lane ≤ 15, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { + static_assert_imm4!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))] -pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t, lane: i32) -> poly16x4_t { - assert!( - 0 <= lane && lane <= 3, - "must have 0 ≤ lane ≤ 3, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { + static_assert_imm2!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(2)] +#[rustc_legacy_const_generics(2)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))] -pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t, lane: i32) -> poly16x8_t { - assert!( - 0 <= lane && lane <= 7, - "must have 0 ≤ lane ≤ 7, but lane = {}", - lane - ); - simd_insert(src, lane as u32, *ptr) +pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { + static_assert_imm3!(LANE); + simd_insert(src, LANE as u32, *ptr) } /// Load one single-element structure to one lane of one register. 
/// Load one single-element structure to one lane of one register.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_args_required_const(2)]
+#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
-pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t, lane: i32) -> float32x2_t {
-    assert!(
-        0 <= lane && lane <= 1,
-        "must have 0 ≤ lane ≤ 1, but lane = {}",
-        lane
-    );
-    simd_insert(src, lane as u32, *ptr)
+pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
+    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
+    simd_insert(src, LANE as u32, *ptr)
 }
/// Load one single-element structure to one lane of one register.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_args_required_const(2)]
+#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
-pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t, lane: i32) -> float32x4_t {
-    assert!(
-        0 <= lane && lane <= 3,
-        "must have 0 ≤ lane ≤ 3, but lane = {}",
-        lane
-    );
-    simd_insert(src, lane as u32, *ptr)
+pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
+    static_assert_imm2!(LANE);
+    simd_insert(src, LANE as u32, *ptr)
 }
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -944,7 +856,7 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t, lane: i32) -> fl
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
-    let x = vld1_lane_s8(ptr, transmute(i8x8::splat(0)), 0);
+    let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0)));
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -955,7 +867,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
-    let x = vld1q_lane_s8(ptr, transmute(i8x16::splat(0)), 0);
+    let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0)));
     simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -966,7 +878,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
-    let x = vld1_lane_s16(ptr, transmute(i16x4::splat(0)), 0);
+    let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0)));
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -977,7 +889,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
-    let x = vld1q_lane_s16(ptr, transmute(i16x8::splat(0)), 0);
+    let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0)));
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -988,7 +900,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { - let x = vld1_lane_s32(ptr, transmute(i32x2::splat(0)), 0); + let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); simd_shuffle2(x, x, [0, 0]) } @@ -999,7 +911,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { - let x = vld1q_lane_s32(ptr, transmute(i32x4::splat(0)), 0); + let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); simd_shuffle4(x, x, [0, 0, 0, 0]) } @@ -1022,7 +934,7 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { - let x = vld1q_lane_s64(ptr, transmute(i64x2::splat(0)), 0); + let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); simd_shuffle2(x, x, [0, 0]) } @@ -1033,7 +945,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { - let x = vld1_lane_u8(ptr, transmute(u8x8::splat(0)), 0); + let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1044,7 +956,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { - let x = vld1q_lane_u8(ptr, transmute(u8x16::splat(0)), 0); + let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1055,7 +967,7 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { - let x = vld1_lane_u16(ptr, transmute(u16x4::splat(0)), 0); + let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4(x, x, [0, 0, 0, 0]) } @@ -1066,7 +978,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { - let x = vld1q_lane_u16(ptr, transmute(u16x8::splat(0)), 0); + let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1077,7 +989,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { - let x = vld1_lane_u32(ptr, transmute(u32x2::splat(0)), 0); + let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); simd_shuffle2(x, x, [0, 0]) } @@ -1088,7 +1000,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { - let x = vld1q_lane_u32(ptr, transmute(u32x4::splat(0)), 0); + let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); simd_shuffle4(x, x, [0, 0, 0, 0]) } @@ -1111,7 +1023,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { - let x = vld1q_lane_u64(ptr, transmute(u64x2::splat(0)), 0); + let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle2(x, x, [0, 0]) } @@ -1122,7 +1034,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { - let x = vld1_lane_p8(ptr, transmute(u8x8::splat(0)), 0); + let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1133,7 +1045,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { - let x = vld1q_lane_p8(ptr, transmute(u8x16::splat(0)), 0); + let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1144,7 +1056,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { - let x = vld1_lane_p16(ptr, transmute(u16x4::splat(0)), 0); + let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle4(x, x, [0, 0, 0, 0]) } @@ -1155,7 +1067,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { - let x = vld1q_lane_p16(ptr, transmute(u16x8::splat(0)), 0); + let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) } @@ -1166,7 +1078,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { - let x = vld1_lane_f32(ptr, transmute(f32x2::splat(0.)), 0); + let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.))); simd_shuffle2(x, x, [0, 0]) } @@ -1177,7 +1089,7 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))] pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { - let x = vld1q_lane_f32(ptr, transmute(f32x4::splat(0.)), 0); + let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.))); simd_shuffle4(x, x, [0, 0, 0, 0]) } @@ -3064,14 +2976,14 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(1)] +#[rustc_legacy_const_generics(1)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 1))] // Based on the discussion in https://github.com/rust-lang/stdarch/pull/792 // `mov` seems to be an acceptable intrinsic to compile to // #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(vmov, imm5 = 1))] -pub unsafe fn vgetq_lane_u64(v: uint64x2_t, imm5: i32) -> u64 { - assert!(imm5 >= 0 && imm5 <= 1); +pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { + static_assert!(imm5 : i32 where imm5 >= 0 && imm5 <= 1); simd_extract(v, imm5 as u32) } @@ -3079,13 +2991,13 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t, imm5: i32) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(1)] +#[rustc_legacy_const_generics(1)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, imm5 = 0))] // FIXME: no 32bit this seems to be turned into two vmov.32 instructions // validate correctness -pub unsafe fn vget_lane_u64(v: uint64x1_t, imm5: i32) -> u64 { - assert!(imm5 == 0); +pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { + static_assert!(imm5 : i32 where imm5 == 0); simd_extract(v, 0) } @@ -3093,11 +3005,11 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t, imm5: i32) -> u64 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(1)] +#[rustc_legacy_const_generics(1)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", imm5 = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))] -pub unsafe fn vgetq_lane_u16(v: uint16x8_t, imm5: i32) -> u16 { - assert!(imm5 >= 0 && imm5 <= 7); +pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { + static_assert_imm3!(imm5); simd_extract(v, imm5 as u32) } @@ -3105,11 +3017,11 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t, imm5: i32) -> u16 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(1)] +#[rustc_legacy_const_generics(1)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))] -pub unsafe fn vgetq_lane_u32(v: uint32x4_t, imm5: i32) -> u32 { - assert!(imm5 >= 0 && imm5 <= 3); +pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { + static_assert_imm2!(imm5); simd_extract(v, imm5 as u32) } @@ -3117,11 +3029,11 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t, imm5: i32) -> u32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_args_required_const(1)] +#[rustc_legacy_const_generics(1)] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))] -pub unsafe fn vgetq_lane_s32(v: int32x4_t, imm5: i32) -> i32 { - assert!(imm5 >= 0 && imm5 <= 3); +pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { + static_assert_imm2!(imm5); simd_extract(v, imm5 as u32) } @@ -3129,11 +3041,11 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t, imm5: i32) -> i32 { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
@@ -3129,11 +3041,11 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t, imm5: i32) -> i32 {
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_args_required_const(1)]
+#[rustc_legacy_const_generics(1)]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", imm5 = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))]
-pub unsafe fn vget_lane_u8(v: uint8x8_t, imm5: i32) -> u8 {
-    assert!(imm5 >= 0 && imm5 <= 7);
+pub unsafe fn vget_lane_u8<const imm5: i32>(v: uint8x8_t) -> u8 {
+    static_assert_imm3!(imm5);
     simd_extract(v, imm5 as u32)
 }
@@ -3233,8 +3145,8 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", imm3 = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("ushr", imm3 = 1))]
-#[rustc_args_required_const(1)]
-pub unsafe fn vshrq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t {
+#[rustc_legacy_const_generics(1)]
+pub unsafe fn vshrq_n_u8<const imm3: i32>(a: uint8x16_t) -> uint8x16_t {
     if imm3 < 0 || imm3 > 7 {
         unreachable_unchecked();
     } else {
@@ -3265,8 +3177,8 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshl.s8", imm3 = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, imm3 = 1))]
-#[rustc_args_required_const(1)]
-pub unsafe fn vshlq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t {
+#[rustc_legacy_const_generics(1)]
+pub unsafe fn vshlq_n_u8<const imm3: i32>(a: uint8x16_t) -> uint8x16_t {
     if imm3 < 0 || imm3 > 7 {
         unreachable_unchecked();
     } else {
@@ -3297,12 +3209,12 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
-    if n < 0 || n > 15 {
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    if N < 0 || N > 15 {
         unreachable_unchecked();
     };
-    match n & 0b1111 {
+    match N & 0b1111 {
         0 => simd_shuffle16(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
         1 => simd_shuffle16(
             a,
@@ -3403,12 +3315,12 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
-    if n < 0 || n > 15 {
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    if N < 0 || N > 15 {
         unreachable_unchecked();
     };
-    match n & 0b1111 {
+    match N & 0b1111 {
         0 => simd_shuffle16(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
         1 => simd_shuffle16(
             a,
@@ -3586,10 +3498,10 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    let n = n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    static_assert_imm3!(N);
+    let n = N as i8;
     vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n))
 }
/// Shift Left and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    let n = n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    static_assert_imm3!(N);
+    let n = N as i8;
     vshiftins_v16i8(
         a,
         b,
@@ -3612,10 +3524,10 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
/// Shift Left and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    let n = n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    static_assert_imm4!(N);
+    let n = N as i16;
     vshiftins_v4i16(a, b, int16x4_t(n, n, n, n))
 }
/// Shift Left and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    let n = n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    static_assert_imm4!(N);
+    let n = N as i16;
     vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n))
 }
/// Shift Left and Insert (immediate)
@@ -3634,50 +3546,50 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    vshiftins_v2i32(a, b, int32x2_t(n, n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    static_assert_imm5!(N);
+    vshiftins_v2i32(a, b, int32x2_t(N, N))
 }
/// Shift Left and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    vshiftins_v4i32(a, b, int32x4_t(n, n, n, n))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    static_assert_imm5!(N);
+    vshiftins_v4i32(a, b, int32x4_t(N, N, N, N))
 }
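// Scalar model of what VSLI computes per 8-bit lane, for intuition only; it
// mirrors the n_bit_mask arithmetic used by test_vsli in
// shift_and_insert_tests.rs: keep the low n bits of a, and replace the rest
// with b shifted left by n.
fn vsli_model_u8(a: u8, b: u8, n: u32) -> u8 {
    debug_assert!(n <= 7);
    let mask = (1u8 << n) - 1; // low n bits survive from a
    (a & mask) | (b << n)      // b supplies the shifted-in high bits
}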
n = {}", n); - vshiftins_v1i64(a, b, int64x1_t(n as i64)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N : i32 where 0 <= N && N <= 63); + vshiftins_v1i64(a, b, int64x1_t(N as i64)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - vshiftins_v2i64(a, b, int64x2_t(n as i64, n as i64)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N : i32 where 0 <= N && N <= 63); + vshiftins_v2i64(a, b, int64x2_t(N as i64, N as i64)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v8i8( transmute(a), transmute(b), @@ -3689,10 +3601,10 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v16i8( transmute(a), transmute(b), @@ -3704,10 +3616,10 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_imm4!(N); + let n = N as i16; transmute(vshiftins_v4i16( transmute(a), transmute(b), @@ -3719,10 +3631,10 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_imm4!(N); + let n = N as i16; transmute(vshiftins_v8i16( transmute(a), transmute(b), @@ -3734,23 +3646,23 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.32", n = 1))] 
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
-    transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(n, n)))
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    static_assert_imm5!(N);
+    transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(N, N)))
 }
/// Shift Left and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
-    assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    static_assert_imm5!(N);
     transmute(vshiftins_v4i32(
         transmute(a),
         transmute(b),
-        int32x4_t(n, n, n, n),
+        int32x4_t(N, N, N, N),
     ))
 }
/// Shift Left and Insert (immediate)
@@ -3758,13 +3670,13 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    static_assert!(N : i32 where 0 <= N && N <= 63);
     transmute(vshiftins_v1i64(
         transmute(a),
         transmute(b),
-        int64x1_t(n as i64),
+        int64x1_t(N as i64),
     ))
 }
/// Shift Left and Insert (immediate)
@@ -3772,13 +3684,13 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
-    assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    static_assert!(N : i32 where 0 <= N && N <= 63);
     transmute(vshiftins_v2i64(
         transmute(a),
         transmute(b),
-        int64x2_t(n as i64, n as i64),
+        int64x2_t(N as i64, N as i64),
     ))
 }
/// Shift Left and Insert (immediate)
@@ -3786,10 +3698,10 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    let n = n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    static_assert_imm3!(N);
+    let n = N as i8;
     transmute(vshiftins_v8i8(
         transmute(a),
         transmute(b),
@@ -3801,10 +3713,10 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
-    assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n);
-    let n = n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    static_assert_imm3!(N);
+    let n = N as i8;
     transmute(vshiftins_v16i8(
         transmute(a),
         transmute(b),
@@ -3816,10 +3728,10 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    let n = n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    static_assert_imm4!(N);
+    let n = N as i16;
     transmute(vshiftins_v4i16(
         transmute(a),
         transmute(b),
@@ -3831,10 +3743,10 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t {
-    assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n);
-    let n = n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    static_assert_imm4!(N);
+    let n = N as i16;
     transmute(vshiftins_v8i16(
         transmute(a),
         transmute(b),
@@ -3847,10 +3759,10 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n))
 }
/// Shift Right and Insert (immediate)
@@ -3858,10 +3770,10 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     vshiftins_v16i8(
         a,
         b,
@@ -3873,10 +3785,10 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    let n = -n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    static_assert!(N : i32 where 1 <= N && N <= 16);
+    let n = -N as i16;
     vshiftins_v4i16(a, b, int16x4_t(n, n, n, n))
 }
/// Shift Right and Insert (immediate)
@@ -3884,10 +3796,10 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
"arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t { - assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); - let n = -n as i16; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N : i32 where 1 <= N && N <= 16); + let n = -N as i16; vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n)) } /// Shift Right and Insert (immediate) @@ -3895,50 +3807,50 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t { - assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); - vshiftins_v2i32(a, b, int32x2_t(-n, -n)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N : i32 where 1 <= N && N <= 32); + vshiftins_v2i32(a, b, int32x2_t(-N, -N)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t { - assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); - vshiftins_v4i32(a, b, int32x4_t(-n, -n, -n, -n)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N : i32 where 1 <= N && N <= 32); + vshiftins_v4i32(a, b, int32x4_t(-N, -N, -N, -N)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t { - assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); - vshiftins_v1i64(a, b, int64x1_t(-n as i64)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N : i32 where 1 <= N && N <= 64); + vshiftins_v1i64(a, b, int64x1_t(-N as i64)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t { - assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); - vshiftins_v2i64(a, b, int64x2_t(-n as i64, -n as i64)) +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N : i32 where 1 <= N && N <= 64); + vshiftins_v2i64(a, b, int64x2_t(-N as i64, -N as i64)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { - assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); - let n = -n as i8; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N : i32 where 1 <= N && N <= 8); + let n = -N as 
/// Shift Right and Insert (immediate)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     transmute(vshiftins_v8i8(
         transmute(a),
         transmute(b),
@@ -3950,10 +3862,10 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     transmute(vshiftins_v16i8(
         transmute(a),
         transmute(b),
@@ -3965,10 +3877,10 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    let n = -n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    static_assert!(N : i32 where 1 <= N && N <= 16);
+    let n = -N as i16;
     transmute(vshiftins_v4i16(
         transmute(a),
         transmute(b),
@@ -3980,10 +3892,10 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    let n = -n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    static_assert!(N : i32 where 1 <= N && N <= 16);
+    let n = -N as i16;
     transmute(vshiftins_v8i16(
         transmute(a),
         transmute(b),
@@ -3995,13 +3907,13 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    static_assert!(N : i32 where 1 <= N && N <= 32);
     transmute(vshiftins_v2i32(
         transmute(a),
         transmute(b),
-        int32x2_t(-n, -n),
+        int32x2_t(-N, -N),
     ))
 }
/// Shift Right and Insert (immediate)
@@ -4009,13 +3921,13 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
-    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    static_assert!(N : i32 where 1 <= N && N <= 32);
     transmute(vshiftins_v4i32(
         transmute(a),
         transmute(b),
-        int32x4_t(-n, -n, -n, -n),
+        int32x4_t(-N, -N, -N, -N),
     ))
 }
/// Shift Right and Insert (immediate)
@@ -4023,13 +3935,13 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    static_assert!(N : i32 where 1 <= N && N <= 64);
     transmute(vshiftins_v1i64(
         transmute(a),
         transmute(b),
-        int64x1_t(-n as i64),
+        int64x1_t(-N as i64),
     ))
 }
/// Shift Right and Insert (immediate)
@@ -4037,13 +3949,13 @@ pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
-    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    static_assert!(N : i32 where 1 <= N && N <= 64);
     transmute(vshiftins_v2i64(
         transmute(a),
         transmute(b),
-        int64x2_t(-n as i64, -n as i64),
+        int64x2_t(-N as i64, -N as i64),
     ))
 }
/// Shift Right and Insert (immediate)
@@ -4051,10 +3963,10 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     transmute(vshiftins_v8i8(
         transmute(a),
         transmute(b),
@@ -4066,10 +3978,10 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
-    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
-    let n = -n as i8;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    static_assert!(N : i32 where 1 <= N && N <= 8);
+    let n = -N as i8;
     transmute(vshiftins_v16i8(
         transmute(a),
         transmute(b),
@@ -4081,10 +3993,10 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
-    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
-    let n = -n as i16;
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    static_assert!(N : i32 where 1 <= N && N <= 16);
+    let n = -N as i16;
     transmute(vshiftins_v4i16(
         transmute(a),
         transmute(b),
@@ -4096,10 +4008,10 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn 
vsriq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t { - assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); - let n = -n as i16; +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(N : i32 where 1 <= N && N <= 16); + let n = -N as i16; transmute(vshiftins_v8i16( transmute(a), transmute(b), @@ -4440,7 +4352,7 @@ mod tests { let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i8 = 42; let e = i8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i8x8 = transmute(vld1_lane_s8(&elem, transmute(a), 7)); + let r: i8x8 = transmute(vld1_lane_s8::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4449,7 +4361,7 @@ mod tests { let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: i8 = 42; let e = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: i8x16 = transmute(vld1q_lane_s8(&elem, transmute(a), 15)); + let r: i8x16 = transmute(vld1q_lane_s8::<15>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4458,7 +4370,7 @@ mod tests { let a = i16x4::new(0, 1, 2, 3); let elem: i16 = 42; let e = i16x4::new(0, 1, 2, 42); - let r: i16x4 = transmute(vld1_lane_s16(&elem, transmute(a), 3)); + let r: i16x4 = transmute(vld1_lane_s16::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4467,7 +4379,7 @@ mod tests { let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i16 = 42; let e = i16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i16x8 = transmute(vld1q_lane_s16(&elem, transmute(a), 7)); + let r: i16x8 = transmute(vld1q_lane_s16::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4476,7 +4388,7 @@ mod tests { let a = i32x2::new(0, 1); let elem: i32 = 42; let e = i32x2::new(0, 42); - let r: i32x2 = transmute(vld1_lane_s32(&elem, transmute(a), 1)); + let r: i32x2 = transmute(vld1_lane_s32::<1>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4485,7 +4397,7 @@ mod tests { let a = i32x4::new(0, 1, 2, 3); let elem: i32 = 42; let e = i32x4::new(0, 1, 2, 42); - let r: i32x4 = transmute(vld1q_lane_s32(&elem, transmute(a), 3)); + let r: i32x4 = transmute(vld1q_lane_s32::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4494,7 +4406,7 @@ mod tests { let a = i64x1::new(0); let elem: i64 = 42; let e = i64x1::new(42); - let r: i64x1 = transmute(vld1_lane_s64(&elem, transmute(a), 0)); + let r: i64x1 = transmute(vld1_lane_s64::<0>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4503,7 +4415,7 @@ mod tests { let a = i64x2::new(0, 1); let elem: i64 = 42; let e = i64x2::new(0, 42); - let r: i64x2 = transmute(vld1q_lane_s64(&elem, transmute(a), 1)); + let r: i64x2 = transmute(vld1q_lane_s64::<1>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4512,7 +4424,7 @@ mod tests { let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: u8 = 42; let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u8x8 = transmute(vld1_lane_u8(&elem, transmute(a), 7)); + let r: u8x8 = transmute(vld1_lane_u8::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4521,7 +4433,7 @@ mod tests { let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: u8 = 42; let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: u8x16 = transmute(vld1q_lane_u8(&elem, transmute(a), 15)); + let r: u8x16 = transmute(vld1q_lane_u8::<15>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4530,7 +4442,7 @@ mod tests { let a = u16x4::new(0, 1, 2, 3); let elem: u16 = 42; let e = u16x4::new(0, 1, 2, 42); - let r: u16x4 = transmute(vld1_lane_u16(&elem, transmute(a), 3)); + let r: u16x4 = 
transmute(vld1_lane_u16::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4539,7 +4451,7 @@ mod tests { let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: u16 = 42; let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u16x8 = transmute(vld1q_lane_u16(&elem, transmute(a), 7)); + let r: u16x8 = transmute(vld1q_lane_u16::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4548,7 +4460,7 @@ mod tests { let a = u32x2::new(0, 1); let elem: u32 = 42; let e = u32x2::new(0, 42); - let r: u32x2 = transmute(vld1_lane_u32(&elem, transmute(a), 1)); + let r: u32x2 = transmute(vld1_lane_u32::<1>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4557,7 +4469,7 @@ mod tests { let a = u32x4::new(0, 1, 2, 3); let elem: u32 = 42; let e = u32x4::new(0, 1, 2, 42); - let r: u32x4 = transmute(vld1q_lane_u32(&elem, transmute(a), 3)); + let r: u32x4 = transmute(vld1q_lane_u32::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4566,7 +4478,7 @@ mod tests { let a = u64x1::new(0); let elem: u64 = 42; let e = u64x1::new(42); - let r: u64x1 = transmute(vld1_lane_u64(&elem, transmute(a), 0)); + let r: u64x1 = transmute(vld1_lane_u64::<0>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4575,7 +4487,7 @@ mod tests { let a = u64x2::new(0, 1); let elem: u64 = 42; let e = u64x2::new(0, 42); - let r: u64x2 = transmute(vld1q_lane_u64(&elem, transmute(a), 1)); + let r: u64x2 = transmute(vld1q_lane_u64::<1>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4584,7 +4496,7 @@ mod tests { let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: p8 = 42; let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u8x8 = transmute(vld1_lane_p8(&elem, transmute(a), 7)); + let r: u8x8 = transmute(vld1_lane_p8::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4593,7 +4505,7 @@ mod tests { let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: p8 = 42; let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: u8x16 = transmute(vld1q_lane_p8(&elem, transmute(a), 15)); + let r: u8x16 = transmute(vld1q_lane_p8::<15>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4602,7 +4514,7 @@ mod tests { let a = u16x4::new(0, 1, 2, 3); let elem: p16 = 42; let e = u16x4::new(0, 1, 2, 42); - let r: u16x4 = transmute(vld1_lane_p16(&elem, transmute(a), 3)); + let r: u16x4 = transmute(vld1_lane_p16::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4611,7 +4523,7 @@ mod tests { let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: p16 = 42; let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u16x8 = transmute(vld1q_lane_p16(&elem, transmute(a), 7)); + let r: u16x8 = transmute(vld1q_lane_p16::<7>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4620,7 +4532,7 @@ mod tests { let a = f32x2::new(0., 1.); let elem: f32 = 42.; let e = f32x2::new(0., 42.); - let r: f32x2 = transmute(vld1_lane_f32(&elem, transmute(a), 1)); + let r: f32x2 = transmute(vld1_lane_f32::<1>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4629,7 +4541,7 @@ mod tests { let a = f32x4::new(0., 1., 2., 3.); let elem: f32 = 42.; let e = f32x4::new(0., 1., 2., 42.); - let r: f32x4 = transmute(vld1q_lane_f32(&elem, transmute(a), 3)); + let r: f32x4 = transmute(vld1q_lane_f32::<3>(&elem, transmute(a))); assert_eq!(r, e) } @@ -4836,35 +4748,35 @@ mod tests { #[simd_test(enable = "neon")] unsafe fn test_vget_lane_u8() { let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r = vget_lane_u8(transmute(v), 1); + let r = vget_lane_u8::<1>(transmute(v)); assert_eq!(r, 2); } #[simd_test(enable = "neon")] unsafe fn test_vgetq_lane_u32() { let v = i32x4::new(1, 2, 3, 4); - let r = 
vgetq_lane_u32(transmute(v), 1); + let r = vgetq_lane_u32::<1>(transmute(v)); assert_eq!(r, 2); } #[simd_test(enable = "neon")] unsafe fn test_vgetq_lane_s32() { let v = i32x4::new(1, 2, 3, 4); - let r = vgetq_lane_s32(transmute(v), 1); + let r = vgetq_lane_s32::<1>(transmute(v)); assert_eq!(r, 2); } #[simd_test(enable = "neon")] unsafe fn test_vget_lane_u64() { let v: u64 = 1; - let r = vget_lane_u64(transmute(v), 0); + let r = vget_lane_u64::<0>(transmute(v)); assert_eq!(r, 1); } #[simd_test(enable = "neon")] unsafe fn test_vgetq_lane_u16() { let v = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r = vgetq_lane_u16(transmute(v), 1); + let r = vgetq_lane_u16::<1>(transmute(v)); assert_eq!(r, 2); } @@ -4875,7 +4787,7 @@ mod tests { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 31, 32, ); let e = i8x16::new(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); - let r: i8x16 = transmute(vextq_s8(transmute(a), transmute(b), 3)); + let r: i8x16 = transmute(vextq_s8::<3>(transmute(a), transmute(b))); assert_eq!(r, e); } @@ -4886,7 +4798,7 @@ mod tests { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 31, 32, ); let e = u8x16::new(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); - let r: u8x16 = transmute(vextq_u8(transmute(a), transmute(b), 3)); + let r: u8x16 = transmute(vextq_u8::<3>(transmute(a), transmute(b))); assert_eq!(r, e); } @@ -4894,7 +4806,7 @@ mod tests { unsafe fn test_vshrq_n_u8() { let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let e = u8x16::new(0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4); - let r: u8x16 = transmute(vshrq_n_u8(transmute(a), 2)); + let r: u8x16 = transmute(vshrq_n_u8::<2>(transmute(a))); assert_eq!(r, e); } @@ -4902,7 +4814,7 @@ mod tests { unsafe fn test_vshlq_n_u8() { let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let e = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64); - let r: u8x16 = transmute(vshlq_n_u8(transmute(a), 2)); + let r: u8x16 = transmute(vshlq_n_u8::<2>(transmute(a))); assert_eq!(r, e); } @@ -4957,7 +4869,7 @@ mod tests { #[simd_test(enable = "neon")] unsafe fn test_vgetq_lane_u64() { let v = i64x2::new(1, 2); - let r = vgetq_lane_u64(transmute(v), 1); + let r = vgetq_lane_u64::<1>(transmute(v)); assert_eq!(r, 2); } diff --git a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs index 07ca893c81..125659c105 100644 --- a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs +++ b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs @@ -23,7 +23,7 @@ macro_rules! test_vsli { let b = [$($b as $t),*]; let n_bit_mask: $t = (1 << $n) - 1; let e = [$(($a as $t & n_bit_mask) | ($b as $t << $n)),*]; - let r = $fn_id::<$n>(transmute(a), transmute(b), $n); + let r = $fn_id::<$n>(transmute(a), transmute(b)); let mut d = e; d = transmute(r); assert_eq!(d, e); @@ -60,7 +60,7 @@ macro_rules! test_vsri { let b = [$($b as $t),*]; let n_bit_mask = ((1 as $t << $n) - 1).rotate_right($n); let e = [$(($a as $t & n_bit_mask) | (($b as $t >> $n) & !n_bit_mask)),*]; - let r = $fn_id::<$n>(transmute(a), transmute(b), $n); + let r = $fn_id::<$n>(transmute(a), transmute(b)); let mut d = e; d = transmute(r); assert_eq!(d, e); From 6ed8d05bcc59003788c8ebb01e54ef49bda29d99 Mon Sep 17 00:00:00 2001 From: SparrowLii Date: Mon, 1 Mar 2021 13:13:59 +0800 Subject: [PATCH 3/5] replace static_assert_imm5! with static_assert! 
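Both forms enforce the same bound for a 32-bit element; a sketch of the
equivalence, assuming static_assert_imm5! asserts that the value fits in
5 unsigned bits:

    static_assert_imm5!(N);                          // i.e. 0 <= N < 2^5
    static_assert!(N: i32 where N >= 0 && N <= 31);  // the same range, spelled out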
---
 crates/core_arch/src/arm/neon/mod.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/crates/core_arch/src/arm/neon/mod.rs b/crates/core_arch/src/arm/neon/mod.rs
index cc8f4f6203..133c431d45 100644
--- a/crates/core_arch/src/arm/neon/mod.rs
+++ b/crates/core_arch/src/arm/neon/mod.rs
@@ -3548,7 +3548,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    static_assert_imm5!(N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     vshiftins_v2i32(a, b, int32x2_t(N, N))
 }
/// Shift Left and Insert (immediate)
@@ -3558,7 +3558,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    static_assert_imm5!(N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     vshiftins_v4i32(a, b, int32x4_t(N, N, N, N))
 }
/// Shift Left and Insert (immediate)
@@ -3648,7 +3648,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert_imm5!(N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(N, N)))
 }
/// Shift Left and Insert (immediate)
@@ -3658,7 +3658,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert_imm5!(N);
+    static_assert!(N: i32 where N >= 0 && N <= 31);
     transmute(vshiftins_v4i32(
         transmute(a),
         transmute(b),

From 6a79ab28271f45ebf04f3ef84e2c5d8c0cd668e2 Mon Sep 17 00:00:00 2001
From: SparrowLii
Date: Mon, 1 Mar 2021 13:39:00 +0800
Subject: [PATCH 4/5] set const generic in assert_instr

---
 crates/core_arch/src/aarch64/neon/mod.rs | 80 +++++------
 crates/core_arch/src/arm/neon/mod.rs | 176 +++++++++++------------
 2 files changed, 128 insertions(+), 128 deletions(-)

diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs
index c265676542..383f8a18a6 100644
--- a/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/crates/core_arch/src/aarch64/neon/mod.rs
@@ -2317,7 +2317,7 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     static_assert_imm3!(N);
@@ -2326,7 +2326,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     static_assert_imm3!(N);
@@ -2335,7 +2335,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     static_assert_imm4!(N);
@@ -2344,7 +2344,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert_imm4!(N);
@@ -2353,7 +2353,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -2362,7 +2362,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -2371,7 +2371,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     static_assert!(N: i32 where N >= 0 && N <= 63);
@@ -2380,7 +2380,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 63);
@@ -2389,7 +2389,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     static_assert_imm3!(N);
@@ -2398,7 +2398,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     static_assert_imm3!(N);
@@ -2407,7 +2407,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     static_assert_imm4!(N);
@@ -2416,7 +2416,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     static_assert_imm4!(N);
@@ -2425,7 +2425,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -2434,7 +2434,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -2443,7 +2443,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     static_assert!(N: i32 where N >= 0 && N <= 63);
@@ -2452,7 +2452,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 63);
@@ -2461,7 +2461,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
     static_assert_imm3!(N);
@@ -2470,7 +2470,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     static_assert_imm3!(N);
@@ -2479,7 +2479,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
     static_assert_imm4!(N);
@@ -2488,7 +2488,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 /// Shift Left and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, n = 1))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
     static_assert_imm4!(N);
@@ -2498,7 +2498,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2507,7 +2507,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2516,7 +2516,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
@@ -2525,7 +2525,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
@@ -2534,7 +2534,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert!(N: i32 where N >= 1 && N <= 32);
@@ -2543,7 +2543,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert!(N: i32 where N >= 1 && N <= 32);
@@ -2552,7 +2552,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     static_assert!(N: i32 where N >= 1 && N <= 64);
@@ -2561,7 +2561,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     static_assert!(N: i32 where N >= 1 && N <= 64);
@@ -2570,7 +2570,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2579,7 +2579,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2588,7 +2588,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
@@ -2597,7 +2597,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
@@ -2606,7 +2606,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     static_assert!(N: i32 where N >= 1 && N <= 32);
@@ -2615,7 +2615,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     static_assert!(N: i32 where N >= 1 && N <= 32);
@@ -2624,7 +2624,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     static_assert!(N: i32 where N >= 1 && N <= 64);
@@ -2633,7 +2633,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     static_assert!(N: i32 where N >= 1 && N <= 64);
@@ -2642,7 +2642,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2651,7 +2651,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     static_assert!(N: i32 where N >= 1 && N <= 8);
@@ -2660,7 +2660,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
@@ -2669,7 +2669,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[cfg_attr(test, assert_instr(sri, N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
     static_assert!(N: i32 where N >= 1 && N <= 16);
diff --git a/crates/core_arch/src/arm/neon/mod.rs b/crates/core_arch/src/arm/neon/mod.rs
index 133c431d45..7080eca5ac 100644
--- a/crates/core_arch/src/arm/neon/mod.rs
+++ b/crates/core_arch/src/arm/neon/mod.rs
@@ -590,8 +590,8 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -602,8 +602,8 @@ pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> in
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
 pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
     static_assert_imm4!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -614,8 +614,8 @@ pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -626,8 +626,8 @@ pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -638,8 +638,8 @@ pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
 pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
     simd_insert(src, LANE as u32, *ptr)
@@ -650,8 +650,8 @@ pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -662,8 +662,8 @@ pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
 pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0);
     simd_insert(src, LANE as u32, *ptr)
@@ -674,8 +674,8 @@ pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
 pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
     simd_insert(src, LANE as u32, *ptr)
@@ -686,8 +686,8 @@ pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -698,8 +698,8 @@ pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> u
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
 pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t {
     static_assert_imm4!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -710,8 +710,8 @@ pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -722,8 +722,8 @@ pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -734,8 +734,8 @@ pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t)
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
 pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
     simd_insert(src, LANE as u32, *ptr)
@@ -746,8 +746,8 @@ pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -758,8 +758,8 @@ pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t)
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
 pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0);
     simd_insert(src, LANE as u32, *ptr)
@@ -770,8 +770,8 @@ pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
 pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
     simd_insert(src, LANE as u32, *ptr)
@@ -782,8 +782,8 @@ pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t)
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -794,8 +794,8 @@ pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> p
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
 pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
     static_assert_imm4!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -806,8 +806,8 @@ pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) ->
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -818,8 +818,8 @@ pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
 pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
     static_assert_imm3!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -830,8 +830,8 @@ pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t)
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
 pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
     static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
     simd_insert(src, LANE as u32, *ptr)
@@ -842,8 +842,8 @@ pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t)
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
 pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
     static_assert_imm2!(LANE);
     simd_insert(src, LANE as u32, *ptr)
@@ -3207,8 +3207,8 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     if N < 0 || N > 15 {
@@ -3313,8 +3313,8 @@ pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     if N < 0 || N > 15 {
@@ -3497,7 +3497,7 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     static_assert_imm3!(N);
@@ -3508,7 +3508,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 /// Shift Left and Insert (immediate)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     static_assert_imm3!(N);
@@ -3523,7 +3523,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     static_assert_imm4!(N);
@@ -3534,7 +3534,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert_imm4!(N);
@@ -3545,7 +3545,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -3555,7 +3555,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -3565,7 +3565,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     static_assert!(N : i32 where 0 <= N && N <= 63);
@@ -3575,7 +3575,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     static_assert!(N : i32 where 0 <= N && N <= 63);
@@ -3585,7 +3585,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     static_assert_imm3!(N);
@@ -3600,7 +3600,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     static_assert_imm3!(N);
@@ -3615,7 +3615,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     static_assert_imm4!(N);
@@ -3630,7 +3630,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     static_assert_imm4!(N);
@@ -3645,7 +3645,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -3655,7 +3655,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     static_assert!(N: i32 where N >= 0 && N <= 31);
@@ -3669,7 +3669,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     static_assert!(N : i32 where 0 <= N && N <= 63);
@@ -3683,7 +3683,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     static_assert!(N : i32 where 0 <= N && N <= 63);
@@ -3697,7 +3697,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
     static_assert_imm3!(N);
@@ -3712,7 +3712,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     static_assert_imm3!(N);
@@ -3727,7 +3727,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 /// Shift Left and Insert (immediate)
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
     static_assert_imm4!(N);
@@ -3742,7 +3742,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsli.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
     static_assert_imm4!(N);
@@ -3758,7 +3758,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3769,7 +3769,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3784,7 +3784,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);
@@ -3795,7 +3795,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);
@@ -3806,7 +3806,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert!(N : i32 where 1 <= N && N <= 32);
@@ -3816,7 +3816,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert!(N : i32 where 1 <= N && N <= 32);
@@ -3826,7 +3826,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     static_assert!(N : i32 where 1 <= N && N <= 64);
@@ -3836,7 +3836,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     static_assert!(N : i32 where 1 <= N && N <= 64);
@@ -3846,7 +3846,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3861,7 +3861,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3876,7 +3876,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);
@@ -3891,7 +3891,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);
@@ -3906,7 +3906,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     static_assert!(N : i32 where 1 <= N && N <= 32);
@@ -3920,7 +3920,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.32", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     static_assert!(N : i32 where 1 <= N && N <= 32);
@@ -3934,7 +3934,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     static_assert!(N : i32 where 1 <= N && N <= 64);
@@ -3948,7 +3948,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.64", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     static_assert!(N : i32 where 1 <= N && N <= 64);
@@ -3962,7 +3962,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3977,7 +3977,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.8", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     static_assert!(N : i32 where 1 <= N && N <= 8);
@@ -3992,7 +3992,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);
@@ -4007,7 +4007,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr("vsri.16", n = 1))]
+#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
 #[rustc_legacy_const_generics(2)]
 pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
     static_assert!(N : i32 where 1 <= N && N <= 16);

From c4ef544d6cfe4e4bf1650c2265fc349e601b246e Mon Sep 17 00:00:00 2001
From: SparrowLii
Date: Mon, 1 Mar 2021 17:09:02 +0800
Subject: [PATCH 5/5] Convert vsli_n_* methods to const generics

---
 crates/core_arch/src/aarch64/neon/mod.rs   | 200 ++---
 crates/core_arch/src/arm/neon/mod.rs       | 724 ++++++++++--------
 .../src/arm/neon/shift_and_insert_tests.rs |   2 +-
 3 files changed, 507 insertions(+), 419 deletions(-)

diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs
index 383f8a18a6..bba024635a 100644
--- a/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/crates/core_arch/src/aarch64/neon/mod.rs
@@ -2498,182 +2498,182 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    vsri_n_s8_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    vsri_n_s8_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    vsriq_n_s8_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    vsriq_n_s8_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    vsri_n_s16_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    vsri_n_s16_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    vsriq_n_s16_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    vsriq_n_s16_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    static_assert!(N: i32 where N >= 1 && N <= 32);
-    vsri_n_s32_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t {
+    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+    vsri_n_s32_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    static_assert!(N: i32 where N >= 1 && N <= 32);
-    vsriq_n_s32_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t {
+    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+    vsriq_n_s32_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    static_assert!(N: i32 where N >= 1 && N <= 64);
-    vsri_n_s64_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t {
+    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+    vsri_n_s64_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    static_assert!(N: i32 where N >= 1 && N <= 64);
-    vsriq_n_s64_(a, b, N)
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t {
+    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+    vsriq_n_s64_(a, b, n)
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    transmute(vsri_n_s8_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    transmute(vsriq_n_s8_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    transmute(vsri_n_s16_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    transmute(vsriq_n_s16_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert!(N: i32 where N >= 1 && N <= 32);
-    transmute(vsri_n_s32_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t {
+    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+    transmute(vsri_n_s32_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert!(N: i32 where N >= 1 && N <= 32);
-    transmute(vsriq_n_s32_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t {
+    assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n);
+    transmute(vsriq_n_s32_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    static_assert!(N: i32 where N >= 1 && N <= 64);
-    transmute(vsri_n_s64_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t {
+    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+    transmute(vsri_n_s64_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    static_assert!(N: i32 where N >= 1 && N <= 64);
-    transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t {
+    assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n);
+    transmute(vsriq_n_s64_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    transmute(vsri_n_s8_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
-    static_assert!(N: i32 where N >= 1 && N <= 8);
-    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t {
+    assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n);
+    transmute(vsriq_n_s8_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    transmute(vsri_n_s16_(transmute(a), transmute(b), n))
 }
 /// Shift Right and Insert (immediate)
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sri, N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
-    static_assert!(N: i32 where N >= 1 && N <= 16);
-    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
+#[cfg_attr(test, assert_instr(sri, n = 1))]
+#[rustc_args_required_const(2)]
+pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t {
+    assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n);
+    transmute(vsriq_n_s16_(transmute(a), transmute(b), n))
 }
 
 #[cfg(test)]
diff --git a/crates/core_arch/src/arm/neon/mod.rs b/crates/core_arch/src/arm/neon/mod.rs
index 7080eca5ac..9fd178f359 100644
--- a/crates/core_arch/src/arm/neon/mod.rs
+++ b/crates/core_arch/src/arm/neon/mod.rs
@@ -589,264 +589,352 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t, lane: i32) -> int8x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
-pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
-    static_assert_imm4!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t, lane: i32) -> int8x16_t {
+    assert!(
+        0 <= lane && lane <= 15,
+        "must have 0 ≤ lane ≤ 15, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t, lane: i32) -> int16x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 /// Load one single-element structure to one lane of one register.
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
-pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
-    static_assert_imm4!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t, lane: i32) -> int8x16_t {
+    assert!(
+        0 <= lane && lane <= 15,
+        "must have 0 ≤ lane ≤ 15, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t, lane: i32) -> int16x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t, lane: i32) -> int16x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
-pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t, lane: i32) -> int32x2_t {
+    assert!(
+        0 <= lane && lane <= 1,
+        "must have 0 ≤ lane ≤ 1, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t, lane: i32) -> int32x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
-pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))]
+pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t, lane: i32) -> int64x1_t {
+    assert!(
+        0 <= lane && lane <= 0,
+        "must have 0 ≤ lane ≤ 0, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
-pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t, lane: i32) -> int64x2_t {
+    assert!(
+        0 <= lane && lane <= 1,
+        "must have 0 ≤ lane ≤ 1, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t, lane: i32) -> uint8x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
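// [Editorial note] The upper bound in each assert is `lane_count - 1`, where the
// lane count follows directly from the type sizes; that is why the 64x1 variants
// above only accept lane 0. A const sketch (editorial, not a stdarch helper):
const fn lane_count(vector_bytes: usize, element_bytes: usize) -> usize {
    vector_bytes / element_bytes
}
const _: () = assert!(lane_count(8, 8) == 1); // int64x1_t: lane 0 only
const _: () = assert!(lane_count(8, 1) == 8); // int8x8_t: lanes 0..=7
const _: () = assert!(lane_count(16, 2) == 8); // int16x8_t: lanes 0..=7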
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
-pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t {
-    static_assert_imm4!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t, lane: i32) -> uint8x16_t {
+    assert!(
+        0 <= lane && lane <= 15,
+        "must have 0 ≤ lane ≤ 15, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t, lane: i32) -> uint16x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t, lane: i32) -> uint16x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
-pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t, lane: i32) -> uint32x2_t {
+    assert!(
+        0 <= lane && lane <= 1,
+        "must have 0 ≤ lane ≤ 1, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t, lane: i32) -> uint32x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, LANE = 0))]
-pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 0);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr, lane = 0))]
+pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t, lane: i32) -> uint64x1_t {
+    assert!(
+        0 <= lane && lane <= 0,
+        "must have 0 ≤ lane ≤ 0, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
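// [Editorial note] `simd_insert` is a compiler intrinsic, but its effect is just
// "copy the vector and replace one element". A safe array model of that effect
// (illustrative only; the real intrinsic needs a constant index):
fn insert_model<T: Copy, const N: usize>(src: [T; N], lane: usize, value: T) -> [T; N] {
    let mut out = src;
    out[lane] = value; // panics when lane >= N, like the asserts above
    out
}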
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
-pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", lane = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t, lane: i32) -> uint64x2_t {
+    assert!(
+        0 <= lane && lane <= 1,
+        "must have 0 ≤ lane ≤ 1, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
+pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t, lane: i32) -> poly8x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 15))]
-pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
-    static_assert_imm4!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", lane = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 15))]
+pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t, lane: i32) -> poly8x16_t {
+    assert!(
+        0 <= lane && lane <= 15,
+        "must have 0 ≤ lane ≤ 15, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t, lane: i32) -> poly16x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 7))]
-pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
-    static_assert_imm3!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", lane = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 7))]
pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t, lane: i32) -> poly16x8_t {
+    assert!(
+        0 <= lane && lane <= 7,
+        "must have 0 ≤ lane ≤ 7, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 1))]
-pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
-    static_assert!(LANE : i32 where 0 <= LANE && LANE <= 1);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 1))]
+pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t, lane: i32) -> float32x2_t {
+    assert!(
+        0 <= lane && lane <= 1,
+        "must have 0 ≤ lane ≤ 1, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
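// [Editorial note] Once the lane range is checked, the remaining safety
// obligation is on `ptr`: it must be valid and aligned for a read of the element
// type, and NEON must actually be available. A common pattern is a safe,
// reference-taking wrapper like this hypothetical sketch:
fn set_lane_1(v: float32x2_t, x: &f32) -> float32x2_t {
    // SAFETY: `x` is a valid, aligned `f32`; lane 1 is in range for
    // `float32x2_t`; the build is assumed to enable NEON.
    unsafe { vld1_lane_f32(x, v, 1) }
}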
 
 /// Load one single-element structure to one lane of one register.
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, LANE = 3))]
-pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
-    static_assert_imm2!(LANE);
-    simd_insert(src, LANE as u32, *ptr)
+#[rustc_args_required_const(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", lane = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1, lane = 3))]
+pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t, lane: i32) -> float32x4_t {
+    assert!(
+        0 <= lane && lane <= 3,
+        "must have 0 ≤ lane ≤ 3, but lane = {}",
+        lane
+    );
+    simd_insert(src, lane as u32, *ptr)
 }
 
 /// Load one single-element structure and Replicate to all lanes (of one register).
@@ -856,7 +944,7 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
-    let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0)));
+    let x = vld1_lane_s8(ptr, transmute(i8x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -867,7 +955,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
-    let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0)));
+    let x = vld1q_lane_s8(ptr, transmute(i8x16::splat(0)), 0);
     simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -878,7 +966,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
-    let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0)));
+    let x = vld1_lane_s16(ptr, transmute(i16x4::splat(0)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -889,7 +977,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
-    let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0)));
+    let x = vld1q_lane_s16(ptr, transmute(i16x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -900,7 +988,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
-    let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0)));
+    let x = vld1_lane_s32(ptr, transmute(i32x2::splat(0)), 0);
     simd_shuffle2(x, x, [0, 0])
 }
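// [Editorial note] Every `vld1*_dup_*` in this stretch follows the same recipe:
// load the element into lane 0 of a zeroed vector, then broadcast lane 0 with a
// shuffle whose index vector is all zeros. A scalar model (illustrative only):
fn dup_model<T: Copy + Default, const N: usize>(x: &T) -> [T; N] {
    let mut v = [T::default(); N]; // the `splat(0)` starting vector
    v[0] = *x;                     // the lane-0 load
    [v[0]; N]                      // the all-zero-index shuffle
}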
@@ -911,7 +999,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t {
-    let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0)));
+    let x = vld1q_lane_s32(ptr, transmute(i32x4::splat(0)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -934,7 +1022,7 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
-    let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0)));
+    let x = vld1q_lane_s64(ptr, transmute(i64x2::splat(0)), 0);
     simd_shuffle2(x, x, [0, 0])
 }
@@ -945,7 +1033,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
-    let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0)));
+    let x = vld1_lane_u8(ptr, transmute(u8x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -956,7 +1044,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
-    let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0)));
+    let x = vld1q_lane_u8(ptr, transmute(u8x16::splat(0)), 0);
     simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -967,7 +1055,7 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
-    let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0)));
+    let x = vld1_lane_u16(ptr, transmute(u16x4::splat(0)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -978,7 +1066,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
-    let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0)));
+    let x = vld1q_lane_u16(ptr, transmute(u16x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -989,7 +1077,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
-    let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0)));
+    let x = vld1_lane_u32(ptr, transmute(u32x2::splat(0)), 0);
     simd_shuffle2(x, x, [0, 0])
 }
@@ -1000,7 +1088,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t {
-    let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0)));
+    let x = vld1q_lane_u32(ptr, transmute(u32x4::splat(0)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -1023,7 +1111,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
-    let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0)));
+    let x = vld1q_lane_u64(ptr, transmute(u64x2::splat(0)), 0);
     simd_shuffle2(x, x, [0, 0])
 }
@@ -1034,7 +1122,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
-    let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0)));
+    let x = vld1_lane_p8(ptr, transmute(u8x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -1045,7 +1133,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
-    let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0)));
+    let x = vld1q_lane_p8(ptr, transmute(u8x16::splat(0)), 0);
     simd_shuffle16(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -1056,7 +1144,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
-    let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0)));
+    let x = vld1_lane_p16(ptr, transmute(u16x4::splat(0)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -1067,7 +1155,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
-    let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0)));
+    let x = vld1q_lane_p16(ptr, transmute(u16x8::splat(0)), 0);
     simd_shuffle8(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
 }
@@ -1078,7 +1166,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
-    let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.)));
+    let x = vld1_lane_f32(ptr, transmute(f32x2::splat(0.)), 0);
     simd_shuffle2(x, x, [0, 0])
 }
@@ -1089,7 +1177,7 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1r))]
 pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t {
-    let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.)));
+    let x = vld1q_lane_f32(ptr, transmute(f32x4::splat(0.)), 0);
     simd_shuffle4(x, x, [0, 0, 0, 0])
 }
@@ -2976,14 +3064,14 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 1))]
 // Based on the discussion in https://github.com/rust-lang/stdarch/pull/792
 // `mov` seems to be an acceptable intrinsic to compile to
 // #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(vmov, imm5 = 1))]
-pub unsafe fn vgetq_lane_u64<const imm5: i32>(v: uint64x2_t) -> u64 {
-    static_assert!(imm5 : i32 where imm5 >= 0 && imm5 <= 1);
+pub unsafe fn vgetq_lane_u64(v: uint64x2_t, imm5: i32) -> u64 {
+    assert!(imm5 >= 0 && imm5 <= 1);
     simd_extract(v, imm5 as u32)
 }
@@ -2991,13 +3079,13 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, imm5 = 0))]
 // FIXME: no 32bit this seems to be turned into two vmov.32 instructions
 // validate correctness
-pub unsafe fn vget_lane_u64<const imm5: i32>(v: uint64x1_t) -> u64 {
-    static_assert!(imm5 : i32 where imm5 == 0);
+pub unsafe fn vget_lane_u64(v: uint64x1_t, imm5: i32) -> u64 {
+    assert!(imm5 == 0);
     simd_extract(v, 0)
 }
@@ -3005,11 +3093,11 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", imm5 = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))]
-pub unsafe fn vgetq_lane_u16<const imm5: i32>(v: uint16x8_t) -> u16 {
-    static_assert_imm3!(imm5);
+pub unsafe fn vgetq_lane_u16(v: uint16x8_t, imm5: i32) -> u16 {
+    assert!(imm5 >= 0 && imm5 <= 7);
     simd_extract(v, imm5 as u32)
 }
@@ -3017,11 +3105,11 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))]
-pub unsafe fn vgetq_lane_u32<const imm5: i32>(v: uint32x4_t) -> u32 {
-    static_assert_imm2!(imm5);
+pub unsafe fn vgetq_lane_u32(v: uint32x4_t, imm5: i32) -> u32 {
+    assert!(imm5 >= 0 && imm5 <= 3);
    simd_extract(v, imm5 as u32)
 }
@@ -3029,11 +3117,11 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))]
-pub unsafe fn vgetq_lane_s32<const imm5: i32>(v: int32x4_t) -> i32 {
-    static_assert_imm2!(imm5);
+pub unsafe fn vgetq_lane_s32(v: int32x4_t, imm5: i32) -> i32 {
+    assert!(imm5 >= 0 && imm5 <= 3);
     simd_extract(v, imm5 as u32)
 }
@@ -3041,11 +3129,11 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[rustc_legacy_const_generics(1)]
+#[rustc_args_required_const(1)]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", imm5 = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))]
-pub unsafe fn vget_lane_u8<const imm5: i32>(v: uint8x8_t) -> u8 {
-    static_assert_imm3!(imm5);
+pub unsafe fn vget_lane_u8(v: uint8x8_t, imm5: i32) -> u8 {
+    assert!(imm5 >= 0 && imm5 <= 7);
     simd_extract(v, imm5 as u32)
 }
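// [Editorial note] The extract intrinsics change shape the same way as the lane
// loads. Note that these reverted bodies use the bare `assert!(imm5 >= 0 && ...)`
// form, without the descriptive message used elsewhere. A hypothetical call site
// inside this module (sketch only):
unsafe fn demo_second_lane(v: uint16x8_t) -> u16 {
    // `1` must still be a literal in 0..=7 under rustc_args_required_const.
    vgetq_lane_u16(v, 1)
}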
target_arch = "aarch64"), assert_instr("ushr", imm3 = 1))] -#[rustc_legacy_const_generics(1)] -pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { +#[rustc_args_required_const(1)] +pub unsafe fn vshrq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t { if imm3 < 0 || imm3 > 7 { unreachable_unchecked(); } else { @@ -3177,8 +3265,8 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshl.s8", imm3 = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, imm3 = 1))] -#[rustc_legacy_const_generics(1)] -pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { +#[rustc_args_required_const(1)] +pub unsafe fn vshlq_n_u8(a: uint8x16_t, imm3: i32) -> uint8x16_t { if imm3 < 0 || imm3 > 7 { unreachable_unchecked(); } else { @@ -3207,14 +3295,14 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - if N < 0 || N > 15 { +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))] +#[rustc_args_required_const(2)] +pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t { + if n < 0 || n > 15 { unreachable_unchecked(); }; - match N & 0b1111 { + match n & 0b1111 { 0 => simd_shuffle16(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle16( a, @@ -3313,14 +3401,14 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - if N < 0 || N > 15 { +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", n = 3))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, n = 3))] +#[rustc_args_required_const(2)] +pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { + if n < 0 || n > 15 { unreachable_unchecked(); }; - match N & 0b1111 { + match n & 0b1111 { 0 => simd_shuffle16(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle16( a, @@ -3758,22 +3846,22 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, 
assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; vshiftins_v16i8( a, b, @@ -3784,73 +3872,73 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); + let n = -n as i16; vshiftins_v4i16(a, b, int16x4_t(n, n, n, n)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); + let n = -n as i16; vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where 1 <= N && N <= 32); - vshiftins_v2i32(a, b, int32x2_t(-N, -N)) +#[cfg_attr(test, assert_instr("vsri.32", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t { + assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); + vshiftins_v2i32(a, b, int32x2_t(-n, -n)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where 1 <= N && N <= 32); - vshiftins_v4i32(a, b, int32x4_t(-N, -N, -N, -N)) +#[cfg_attr(test, assert_instr("vsri.32", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t { + assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); + vshiftins_v4i32(a, b, int32x4_t(-n, -n, -n, -n)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where 1 <= N && N <= 64); - vshiftins_v1i64(a, b, int64x1_t(-N as i64)) +#[cfg_attr(test, assert_instr("vsri.64", n = 1))] 
+#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t { + assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); + vshiftins_v1i64(a, b, int64x1_t(-n as i64)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where 1 <= N && N <= 64); - vshiftins_v2i64(a, b, int64x2_t(-N as i64, -N as i64)) +#[cfg_attr(test, assert_instr("vsri.64", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t { + assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); + vshiftins_v2i64(a, b, int64x2_t(-n as i64, -n as i64)) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; transmute(vshiftins_v8i8( transmute(a), transmute(b), @@ -3861,11 +3949,11 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; transmute(vshiftins_v16i8( transmute(a), transmute(b), @@ -3876,11 +3964,11 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); + let n = -n as i16; transmute(vshiftins_v4i16( transmute(a), transmute(b), @@ -3891,11 +3979,11 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, 
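// [Editorial note] The ARM fallback lowers SRI onto the `vshiftins` intrinsics,
// which encode shift direction in the sign of the per-lane count: a negative
// count shifts right. That is why each body negates `n` before building the
// count vector. A scalar sketch of that sign convention (illustrative only,
// modeling just the count encoding, not the insert step):
fn shift_with_signed_count(x: u16, count: i16) -> u16 {
    if count >= 0 {
        x.checked_shl(count as u32).unwrap_or(0)
    } else {
        x.checked_shr((-(count as i32)) as u32).unwrap_or(0)
    }
}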
but n = {}", n); + let n = -n as i16; transmute(vshiftins_v8i16( transmute(a), transmute(b), @@ -3906,67 +3994,67 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where 1 <= N && N <= 32); +#[cfg_attr(test, assert_instr("vsri.32", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t { + assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); transmute(vshiftins_v2i32( transmute(a), transmute(b), - int32x2_t(-N, -N), + int32x2_t(-n, -n), )) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where 1 <= N && N <= 32); +#[cfg_attr(test, assert_instr("vsri.32", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t { + assert!(1 <= n && n <= 32, "must have 1 ≤ n ≤ 32, but n = {}", n); transmute(vshiftins_v4i32( transmute(a), transmute(b), - int32x4_t(-N, -N, -N, -N), + int32x4_t(-n, -n, -n, -n), )) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where 1 <= N && N <= 64); +#[cfg_attr(test, assert_instr("vsri.64", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t { + assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); transmute(vshiftins_v1i64( transmute(a), transmute(b), - int64x1_t(-N as i64), + int64x1_t(-n as i64), )) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where 1 <= N && N <= 64); +#[cfg_attr(test, assert_instr("vsri.64", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t { + assert!(1 <= n && n <= 64, "must have 1 ≤ n ≤ 64, but n = {}", n); transmute(vshiftins_v2i64( transmute(a), transmute(b), - int64x2_t(-N as i64, -N as i64), + int64x2_t(-n as i64, -n as i64), )) } /// Shift Right and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; transmute(vshiftins_v8i8( transmute(a), transmute(b), @@ -3977,11 +4065,11 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> 
poly8x8_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); - let n = -N as i8; +#[cfg_attr(test, assert_instr("vsri.8", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t { + assert!(1 <= n && n <= 8, "must have 1 ≤ n ≤ 8, but n = {}", n); + let n = -n as i8; transmute(vshiftins_v16i8( transmute(a), transmute(b), @@ -3992,11 +4080,11 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); + let n = -n as i16; transmute(vshiftins_v4i16( transmute(a), transmute(b), @@ -4007,11 +4095,11 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); - let n = -N as i16; +#[cfg_attr(test, assert_instr("vsri.16", n = 1))] +#[rustc_args_required_const(2)] +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t { + assert!(1 <= n && n <= 16, "must have 1 ≤ n ≤ 16, but n = {}", n); + let n = -n as i16; transmute(vshiftins_v8i16( transmute(a), transmute(b), @@ -4352,7 +4440,7 @@ mod tests { let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i8 = 42; let e = i8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i8x8 = transmute(vld1_lane_s8::<7>(&elem, transmute(a))); + let r: i8x8 = transmute(vld1_lane_s8(&elem, transmute(a), 7)); assert_eq!(r, e) } @@ -4361,7 +4449,7 @@ mod tests { let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: i8 = 42; let e = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: i8x16 = transmute(vld1q_lane_s8::<15>(&elem, transmute(a))); + let r: i8x16 = transmute(vld1q_lane_s8(&elem, transmute(a), 15)); assert_eq!(r, e) } @@ -4370,7 +4458,7 @@ mod tests { let a = i16x4::new(0, 1, 2, 3); let elem: i16 = 42; let e = i16x4::new(0, 1, 2, 42); - let r: i16x4 = transmute(vld1_lane_s16::<3>(&elem, transmute(a))); + let r: i16x4 = transmute(vld1_lane_s16(&elem, transmute(a), 3)); assert_eq!(r, e) } @@ -4379,7 +4467,7 @@ mod tests { let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i16 = 42; let e = i16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i16x8 = transmute(vld1q_lane_s16::<7>(&elem, transmute(a))); + let r: i16x8 = transmute(vld1q_lane_s16(&elem, transmute(a), 7)); assert_eq!(r, e) } @@ -4388,7 +4476,7 @@ mod tests { let a = i32x2::new(0, 1); let elem: i32 = 42; let e = i32x2::new(0, 42); - let r: i32x2 = transmute(vld1_lane_s32::<1>(&elem, transmute(a))); + let r: i32x2 = transmute(vld1_lane_s32(&elem, transmute(a), 1)); assert_eq!(r, e) } @@ -4397,7 +4485,7 @@ mod tests { 
         let a = i32x4::new(0, 1, 2, 3);
         let elem: i32 = 42;
         let e = i32x4::new(0, 1, 2, 42);
-        let r: i32x4 = transmute(vld1q_lane_s32::<3>(&elem, transmute(a)));
+        let r: i32x4 = transmute(vld1q_lane_s32(&elem, transmute(a), 3));
         assert_eq!(r, e)
     }
@@ -4406,7 +4494,7 @@ mod tests {
         let a = i64x1::new(0);
         let elem: i64 = 42;
         let e = i64x1::new(42);
-        let r: i64x1 = transmute(vld1_lane_s64::<0>(&elem, transmute(a)));
+        let r: i64x1 = transmute(vld1_lane_s64(&elem, transmute(a), 0));
         assert_eq!(r, e)
     }
@@ -4415,7 +4503,7 @@ mod tests {
         let a = i64x2::new(0, 1);
         let elem: i64 = 42;
         let e = i64x2::new(0, 42);
-        let r: i64x2 = transmute(vld1q_lane_s64::<1>(&elem, transmute(a)));
+        let r: i64x2 = transmute(vld1q_lane_s64(&elem, transmute(a), 1));
         assert_eq!(r, e)
     }
@@ -4424,7 +4512,7 @@ mod tests {
         let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let elem: u8 = 42;
         let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42);
-        let r: u8x8 = transmute(vld1_lane_u8::<7>(&elem, transmute(a)));
+        let r: u8x8 = transmute(vld1_lane_u8(&elem, transmute(a), 7));
         assert_eq!(r, e)
     }
@@ -4433,7 +4521,7 @@ mod tests {
         let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
         let elem: u8 = 42;
         let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42);
-        let r: u8x16 = transmute(vld1q_lane_u8::<15>(&elem, transmute(a)));
+        let r: u8x16 = transmute(vld1q_lane_u8(&elem, transmute(a), 15));
         assert_eq!(r, e)
     }
@@ -4442,7 +4530,7 @@ mod tests {
         let a = u16x4::new(0, 1, 2, 3);
         let elem: u16 = 42;
         let e = u16x4::new(0, 1, 2, 42);
-        let r: u16x4 = transmute(vld1_lane_u16::<3>(&elem, transmute(a)));
+        let r: u16x4 = transmute(vld1_lane_u16(&elem, transmute(a), 3));
         assert_eq!(r, e)
     }
@@ -4451,7 +4539,7 @@ mod tests {
         let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
         let elem: u16 = 42;
         let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42);
-        let r: u16x8 = transmute(vld1q_lane_u16::<7>(&elem, transmute(a)));
+        let r: u16x8 = transmute(vld1q_lane_u16(&elem, transmute(a), 7));
         assert_eq!(r, e)
     }
@@ -4460,7 +4548,7 @@ mod tests {
         let a = u32x2::new(0, 1);
         let elem: u32 = 42;
         let e = u32x2::new(0, 42);
-        let r: u32x2 = transmute(vld1_lane_u32::<1>(&elem, transmute(a)));
+        let r: u32x2 = transmute(vld1_lane_u32(&elem, transmute(a), 1));
         assert_eq!(r, e)
     }
@@ -4469,7 +4557,7 @@ mod tests {
         let a = u32x4::new(0, 1, 2, 3);
         let elem: u32 = 42;
         let e = u32x4::new(0, 1, 2, 42);
-        let r: u32x4 = transmute(vld1q_lane_u32::<3>(&elem, transmute(a)));
+        let r: u32x4 = transmute(vld1q_lane_u32(&elem, transmute(a), 3));
         assert_eq!(r, e)
     }
@@ -4478,7 +4566,7 @@ mod tests {
         let a = u64x1::new(0);
         let elem: u64 = 42;
         let e = u64x1::new(42);
-        let r: u64x1 = transmute(vld1_lane_u64::<0>(&elem, transmute(a)));
+        let r: u64x1 = transmute(vld1_lane_u64(&elem, transmute(a), 0));
         assert_eq!(r, e)
     }
@@ -4487,7 +4575,7 @@ mod tests {
         let a = u64x2::new(0, 1);
         let elem: u64 = 42;
         let e = u64x2::new(0, 42);
-        let r: u64x2 = transmute(vld1q_lane_u64::<1>(&elem, transmute(a)));
+        let r: u64x2 = transmute(vld1q_lane_u64(&elem, transmute(a), 1));
         assert_eq!(r, e)
     }
@@ -4496,7 +4584,7 @@ mod tests {
         let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
         let elem: p8 = 42;
         let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42);
-        let r: u8x8 = transmute(vld1_lane_p8::<7>(&elem, transmute(a)));
+        let r: u8x8 = transmute(vld1_lane_p8(&elem, transmute(a), 7));
         assert_eq!(r, e)
     }
@@ -4505,7 +4593,7 @@ mod tests {
         let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
         let elem: p8 = 42;
         let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42);
-        let r: u8x16 = transmute(vld1q_lane_p8::<15>(&elem, transmute(a)));
+        let r: u8x16 = transmute(vld1q_lane_p8(&elem, transmute(a), 15));
         assert_eq!(r, e)
     }
@@ -4514,7 +4602,7 @@ mod tests {
         let a = u16x4::new(0, 1, 2, 3);
         let elem: p16 = 42;
         let e = u16x4::new(0, 1, 2, 42);
-        let r: u16x4 = transmute(vld1_lane_p16::<3>(&elem, transmute(a)));
+        let r: u16x4 = transmute(vld1_lane_p16(&elem, transmute(a), 3));
         assert_eq!(r, e)
     }
@@ -4523,7 +4611,7 @@ mod tests {
         let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
         let elem: p16 = 42;
         let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42);
-        let r: u16x8 = transmute(vld1q_lane_p16::<7>(&elem, transmute(a)));
+        let r: u16x8 = transmute(vld1q_lane_p16(&elem, transmute(a), 7));
         assert_eq!(r, e)
     }
@@ -4532,7 +4620,7 @@ mod tests {
         let a = f32x2::new(0., 1.);
         let elem: f32 = 42.;
         let e = f32x2::new(0., 42.);
-        let r: f32x2 = transmute(vld1_lane_f32::<1>(&elem, transmute(a)));
+        let r: f32x2 = transmute(vld1_lane_f32(&elem, transmute(a), 1));
         assert_eq!(r, e)
     }
@@ -4541,7 +4629,7 @@ mod tests {
         let a = f32x4::new(0., 1., 2., 3.);
         let elem: f32 = 42.;
         let e = f32x4::new(0., 1., 2., 42.);
-        let r: f32x4 = transmute(vld1q_lane_f32::<3>(&elem, transmute(a)));
+        let r: f32x4 = transmute(vld1q_lane_f32(&elem, transmute(a), 3));
         assert_eq!(r, e)
     }
@@ -4748,35 +4836,35 @@ mod tests {
     #[simd_test(enable = "neon")]
     unsafe fn test_vget_lane_u8() {
         let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
-        let r = vget_lane_u8::<1>(transmute(v));
+        let r = vget_lane_u8(transmute(v), 1);
         assert_eq!(r, 2);
     }
 
     #[simd_test(enable = "neon")]
     unsafe fn test_vgetq_lane_u32() {
         let v = i32x4::new(1, 2, 3, 4);
-        let r = vgetq_lane_u32::<1>(transmute(v));
+        let r = vgetq_lane_u32(transmute(v), 1);
         assert_eq!(r, 2);
     }
 
     #[simd_test(enable = "neon")]
     unsafe fn test_vgetq_lane_s32() {
         let v = i32x4::new(1, 2, 3, 4);
-        let r = vgetq_lane_s32::<1>(transmute(v));
+        let r = vgetq_lane_s32(transmute(v), 1);
         assert_eq!(r, 2);
     }
 
     #[simd_test(enable = "neon")]
     unsafe fn test_vget_lane_u64() {
         let v: u64 = 1;
-        let r = vget_lane_u64::<0>(transmute(v));
+        let r = vget_lane_u64(transmute(v), 0);
         assert_eq!(r, 1);
     }
 
     #[simd_test(enable = "neon")]
     unsafe fn test_vgetq_lane_u16() {
         let v = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
-        let r = vgetq_lane_u16::<1>(transmute(v));
+        let r = vgetq_lane_u16(transmute(v), 1);
         assert_eq!(r, 2);
     }
@@ -4787,7 +4875,7 @@ mod tests {
             17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 31, 32,
         );
         let e = i8x16::new(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19);
-        let r: i8x16 = transmute(vextq_s8::<3>(transmute(a), transmute(b)));
+        let r: i8x16 = transmute(vextq_s8(transmute(a), transmute(b), 3));
         assert_eq!(r, e);
     }
@@ -4798,7 +4886,7 @@ mod tests {
             17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 31, 32,
         );
         let e = u8x16::new(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19);
-        let r: u8x16 = transmute(vextq_u8::<3>(transmute(a), transmute(b)));
+        let r: u8x16 = transmute(vextq_u8(transmute(a), transmute(b), 3));
         assert_eq!(r, e);
     }
@@ -4806,7 +4894,7 @@ unsafe fn test_vshrq_n_u8() {
         let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
         let e = u8x16::new(0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4);
-        let r: u8x16 = transmute(vshrq_n_u8::<2>(transmute(a)));
+        let r: u8x16 = transmute(vshrq_n_u8(transmute(a), 2));
         assert_eq!(r, e);
     }
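// [Editorial note] A quick way to see that the expected vector in
// `test_vshrq_n_u8` is right: it is simply 1..=16 shifted right by 2,
// element-wise (illustrative check, not part of the patch):
#[test]
fn vshr_expected_values_model() {
    let e = [0u8, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4];
    for (computed, expected) in (1u8..=16).map(|x| x >> 2).zip(e) {
        assert_eq!(computed, expected);
    }
}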
@@ -4814,7 +4902,7 @@ unsafe fn test_vshlq_n_u8() {
         let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
         let e = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
-        let r: u8x16 = transmute(vshlq_n_u8::<2>(transmute(a)));
+        let r: u8x16 = transmute(vshlq_n_u8(transmute(a), 2));
         assert_eq!(r, e);
     }
@@ -4869,7 +4957,7 @@ mod tests {
     #[simd_test(enable = "neon")]
     unsafe fn test_vgetq_lane_u64() {
         let v = i64x2::new(1, 2);
-        let r = vgetq_lane_u64::<1>(transmute(v));
+        let r = vgetq_lane_u64(transmute(v), 1);
         assert_eq!(r, 2);
     }
diff --git a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
index 125659c105..04e623f24e 100644
--- a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
+++ b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs
@@ -60,7 +60,7 @@ macro_rules! test_vsri {
             let b = [$($b as $t),*];
             let n_bit_mask = ((1 as $t << $n) - 1).rotate_right($n);
             let e = [$(($a as $t & n_bit_mask) | (($b as $t >> $n) & !n_bit_mask)),*];
-            let r = $fn_id::<$n>(transmute(a), transmute(b));
+            let r = $fn_id(transmute(a), transmute(b), $n);
             let mut d = e;
             d = transmute(r);
             assert_eq!(d, e);