@@ -106,8 +106,8 @@ unsafe fn load_data_avx2(
 
     macro_rules! unrolled_iterations {
        ($($i:literal),*) => { $(
-           x[$i] = _mm256_insertf128_si256::<1>(x[$i], _mm_loadu_si128(data.add($i) as *const _));
-           x[$i] = _mm256_insertf128_si256::<0>(x[$i], _mm_loadu_si128(data.add($i + 1) as *const _));
+           x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add($i) as *const _), 1);
+           x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add($i + 1) as *const _), 0);
 
            x[$i] = _mm256_shuffle_epi8(x[$i], MASK);
            let y = _mm256_add_epi64(
@@ -117,11 +117,11 @@ unsafe fn load_data_avx2(
 
            _mm_store_si128(
                &mut ms[2 * $i] as *mut u64 as *mut _,
-               _mm256_extracti128_si256::<0>(y),
+               _mm256_extracti128_si256(y, 0),
            );
            _mm_store_si128(
                &mut t2[2 * $i] as *mut u64 as *mut _,
-               _mm256_extracti128_si256::<1>(y),
+               _mm256_extracti128_si256(y, 1),
            );
        )* };
    }
@@ -164,11 +164,11 @@ unsafe fn rounds_0_63_avx2(
 
            _mm_store_si128(
                &mut ms[2 * j] as *mut u64 as *mut _,
-               _mm256_extracti128_si256::<0>(y),
+               _mm256_extracti128_si256(y, 0),
            );
            _mm_store_si128(
                &mut t2[(16 * i) + 2 * j] as *mut u64 as *mut _,
-               _mm256_extracti128_si256::<1>(y),
+               _mm256_extracti128_si256(y, 1),
            );
 
            k64x4_idx += 4;
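Note on the intrinsic signatures (an observation, not part of the patch): the hunks above appear to move the lane index of _mm256_insertf128_si256 / _mm256_extracti128_si256 from a const-generic parameter (turbofish) to a trailing immediate argument. Which form compiles depends on the std::arch version in use; recent toolchains expose the const-generic form, while older ones took the immediate as a required-const trailing argument. A minimal sketch of the newer convention, with the trailing-argument form shown in a comment (the function name combine_halves is illustrative only, not from this patch):

    #[cfg(target_arch = "x86_64")]
    use std::arch::x86_64::*;

    // Build one 256-bit vector from two 128-bit halves (hi:lo).
    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx2")]
    unsafe fn combine_halves(lo: __m128i, hi: __m128i) -> __m256i {
        // Reinterpret lo as the low half of a __m256i (upper half undefined).
        let v = _mm256_castsi128_si256(lo);
        // Newer std::arch: the destination lane is a const generic.
        _mm256_insertf128_si256::<1>(v, hi)
        // Older std::arch (the form this patch switches to):
        //     _mm256_insertf128_si256(v, hi, 1)
    }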
@@ -258,50 +258,50 @@ macro_rules! fn_sha512_update_x {
     }) => {
         unsafe fn $name(x: &mut [$ty; 8], k64_p: *const $ty) -> $ty {
             // q[2:1]
-            let mut t0 = $ALIGNR8::<8>(x[1], x[0]);
+            let mut t0 = $ALIGNR8(x[1], x[0], 8);
             // q[10:9]
-            let mut t3 = $ALIGNR8::<8>(x[5], x[4]);
+            let mut t3 = $ALIGNR8(x[5], x[4], 8);
             // q[2:1] >> s0[0]
-            let mut t2 = $SRL64::<1>(t0);
+            let mut t2 = $SRL64(t0, 1);
             // q[1:0] + q[10:9]
             x[0] = $ADD64(x[0], t3);
             // q[2:1] >> s0[2]
-            t3 = $SRL64::<7>(t0);
+            t3 = $SRL64(t0, 7);
             // q[2:1] << (64 - s0[1])
-            let mut t1 = $SLL64::<{ 64 - 8 }>(t0);
+            let mut t1 = $SLL64(t0, 64 - 8);
             // (q[2:1] >> s0[2]) ^
             // (q[2:1] >> s0[0])
             t0 = $XOR(t3, t2);
             // q[2:1] >> s0[1]
-            t2 = $SRL64::<{ 8 - 1 }>(t2);
+            t2 = $SRL64(t2, 8 - 1);
             // (q[2:1] >> s0[2]) ^
             // (q[2:1] >> s0[0]) ^
             // q[2:1] << (64 - s0[1])
             t0 = $XOR(t0, t1);
             // q[2:1] << (64 - s0[0])
-            t1 = $SLL64::<{ 8 - 1 }>(t1);
+            t1 = $SLL64(t1, 8 - 1);
             // sigma1(q[2:1])
             t0 = $XOR(t0, t2);
             t0 = $XOR(t0, t1);
             // q[15:14] >> s1[2]
-            t3 = $SRL64::<6>(x[7]);
+            t3 = $SRL64(x[7], 6);
             // q[15:14] >> (64 - s1[1])
-            t2 = $SLL64::<{ 64 - 61 }>(x[7]);
+            t2 = $SLL64(x[7], 64 - 61);
             // q[1:0] + sigma0(q[2:1])
             x[0] = $ADD64(x[0], t0);
             // q[15:14] >> s1[0]
-            t1 = $SRL64::<19>(x[7]);
+            t1 = $SRL64(x[7], 19);
             // q[15:14] >> s1[2] ^
             // q[15:14] >> (64 - s1[1])
             t3 = $XOR(t3, t2);
             // q[15:14] >> (64 - s1[0])
-            t2 = $SLL64::<{ 61 - 19 }>(t2);
+            t2 = $SLL64(t2, 61 - 19);
             // q[15:14] >> s1[2] ^
             // q[15:14] >> (64 - s1[1] ^
             // q[15:14] >> s1[0]
             t3 = $XOR(t3, t1);
             // q[15:14] >> s1[1]
-            t1 = $SRL64::<{ 61 - 19 }>(t1);
+            t1 = $SRL64(t1, 61 - 19);
             // sigma1(q[15:14])
             t3 = $XOR(t3, t2);
             t3 = $XOR(t3, t1);
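For reference (standard FIPS 180-4 definitions, not code taken from this patch): the shift constants in this hunk, 1/8/7 and 19/61/6, are the SHA-512 message-schedule small-sigma rotations and shifts, with each 64-bit rotate emulated as a right shift XORed with the complementary left shift because AVX2 has no 64-bit rotate instruction. A scalar sketch of what the vector code computes per 64-bit lane:

    // SHA-512 small-sigma functions on a single 64-bit word.
    fn small_sigma0(x: u64) -> u64 {
        x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7)
    }

    fn small_sigma1(x: u64) -> u64 {
        x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6)
    }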