@@ -2929,9 +2929,11 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllw))]
+#[cfg_attr(test, assert_instr(vpsllw, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi16(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_slli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(pslliw(a.as_i16x16(), imm8))
 }
@@ -2941,9 +2943,11 @@ pub unsafe fn _mm256_slli_epi16(a: __m256i, imm8: i32) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpslld))]
+#[cfg_attr(test, assert_instr(vpslld, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi32(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_slli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psllid(a.as_i32x8(), imm8))
 }
@@ -2953,9 +2957,11 @@ pub unsafe fn _mm256_slli_epi32(a: __m256i, imm8: i32) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllq))]
+#[cfg_attr(test, assert_instr(vpsllq, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi64(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_slli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(pslliq(a.as_i64x4(), imm8))
 }
@@ -3077,9 +3083,11 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsraw))]
+#[cfg_attr(test, assert_instr(vpsraw, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi16(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_srai_epi16<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psraiw(a.as_i16x16(), imm8))
 }
@@ -3089,9 +3097,11 @@ pub unsafe fn _mm256_srai_epi16(a: __m256i, imm8: i32) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrad))]
+#[cfg_attr(test, assert_instr(vpsrad, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi32(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_srai_epi32<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psraid(a.as_i32x8(), imm8))
 }
@@ -3197,9 +3207,11 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlw))]
+#[cfg_attr(test, assert_instr(vpsrlw, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi16(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_srli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psrliw(a.as_i16x16(), imm8))
 }
@@ -3209,9 +3221,11 @@ pub unsafe fn _mm256_srli_epi16(a: __m256i, imm8: i32) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrld))]
+#[cfg_attr(test, assert_instr(vpsrld, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi32(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_srli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psrlid(a.as_i32x8(), imm8))
 }
@@ -3221,9 +3235,11 @@ pub unsafe fn _mm256_srli_epi32(a: __m256i, imm8: i32) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlq))]
+#[cfg_attr(test, assert_instr(vpsrlq, imm8 = 7))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi64(a: __m256i, imm8: i32) -> __m256i {
+pub unsafe fn _mm256_srli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(imm8);
     transmute(psrliq(a.as_i64x4(), imm8))
 }
@@ -5204,23 +5220,23 @@ mod tests {
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_slli_epi16() {
         assert_eq_m256i(
-            _mm256_slli_epi16(_mm256_set1_epi16(0xFF), 4),
+            _mm256_slli_epi16::<4>(_mm256_set1_epi16(0xFF)),
             _mm256_set1_epi16(0xFF0),
         );
     }
 
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_slli_epi32() {
         assert_eq_m256i(
-            _mm256_slli_epi32(_mm256_set1_epi32(0xFFFF), 4),
+            _mm256_slli_epi32::<4>(_mm256_set1_epi32(0xFFFF)),
             _mm256_set1_epi32(0xFFFF0),
         );
     }
 
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_slli_epi64() {
         assert_eq_m256i(
-            _mm256_slli_epi64(_mm256_set1_epi64x(0xFFFFFFFF), 4),
+            _mm256_slli_epi64::<4>(_mm256_set1_epi64x(0xFFFFFFFF)),
             _mm256_set1_epi64x(0xFFFFFFFF0),
         );
     }
@@ -5287,15 +5303,15 @@ mod tests {
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_srai_epi16() {
         assert_eq_m256i(
-            _mm256_srai_epi16(_mm256_set1_epi16(-1), 1),
+            _mm256_srai_epi16::<1>(_mm256_set1_epi16(-1)),
             _mm256_set1_epi16(-1),
         );
     }
 
    #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_srai_epi32() {
         assert_eq_m256i(
-            _mm256_srai_epi32(_mm256_set1_epi32(-1), 1),
+            _mm256_srai_epi32::<1>(_mm256_set1_epi32(-1)),
             _mm256_set1_epi32(-1),
         );
     }
@@ -5365,23 +5381,23 @@ mod tests {
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_srli_epi16() {
         assert_eq_m256i(
-            _mm256_srli_epi16(_mm256_set1_epi16(0xFF), 4),
+            _mm256_srli_epi16::<4>(_mm256_set1_epi16(0xFF)),
             _mm256_set1_epi16(0xF),
         );
     }
 
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_srli_epi32() {
         assert_eq_m256i(
-            _mm256_srli_epi32(_mm256_set1_epi32(0xFFFF), 4),
+            _mm256_srli_epi32::<4>(_mm256_set1_epi32(0xFFFF)),
             _mm256_set1_epi32(0xFFF),
         );
     }
 
     #[simd_test(enable = "avx2")]
     unsafe fn test_mm256_srli_epi64() {
         assert_eq_m256i(
-            _mm256_srli_epi64(_mm256_set1_epi64x(0xFFFFFFFF), 4),
+            _mm256_srli_epi64::<4>(_mm256_set1_epi64x(0xFFFFFFFF)),
             _mm256_set1_epi64x(0xFFFFFFF),
        );
     }
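
Not part of the diff: a minimal caller-side sketch of how these intrinsics are invoked after the change, with the shift count supplied as a const generic argument instead of a runtime `i32`. It assumes an x86_64 target, the const-generic signatures introduced above, and runtime AVX2 detection; the function name `shift_example` is made up for illustration.

```rust
// Hypothetical caller sketch (not from the diff). The immediate shift count is
// now a const generic, so it goes in angle brackets and is validated at
// compile time by static_assert_imm8! inside the intrinsic.
#[cfg(target_arch = "x86_64")]
fn shift_example() {
    use std::arch::x86_64::*;

    if is_x86_feature_detected!("avx2") {
        unsafe {
            // Old call: _mm256_slli_epi16(a, 4)
            // New call: the immediate 4 is a const generic parameter.
            let a = _mm256_set1_epi16(0xFF);
            let shifted = _mm256_slli_epi16::<4>(a);

            // Every 16-bit lane should now hold 0xFF << 4 == 0xFF0.
            let lanes: [i16; 16] = std::mem::transmute(shifted);
            assert!(lanes.iter().all(|&lane| lane == 0xFF0));
        }
    }
}
```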