@@ -404,6 +404,7 @@ pub unsafe fn v128_store(m: *mut v128, a: v128) {
 #[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
+    static_assert!(L: usize where L < 16);
     transmute(llvm_load8_lane(m, v.as_u8x16(), L))
 }
@@ -412,6 +413,7 @@ pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
+    static_assert!(L: usize where L < 8);
     transmute(llvm_load16_lane(m, v.as_u16x8(), L))
 }
@@ -420,6 +422,7 @@ pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
+    static_assert!(L: usize where L < 4);
     transmute(llvm_load32_lane(m, v.as_u32x4(), L))
 }
@@ -428,6 +431,7 @@ pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
+    static_assert!(L: usize where L < 2);
     transmute(llvm_load64_lane(m, v.as_u64x2(), L))
 }
@@ -436,6 +440,7 @@ pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
 #[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
+    static_assert!(L: usize where L < 16);
     llvm_store8_lane(m, v.as_u8x16(), L);
 }
@@ -444,6 +449,7 @@ pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
 #[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
+    static_assert!(L: usize where L < 8);
     llvm_store16_lane(m, v.as_u16x8(), L)
 }
@@ -452,6 +458,7 @@ pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
 #[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
+    static_assert!(L: usize where L < 4);
     llvm_store32_lane(m, v.as_u32x4(), L)
 }
@@ -460,6 +467,7 @@ pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
 #[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
+    static_assert!(L: usize where L < 2);
     llvm_store64_lane(m, v.as_u64x2(), L)
 }
@@ -649,6 +657,22 @@ pub unsafe fn i8x16_shuffle<
     a: v128,
     b: v128,
 ) -> v128 {
+    static_assert!(I0: usize where I0 < 32);
+    static_assert!(I1: usize where I1 < 32);
+    static_assert!(I2: usize where I2 < 32);
+    static_assert!(I3: usize where I3 < 32);
+    static_assert!(I4: usize where I4 < 32);
+    static_assert!(I5: usize where I5 < 32);
+    static_assert!(I6: usize where I6 < 32);
+    static_assert!(I7: usize where I7 < 32);
+    static_assert!(I8: usize where I8 < 32);
+    static_assert!(I9: usize where I9 < 32);
+    static_assert!(I10: usize where I10 < 32);
+    static_assert!(I11: usize where I11 < 32);
+    static_assert!(I12: usize where I12 < 32);
+    static_assert!(I13: usize where I13 < 32);
+    static_assert!(I14: usize where I14 < 32);
+    static_assert!(I15: usize where I15 < 32);
     let shuf = simd_shuffle16::<u8x16, u8x16>(
         a.as_u8x16(),
         b.as_u8x16(),
@@ -696,6 +720,14 @@ pub unsafe fn i16x8_shuffle<
     a: v128,
     b: v128,
 ) -> v128 {
+    static_assert!(I0: usize where I0 < 16);
+    static_assert!(I1: usize where I1 < 16);
+    static_assert!(I2: usize where I2 < 16);
+    static_assert!(I3: usize where I3 < 16);
+    static_assert!(I4: usize where I4 < 16);
+    static_assert!(I5: usize where I5 < 16);
+    static_assert!(I6: usize where I6 < 16);
+    static_assert!(I7: usize where I7 < 16);
     let shuf = simd_shuffle8::<u16x8, u16x8>(
         a.as_u16x8(),
         b.as_u16x8(),
@@ -720,6 +752,10 @@ pub unsafe fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, c
     a: v128,
     b: v128,
 ) -> v128 {
+    static_assert!(I0: usize where I0 < 8);
+    static_assert!(I1: usize where I1 < 8);
+    static_assert!(I2: usize where I2 < 8);
+    static_assert!(I3: usize where I3 < 8);
     let shuf = simd_shuffle4::<u32x4, u32x4>(
         a.as_u32x4(),
         b.as_u32x4(),
@@ -739,6 +775,8 @@ pub unsafe fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, c
 #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
+    static_assert!(I0: usize where I0 < 4);
+    static_assert!(I1: usize where I1 < 4);
     let shuf = simd_shuffle2::<u64x2, u64x2>(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]);
     transmute(shuf)
 }
@@ -751,6 +789,7 @@ pub unsafe fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128)
 #[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
+    static_assert!(N: usize where N < 16);
     simd_extract(a.as_i8x16(), N as u32)
 }
@@ -762,6 +801,7 @@ pub unsafe fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
 #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
+    static_assert!(N: usize where N < 16);
     transmute(simd_insert(a.as_i8x16(), N as u32, val))
 }
@@ -773,6 +813,7 @@ pub unsafe fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
+    static_assert!(N: usize where N < 8);
     simd_extract(a.as_i16x8(), N as u32)
 }
@@ -784,6 +825,7 @@ pub unsafe fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
 #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
+    static_assert!(N: usize where N < 8);
     transmute(simd_insert(a.as_i16x8(), N as u32, val))
 }
@@ -795,6 +837,7 @@ pub unsafe fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
+    static_assert!(N: usize where N < 4);
     simd_extract(a.as_i32x4(), N as u32)
 }
@@ -806,6 +849,7 @@ pub unsafe fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
 #[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
+    static_assert!(N: usize where N < 4);
     transmute(simd_insert(a.as_i32x4(), N as u32, val))
 }
@@ -817,6 +861,7 @@ pub unsafe fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
+    static_assert!(N: usize where N < 2);
     simd_extract(a.as_i64x2(), N as u32)
 }
@@ -828,6 +873,7 @@ pub unsafe fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
 #[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
+    static_assert!(N: usize where N < 2);
     transmute(simd_insert(a.as_i64x2(), N as u32, val))
 }
@@ -839,6 +885,7 @@ pub unsafe fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
+    static_assert!(N: usize where N < 4);
     simd_extract(a.as_f32x4(), N as u32)
 }
@@ -850,6 +897,7 @@ pub unsafe fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
 #[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
+    static_assert!(N: usize where N < 4);
     transmute(simd_insert(a.as_f32x4(), N as u32, val))
 }
@@ -861,6 +909,7 @@ pub unsafe fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
+    static_assert!(N: usize where N < 2);
     simd_extract(a.as_f64x2(), N as u32)
 }
@@ -872,6 +921,7 @@ pub unsafe fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
 #[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 pub unsafe fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
+    static_assert!(N: usize where N < 2);
     transmute(simd_insert(a.as_f64x2(), N as u32, val))
 }