@@ -2024,14 +2024,10 @@ pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8, imm3: i32) -> v16u8 {
-    macro_rules! call {
-        ($imm3:expr) => {
-            msa_binsli_b(a, mem::transmute(b), $imm3)
-        };
-    }
-    constify_imm3!(imm3, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsli_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
+    static_assert_imm3!(IMM3);
+    msa_binsli_b(a, mem::transmute(b), IMM3)
 }
 
 /// Immediate Bit Insert Left
@@ -2043,14 +2039,10 @@ pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8, imm3: i32) -> v16u8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 {
-    macro_rules! call {
-        ($imm4:expr) => {
-            msa_binsli_h(a, mem::transmute(b), $imm4)
-        };
-    }
-    constify_imm4!(imm4, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsli_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
+    static_assert_imm4!(IMM4);
+    msa_binsli_h(a, mem::transmute(b), IMM4)
 }
 
 /// Immediate Bit Insert Left
@@ -2062,14 +2054,10 @@ pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 {
-    macro_rules! call {
-        ($imm5:expr) => {
-            msa_binsli_w(a, mem::transmute(b), $imm5)
-        };
-    }
-    constify_imm5!(imm5, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsli_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
+    static_assert_imm5!(IMM5);
+    msa_binsli_w(a, mem::transmute(b), IMM5)
 }
 
 /// Immediate Bit Insert Left
@@ -2081,14 +2069,10 @@ pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsli_d(a: v2u64, b: v2u64, imm6: i32) -> v2u64 {
-    macro_rules! call {
-        ($imm6:expr) => {
-            msa_binsli_d(a, mem::transmute(b), $imm6)
-        };
-    }
-    constify_imm6!(imm6, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsli_d<const IMM6: i32>(a: v2u64, b: v2u64) -> v2u64 {
+    static_assert_imm6!(IMM6);
+    msa_binsli_d(a, mem::transmute(b), IMM6)
 }
 
 /// Vector Bit Insert Right
@@ -2156,14 +2140,10 @@ pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8, imm3: i32) -> v16u8 {
-    macro_rules! call {
-        ($imm3:expr) => {
-            msa_binsri_b(a, mem::transmute(b), $imm3)
-        };
-    }
-    constify_imm3!(imm3, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsri_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
+    static_assert_imm3!(IMM3);
+    msa_binsri_b(a, mem::transmute(b), IMM3)
 }
 
 /// Immediate Bit Insert Right
@@ -2175,14 +2155,10 @@ pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8, imm3: i32) -> v16u8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 {
-    macro_rules! call {
-        ($imm4:expr) => {
-            msa_binsri_h(a, mem::transmute(b), $imm4)
-        };
-    }
-    constify_imm4!(imm4, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsri_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
+    static_assert_imm4!(IMM4);
+    msa_binsri_h(a, mem::transmute(b), IMM4)
 }
 
 /// Immediate Bit Insert Right
@@ -2194,14 +2170,10 @@ pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 {
-    macro_rules! call {
-        ($imm5:expr) => {
-            msa_binsri_w(a, mem::transmute(b), $imm5)
-        };
-    }
-    constify_imm5!(imm5, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsri_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
+    static_assert_imm5!(IMM5);
+    msa_binsri_w(a, mem::transmute(b), IMM5)
 }
 
 /// Immediate Bit Insert Right
@@ -2213,14 +2185,10 @@ pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_binsri_d(a: v2u64, b: v2u64, imm6: i32) -> v2u64 {
-    macro_rules! call {
-        ($imm6:expr) => {
-            msa_binsri_d(a, mem::transmute(b), $imm6)
-        };
-    }
-    constify_imm6!(imm6, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_binsri_d<const IMM6: i32>(a: v2u64, b: v2u64) -> v2u64 {
+    static_assert_imm6!(IMM6);
+    msa_binsri_d(a, mem::transmute(b), IMM6)
 }
 
 /// Vector Bit Move If Not Zero
@@ -2246,14 +2214,10 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 {
-    macro_rules! call {
-        ($imm8:expr) => {
-            msa_bmnzi_b(a, mem::transmute(b), $imm8)
-        };
-    }
-    constify_imm8!(imm8, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_bmnzi_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
+    static_assert_imm8!(IMM8);
+    msa_bmnzi_b(a, mem::transmute(b), IMM8)
 }
 
 /// Vector Bit Move If Zero
@@ -2279,14 +2243,10 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 {
-    macro_rules! call {
-        ($imm8:expr) => {
-            msa_bmzi_b(a, mem::transmute(b), $imm8)
-        };
-    }
-    constify_imm8!(imm8, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_bmzi_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
+    static_assert_imm8!(IMM8);
+    msa_bmzi_b(a, mem::transmute(b), IMM8)
 }
 
 /// Vector Bit Negate
@@ -2484,14 +2444,10 @@ pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_bseli_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 {
-    macro_rules! call {
-        ($imm8:expr) => {
-            msa_bseli_b(a, mem::transmute(b), $imm8)
-        };
-    }
-    constify_imm8!(imm8, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_bseli_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
+    static_assert_imm8!(IMM8);
+    msa_bseli_b(a, mem::transmute(b), IMM8)
 }
 
 /// Vector Bit Set
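The move from #[rustc_args_required_const(2)] to #[rustc_legacy_const_generics(2)] is call-site compatible: the attribute lifts the third positional argument into the new const parameter, so the old C-style call and the explicit const-generic form both compile, and the range check moves from the constify_* lookup macros to a static_assert_* at monomorphization time. A minimal call-site sketch, assuming a nightly compiler targeting mips with the msa target feature (the demo wrapper, the assumed core::arch::mips path, and the literal 0b101 are illustrative, not part of this commit):

use core::arch::mips::*; // assumed location of the MSA intrinsics and vector types

#[target_feature(enable = "msa")]
unsafe fn demo(a: v16u8, b: v16u8) -> v16u8 {
    // Old positional style still compiles: rustc_legacy_const_generics(2)
    // rewrites the third argument into the IMM3 const parameter.
    let x = __msa_binsli_b(a, b, 0b101);
    // Equivalent explicit const-generic call.
    __msa_binsli_b::<0b101>(a, x)
}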
@@ -7450,14 +7406,10 @@ pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8, imm4: i32) -> v16i8 {
-    macro_rules! call {
-        ($imm4:expr) => {
-            msa_sldi_b(a, mem::transmute(b), $imm4)
-        };
-    }
-    constify_imm4!(imm4, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_sldi_b<const IMM4: i32>(a: v16i8, b: v16i8) -> v16i8 {
+    static_assert_imm4!(IMM4);
+    msa_sldi_b(a, mem::transmute(b), IMM4)
 }
 
 /// Immediate Columns Slide
@@ -7474,14 +7426,10 @@ pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8, imm4: i32) -> v16i8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16, imm3: i32) -> v8i16 {
-    macro_rules! call {
-        ($imm3:expr) => {
-            msa_sldi_h(a, mem::transmute(b), $imm3)
-        };
-    }
-    constify_imm3!(imm3, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_sldi_h<const IMM3: i32>(a: v8i16, b: v8i16) -> v8i16 {
+    static_assert_imm3!(IMM3);
+    msa_sldi_h(a, mem::transmute(b), IMM3)
 }
 
 /// Immediate Columns Slide
@@ -7498,14 +7446,10 @@ pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16, imm3: i32) -> v8i16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32, imm2: i32) -> v4i32 {
-    macro_rules! call {
-        ($imm2:expr) => {
-            msa_sldi_w(a, mem::transmute(b), $imm2)
-        };
-    }
-    constify_imm2!(imm2, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_sldi_w<const IMM2: i32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_imm2!(IMM2);
+    msa_sldi_w(a, mem::transmute(b), IMM2)
 }
 
 /// Immediate Columns Slide
@@ -7522,14 +7466,10 @@ pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32, imm2: i32) -> v4i32 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_sldi_d(a: v2i64, b: v2i64, imm1: i32) -> v2i64 {
-    macro_rules! call {
-        ($imm1:expr) => {
-            msa_sldi_d(a, mem::transmute(b), $imm1)
-        };
-    }
-    constify_imm1!(imm1, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_sldi_d<const IMM1: i32>(a: v2i64, b: v2i64) -> v2i64 {
+    static_assert_imm1!(IMM1);
+    msa_sldi_d(a, mem::transmute(b), IMM1)
 }
 
 /// Vector Shift Left
@@ -8249,14 +8189,10 @@ pub unsafe fn __msa_srlri_d<const IMM6: i32>(a: v2i64) -> v2i64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8, imm_s10: i32) -> () {
-    macro_rules! call {
-        ($imm_s10:expr) => {
-            msa_st_b(a, mem_addr, $imm_s10)
-        };
-    }
-    constify_imm_s10!(imm_s10, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_st_b<const IMM_S10: i32>(a: v16i8, mem_addr: *mut u8) -> () {
+    static_assert_imm_s10!(IMM_S10);
+    msa_st_b(a, mem_addr, IMM_S10)
 }
 
 /// Vector Store
@@ -8268,14 +8204,11 @@ pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8, imm_s10: i32) -> () {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8, imm_s11: i32) -> () {
-    macro_rules! call {
-        ($imm_s11:expr) => {
-            msa_st_h(a, mem_addr, $imm_s11)
-        };
-    }
-    constify_imm_s11!(imm_s11, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_st_h<const IMM_S11: i32>(a: v8i16, mem_addr: *mut u8) -> () {
+    static_assert_imm_s11!(IMM_S11);
+    static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0);
+    msa_st_h(a, mem_addr, IMM_S11)
 }
 
 /// Vector Store
@@ -8287,14 +8220,11 @@ pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8, imm_s11: i32) -> () {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8, imm_s12: i32) -> () {
-    macro_rules! call {
-        ($imm_s12:expr) => {
-            msa_st_w(a, mem_addr, $imm_s12)
-        };
-    }
-    constify_imm_s12!(imm_s12, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_st_w<const IMM_S12: i32>(a: v4i32, mem_addr: *mut u8) -> () {
+    static_assert_imm_s12!(IMM_S12);
+    static_assert!(IMM_S12: i32 where IMM_S12 % 4 == 0);
+    msa_st_w(a, mem_addr, IMM_S12)
 }
 
 /// Vector Store
@@ -8306,14 +8236,11 @@ pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8, imm_s12: i32) -> () {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))]
-#[rustc_args_required_const(2)]
-pub unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut u8, imm_s13: i32) -> () {
-    macro_rules! call {
-        ($imm_s13:expr) => {
-            msa_st_d(a, mem_addr, $imm_s13)
-        };
-    }
-    constify_imm_s13!(imm_s13, call)
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn __msa_st_d<const IMM_S13: i32>(a: v2i64, mem_addr: *mut u8) -> () {
+    static_assert_imm_s13!(IMM_S13);
+    static_assert!(IMM_S13: i32 where IMM_S13 % 8 == 0);
+    msa_st_d(a, mem_addr, IMM_S13)
 }
 
 /// Vector Signed Saturated Subtract of Signed Values
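The store intrinsics gain a second compile-time check on top of the range assert. The MSA st.h/st.w/st.d instructions encode their signed immediate offset in element-size units, so the byte offset the intrinsic takes must be a multiple of 2, 4, or 8 respectively; the new static_assert! lines reject offsets the instruction cannot encode (st.b needs no extra check because its offset is already in bytes). A sketch of what the asserts accept and reject, under the same assumptions as the sketch above (store_demo is illustrative):

#[target_feature(enable = "msa")]
unsafe fn store_demo(v: v8i16, buf: *mut u8) {
    // Accepted: 6 fits the signed 11-bit range and 6 % 2 == 0.
    __msa_st_h::<6>(v, buf);
    // Rejected at compile time by `static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0)`:
    // __msa_st_h::<5>(v, buf);
}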