powerpc: use more target-independent llvm intrinsics (min, max, round, countlz) #1713

Merged · 5 commits · Feb 23, 2025
58 changes: 19 additions & 39 deletions crates/core_arch/src/powerpc/altivec.rs
@@ -158,32 +158,32 @@ unsafe extern "C" {
#[link_name = "llvm.ppc.altivec.vmulosh"]
fn vmulosh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_int;

#[link_name = "llvm.ppc.altivec.vmaxsb"]
#[link_name = "llvm.smax.v16i8"]
Review comment (Member): Since these are generic LLVM vector intrinsics, it would make more sense to provide these in core::intrinsics::simd and then use them from stdarch, similar to how simd_neg is used.

Review comment (Member): I see this is already a pattern in quite a few places in stdarch. This doesn't need to block this PR; it can be resolved separately.

Reply (Contributor, Author): Yeah, makes sense. I'll add that then.
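
A minimal sketch of the naming scheme these bindings rely on, for readers outside stdarch: generic LLVM intrinsics are overloaded by vector type, so "llvm.smax.v16i8" is llvm.smax instantiated at <16 x i8>. The I8x16 type and smax_v16i8 name below are illustrative stand-ins, and the unstable feature gates make this a nightly-only sketch rather than the crate's actual code.

```rust
// Nightly-only sketch: bind a generic LLVM vector intrinsic by name,
// the same mechanism the link_name changes in this diff use.
#![feature(repr_simd, link_llvm_intrinsics)]
#![allow(internal_features)]

#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
struct I8x16([i8; 16]);

#[allow(improper_ctypes)]
unsafe extern "C" {
    #[link_name = "llvm.smax.v16i8"]
    fn smax_v16i8(a: I8x16, b: I8x16) -> I8x16;
}

fn main() {
    let a = I8x16([-1; 16]);
    let b = I8x16([2; 16]);
    // Element-wise signed max; LLVM selects the best instruction for the
    // target (vmaxsb on AltiVec).
    assert_eq!(unsafe { smax_v16i8(a, b) }, I8x16([2; 16]));
}
```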
fn vmaxsb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
#[link_name = "llvm.ppc.altivec.vmaxsh"]
#[link_name = "llvm.smax.v8i16"]
fn vmaxsh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_short;
#[link_name = "llvm.ppc.altivec.vmaxsw"]
#[link_name = "llvm.smax.v4i32"]
fn vmaxsw(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int;

#[link_name = "llvm.ppc.altivec.vmaxub"]
#[link_name = "llvm.umax.v16i8"]
fn vmaxub(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_char;
#[link_name = "llvm.ppc.altivec.vmaxuh"]
#[link_name = "llvm.umax.v8i16"]
fn vmaxuh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short;
#[link_name = "llvm.ppc.altivec.vmaxuw"]
#[link_name = "llvm.umax.v4i32"]
fn vmaxuw(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int;

#[link_name = "llvm.ppc.altivec.vminsb"]
#[link_name = "llvm.smin.v16i8"]
fn vminsb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
#[link_name = "llvm.ppc.altivec.vminsh"]
#[link_name = "llvm.smin.v8i16"]
fn vminsh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_short;
#[link_name = "llvm.ppc.altivec.vminsw"]
#[link_name = "llvm.smin.v4i32"]
fn vminsw(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int;

#[link_name = "llvm.ppc.altivec.vminub"]
#[link_name = "llvm.umin.v16i8"]
fn vminub(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_char;
#[link_name = "llvm.ppc.altivec.vminuh"]
#[link_name = "llvm.umin.v8i16"]
fn vminuh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short;
#[link_name = "llvm.ppc.altivec.vminuw"]
#[link_name = "llvm.umin.v4i32"]
fn vminuw(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int;

#[link_name = "llvm.ppc.altivec.vsubsbs"]
@@ -368,21 +368,14 @@ unsafe extern "C" {
#[link_name = "llvm.ppc.altivec.srv"]
fn vsrv(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_char;

#[link_name = "llvm.ctlz.v16i8"]
fn vclzb(a: vector_signed_char) -> vector_signed_char;
#[link_name = "llvm.ctlz.v8i16"]
fn vclzh(a: vector_signed_short) -> vector_signed_short;
#[link_name = "llvm.ctlz.v4i32"]
fn vclzw(a: vector_signed_int) -> vector_signed_int;

#[link_name = "llvm.ppc.altivec.vrlb"]
fn vrlb(a: vector_signed_char, b: vector_unsigned_char) -> vector_signed_char;
#[link_name = "llvm.ppc.altivec.vrlh"]
fn vrlh(a: vector_signed_short, b: vector_unsigned_short) -> vector_signed_short;
#[link_name = "llvm.ppc.altivec.vrlw"]
fn vrlw(a: vector_signed_int, c: vector_unsigned_int) -> vector_signed_int;

#[link_name = "llvm.ppc.altivec.vrfin"]
#[link_name = "llvm.nearbyint.v4f32"]
fn vrfin(a: vector_float) -> vector_float;
Review comment on lines -385 to 379 (Contributor, Author): I've verified that these in fact produce identical assembly: https://godbolt.org/z/Wx1KWezbe

}
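
On the rounding swap above: vrfin rounds each lane to the nearest integral value with ties going to even, and llvm.nearbyint produces the same result under the default floating-point environment, which the godbolt link confirms at the assembly level. A scalar illustration of that tie-breaking rule (values chosen for illustration only):

```rust
// Round-to-nearest, ties-to-even, shown with the stable scalar helper.
fn main() {
    for x in [0.5f32, 1.5, 2.5, -0.5] {
        println!("{x:>4} -> {}", x.round_ties_even()); // 0, 2, 2, -0
    }
}
```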

@@ -1462,21 +1455,15 @@ mod sealed {
#[cfg_attr(test, assert_instr(vspltb, IMM4 = 15))]
unsafe fn vspltb<const IMM4: u32>(a: vector_signed_char) -> vector_signed_char {
static_assert_uimm_bits!(IMM4, 4);
-let b = u8x16::splat(IMM4 as u8);
-vec_perm(a, a, transmute(b))
+simd_shuffle(a, a, const { u32x16::from_array([IMM4; 16]) })
}

#[inline]
#[target_feature(enable = "altivec")]
#[cfg_attr(test, assert_instr(vsplth, IMM3 = 7))]
unsafe fn vsplth<const IMM3: u32>(a: vector_signed_short) -> vector_signed_short {
static_assert_uimm_bits!(IMM3, 3);
-let b0 = IMM3 as u8 * 2;
-let b1 = b0 + 1;
-let b = u8x16::new(
-b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-);
-vec_perm(a, a, transmute(b))
+simd_shuffle(a, a, const { u32x8::from_array([IMM3; 8]) })
}

#[inline]
@@ -1485,14 +1472,7 @@ mod sealed {
#[cfg_attr(all(test, target_feature = "vsx"), assert_instr(xxspltw, IMM2 = 3))]
unsafe fn vspltw<const IMM2: u32>(a: vector_signed_int) -> vector_signed_int {
static_assert_uimm_bits!(IMM2, 2);
-let b0 = IMM2 as u8 * 4;
-let b1 = b0 + 1;
-let b2 = b0 + 2;
-let b3 = b0 + 3;
-let b = u8x16::new(
-b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, b2, b3,
-);
-vec_perm(a, a, transmute(b))
+simd_shuffle(a, a, const { u32x4::from_array([IMM2; 4]) })
}
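
The three vsplt* rewrites above share one idea: splatting a lane is a shuffle whose index vector repeats that lane, so simd_shuffle with a const index array replaces the hand-built vec_perm byte mask. A dependency-free sketch of the same idea on plain arrays:

```rust
// Splat-as-shuffle: every output lane reads input lane LANE, which is
// what simd_shuffle(a, a, [LANE; N]) expresses in the diff above.
fn splat_lane<const LANE: usize, const N: usize>(v: [i32; N]) -> [i32; N] {
    [v[LANE]; N]
}

fn main() {
    assert_eq!(splat_lane::<2, 4>([10, 20, 30, 40]), [30, 30, 30, 30]);
}
```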

#[unstable(feature = "stdarch_powerpc", issue = "111145")]
@@ -3191,9 +3171,9 @@ mod sealed {

impl_vec_shift_octect! { [VectorSro vec_sro] (vsro) }

-test_impl! { vec_vcntlzb(a: vector_signed_char) -> vector_signed_char [vclzb, vclzb] }
-test_impl! { vec_vcntlzh(a: vector_signed_short) -> vector_signed_short [vclzh, vclzh] }
-test_impl! { vec_vcntlzw(a: vector_signed_int) -> vector_signed_int [vclzw, vclzw] }
+test_impl! { vec_vcntlzb(a: vector_signed_char) -> vector_signed_char [simd_ctlz, vclzb] }
+test_impl! { vec_vcntlzh(a: vector_signed_short) -> vector_signed_short [simd_ctlz, vclzh] }
+test_impl! { vec_vcntlzw(a: vector_signed_int) -> vector_signed_int [simd_ctlz, vclzw] }
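
In these test_impl invocations the first bracketed name is the implementation called (now the generic simd_ctlz) and the second is the instruction asserted in tests, so codegen stays pinned to vclzb/vclzh/vclzw. A scalar analogue of lane-wise count-leading-zeros at each element width:

```rust
fn main() {
    assert_eq!(0x0Fu8.leading_zeros(), 4);    // one vclzb lane
    assert_eq!(0x00FFu16.leading_zeros(), 8); // one vclzh lane
    assert_eq!(1u32.leading_zeros(), 31);     // one vclzw lane
}
```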

#[unstable(feature = "stdarch_powerpc", issue = "111145")]
pub trait VectorCntlz {
3 changes: 1 addition & 2 deletions crates/core_arch/src/powerpc/macros.rs
@@ -298,8 +298,7 @@ macro_rules! impl_neg {
impl crate::ops::Neg for s_t_l!($s) {
type Output = s_t_l!($s);
fn neg(self) -> Self::Output {
-let zero = $s::splat($zero);
-unsafe { transmute(simd_sub(zero, transmute(self))) }
+unsafe { simd_neg(self) }
}
}
};
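
For the integer vectors this macro covers, the rewrite is behavior-preserving: negation is subtraction from zero under wrapping arithmetic, so simd_neg(self) computes the same lanes as simd_sub(zero, self) without the transmutes. (For float lanes, simd_neg flips the sign bit, which also yields -0.0 where 0.0 - 0.0 would yield +0.0.) A scalar check of the integer identity:

```rust
// 0 - x == -x under wrapping arithmetic, including the i32::MIN edge case.
fn main() {
    for x in [5i32, -7, i32::MIN] {
        assert_eq!(0i32.wrapping_sub(x), x.wrapping_neg());
    }
}
```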
4 changes: 4 additions & 0 deletions crates/core_arch/src/simd.rs
@@ -17,6 +17,10 @@ macro_rules! simd_ty {
pub(crate) const fn new($($param_name: $elem_type),*) -> Self {
$id([$($param_name),*])
}
+#[inline(always)]
+pub(crate) const fn from_array(elements: [$elem_type; $len]) -> Self {
+$id(elements)
+}
// FIXME: Workaround rust@60637
#[inline(always)]
pub(crate) fn splat(value: $elem_type) -> Self {
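
The new from_array is a const fn, unlike splat, so callers can build shuffle index vectors inside inline-const blocks, as the vspltb/vsplth/vspltw changes above do. A standalone sketch of the pattern, with U32x16 standing in for the simd_ty!-generated type:

```rust
// Stand-in for the simd_ty!-generated wrapper: a const constructor lets
// the shuffle index vector be built in const position.
struct U32x16([u32; 16]);

impl U32x16 {
    const fn from_array(elements: [u32; 16]) -> Self {
        U32x16(elements)
    }
}

fn main() {
    const IMM4: u32 = 15;
    // Mirrors: simd_shuffle(a, a, const { u32x16::from_array([IMM4; 16]) })
    let indices = const { U32x16::from_array([IMM4; 16]) };
    assert_eq!(indices.0[15], 15);
}
```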