diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 35874325326fb..3b6dd0c11bbf9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1448,6 +1448,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                        Custom);
     if (Subtarget.hasStdExtZfhminOrZhinxmin())
       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+    if (Subtarget.hasStdExtZfbfmin())
+      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
     if (Subtarget.hasStdExtFOrZfinx())
       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
     if (Subtarget.hasStdExtDOrZdinx())
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index 9ad1d7167c6a0..de459dac11b06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -1,12 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,RV64
-; RUN: llc -mtriple=riscv32 -mattr=+zve32f,+zvl128b,+d,+zvfh \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
+; RUN:   -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
+; RUN:   -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
+; RUN:   -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
+; RUN:   -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d < %s | FileCheck %s \
 ; RUN:   --check-prefixes=ELEN32,RV32ELEN32
-; RUN: llc -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+d,+zvfh \
+; RUN: llc -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d < %s | FileCheck %s \
 ; RUN:   --check-prefixes=ELEN32,RV64ELEN32
 
@@ -262,13 +266,92 @@ define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
   ret i64 %b
 }
 
-define half @bitcast_v2i8_f16(<2 x i8> %a) {
-; CHECK-LABEL: bitcast_v2i8_f16:
+define bfloat @bitcast_v2i8_bf16(<2 x i8> %a) {
+; CHECK-LABEL: bitcast_v2i8_bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+;
+; ELEN32-LABEL: bitcast_v2i8_bf16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vmv.x.s a0, v8
+; ELEN32-NEXT:    fmv.h.x fa0, a0
+; ELEN32-NEXT:    ret
+  %b = bitcast <2 x i8> %a to bfloat
+  ret bfloat %b
+}
+
+define bfloat @bitcast_v1i16_bf16(<1 x i16> %a) {
+; CHECK-LABEL: bitcast_v1i16_bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+;
+; ELEN32-LABEL: bitcast_v1i16_bf16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vmv.x.s a0, v8
+; ELEN32-NEXT:    fmv.h.x fa0, a0
+; ELEN32-NEXT:    ret
+  %b = bitcast <1 x i16> %a to bfloat
+  ret bfloat %b
+}
+
+define bfloat @bitcast_v1bf16_bf16(<1 x bfloat> %a) {
+; CHECK-LABEL: bitcast_v1bf16_bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+;
+; ELEN32-LABEL: bitcast_v1bf16_bf16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vmv.x.s a0, v8
+; ELEN32-NEXT:    fmv.h.x fa0, a0
+; ELEN32-NEXT:    ret
+  %b = bitcast <1 x bfloat> %a to bfloat
+  ret bfloat %b
+}
+
+define <1 x bfloat> @bitcast_bf16_v1bf16(bfloat %a) {
+; CHECK-LABEL: bitcast_bf16_v1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fmv.x.h a0, fa0
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 ;
+; ELEN32-LABEL: bitcast_bf16_v1bf16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    fmv.x.h a0, fa0
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vmv.s.x v8, a0
+; ELEN32-NEXT:    ret
+  %b = bitcast bfloat %a to <1 x bfloat>
+  ret <1 x bfloat> %b
+}
+
+define half @bitcast_v2i8_f16(<2 x i8> %a) {
+; ZVFH-LABEL: bitcast_v2i8_f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmv.f.s fa0, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: bitcast_v2i8_f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
+; ZVFHMIN-NEXT:    ret
+;
 ; ELEN32-LABEL: bitcast_v2i8_f16:
 ; ELEN32:       # %bb.0:
 ; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
@@ -279,11 +362,18 @@ define half @bitcast_v2i8_f16(<2 x i8> %a) {
 }
 
 define half @bitcast_v1i16_f16(<1 x i16> %a) {
-; CHECK-LABEL: bitcast_v1i16_f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: bitcast_v1i16_f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmv.f.s fa0, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: bitcast_v1i16_f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
+; ZVFHMIN-NEXT:    ret
 ;
 ; ELEN32-LABEL: bitcast_v1i16_f16:
 ; ELEN32:       # %bb.0:
@@ -294,6 +384,52 @@ define half @bitcast_v1i16_f16(<1 x i16> %a) {
   ret half %b
 }
 
+define half @bitcast_v1f16_f16(<1 x half> %a) {
+; ZVFH-LABEL: bitcast_v1f16_f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmv.f.s fa0, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: bitcast_v1f16_f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
+; ZVFHMIN-NEXT:    ret
+;
+; ELEN32-LABEL: bitcast_v1f16_f16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vfmv.f.s fa0, v8
+; ELEN32-NEXT:    ret
+  %b = bitcast <1 x half> %a to half
+  ret half %b
+}
+
+define <1 x half> @bitcast_f16_v1f16(half %a) {
+; ZVFH-LABEL: bitcast_f16_v1f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmv.s.f v8, fa0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: bitcast_f16_v1f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.s.x v8, a0
+; ZVFHMIN-NEXT:    ret
+;
+; ELEN32-LABEL: bitcast_f16_v1f16:
+; ELEN32:       # %bb.0:
+; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; ELEN32-NEXT:    vfmv.s.f v8, fa0
+; ELEN32-NEXT:    ret
+  %b = bitcast half %a to <1 x half>
+  ret <1 x half> %b
+}
+
 define float @bitcast_v4i8_f32(<4 x i8> %a) {
 ; CHECK-LABEL: bitcast_v4i8_f32:
 ; CHECK:       # %bb.0: