; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+f,+v -verify-machineinstrs < %s | FileCheck %s

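; The reverse of an all-ones-masked vp.load with the same EVL should fold into
; a single strided load (vlse32) with a -4 stride, reading backwards from the
; last active element.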
define <vscale x 2 x float> @test_reverse_load_combiner(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: test_reverse_load_combiner:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    addi a0, a0, -4
; CHECK-NEXT:    li a2, -4
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a2
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %load, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %rev
}

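; If the load's mask is itself a vp.reverse with matching EVL, the two reverses
; cancel and the original mask is applied directly to the strided load.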
define <vscale x 2 x float> @test_load_mask_is_vp_reverse(ptr %ptr, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_load_mask_is_vp_reverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    addi a0, a0, -4
; CHECK-NEXT:    li a2, -4
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
; CHECK-NEXT:    ret
  %loadmask = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %loadmask, i32 %evl)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %load, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %rev
}

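; Negative test: the reverse's mask is not all-ones, so the combine must not
; fire; the load stays a vle32 followed by an explicit vrgather reversal.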
define <vscale x 2 x float> @test_load_mask_not_all_one(ptr %ptr, <vscale x 2 x i1> %notallones, i32 zeroext %evl) {
; CHECK-LABEL: test_load_mask_not_all_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0), v0.t
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vrsub.vx v10, v8, a1, v0.t
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %notallones, i32 %evl)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %load, <vscale x 2 x i1> %notallones, i32 %evl)
  ret <vscale x 2 x float> %rev
}

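; Negative test: the mask reverse uses %evl1 while the load and result reverse
; use %evl2, so the mismatched EVLs block the combine and both reverses are
; expanded explicitly.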
define <vscale x 2 x float> @test_different_evl(ptr %ptr, <vscale x 2 x i1> %mask, i32 zeroext %evl1, i32 zeroext %evl2) {
; CHECK-LABEL: test_different_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a3, a1, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a3
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0), v0.t
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v10, v8, a2
; CHECK-NEXT:    vrgather.vv v8, v9, v10
; CHECK-NEXT:    ret
  %loadmask = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> splat (i1 true), i32 %evl1)
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %loadmask, i32 %evl2)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %load, <vscale x 2 x i1> splat (i1 true), i32 %evl2)
  ret <vscale x 2 x float> %rev
}

declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr nocapture, <vscale x 2 x i1>, i32)
declare <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)