diff --git a/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
new file mode 100644
index 0000000000000..69724aa75af4f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
@@ -0,0 +1,779 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck --check-prefix=GCN %s
+
+define i32 @s_out32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_and_not1_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i32 %x, %mask
+ %notmask = xor i32 %mask, -1
+ %my = and i32 %y, %notmask
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i64 @s_out64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_out64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[16:17]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i64 %x, %mask
+ %notmask = xor i64 %mask, -1
+ %my = and i64 %y, %notmask
+ %r = or i64 %mx, %my
+ ret i64 %r
+}
+
+define i32 @s_in32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i64 @s_in64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_in64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i64 %x, %y
+ %n1 = and i64 %n0, %mask
+ %r = xor i64 %n1, %y
+ ret i64 %r
+}
+; ============================================================================ ;
+; Commutativity tests.
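+; The 'in' pattern is ((x ^ y) & mask) ^ y, which selects between x and y
+; bitwise; the tests below cover every commutation of the 'and' operands and
+; of the outer 'xor' operands.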
+; ============================================================================ ;
+define i32 @s_in_commutativity_0_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_0_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_0_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Y is an 'and' too.
+; ============================================================================ ;
+define i32 @s_in_complex_y0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; M is an 'xor' too.
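+; The mask is now itself the result of an 'xor'; the combine should still
+; treat that whole value as the mask operand.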
+; ============================================================================ ;
+define i32 @s_in_complex_m0(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_m1(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+; ============================================================================ ;
+; Both Y and M are complex.
+; ============================================================================ ;
+define i32 @s_in_complex_y0_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y0_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Various cases with %x and/or %y being a constant
+; ============================================================================ ;
+define i32 @s_out_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_not1_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s0, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
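+; The mask is pre-inverted ('xor %mask, -1') before it is used.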
+define i32 @s_in_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s1, s2
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+define i32 @s_out_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_and_not1_b32 s1, 42, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_and_b32 s1, s2, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+define i32 @s_out_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, -1
+ %my = and i32 %notmask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s2, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 -1, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_mone_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_not1_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, -1
+ %my = and i32 %mask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_mone_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_mone_vary_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_not1_b32 s0, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 -1, %y
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_out_constant_42_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_42_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, 42
+; GCN-NEXT: s_and_not1_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, 42
+ %my = and i32 %notmask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_42_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_42_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s1, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 42, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_42_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_42_vary_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_not1_b32 s0, 42, s2
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, 42
+ %my = and i32 %mask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_42_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_42_vary_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s1, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 42, %y
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+; ============================================================================ ;
+; Negative tests. Should not be folded.
+; ============================================================================ ;
+; Multi-use tests.
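+; The unfold should not fire when an intermediate value has another use,
+; since that intermediate has to be computed anyway.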
+declare void @use32(i32) nounwind
+define i32 @s_in_multiuse_A(i32 inreg %x, i32 inreg %y, i32 inreg %z, i32 inreg %mask) nounwind {
+; GCN-LABEL: s_in_multiuse_A:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_mov_b32 s2, s33
+; GCN-NEXT: s_mov_b32 s33, s32
+; GCN-NEXT: s_or_saveexec_b32 s16, -1
+; GCN-NEXT: scratch_store_b32 off, v40, s33 ; 4-byte Folded Spill
+; GCN-NEXT: s_mov_b32 exec_lo, s16
+; GCN-NEXT: v_writelane_b32 v40, s2, 4
+; GCN-NEXT: s_add_i32 s32, s32, 16
+; GCN-NEXT: s_getpc_b64 s[16:17]
+; GCN-NEXT: s_add_u32 s16, s16, use32@gotpcrel32@lo+4
+; GCN-NEXT: s_addc_u32 s17, s17, use32@gotpcrel32@hi+12
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_load_b64 s[16:17], s[16:17], 0x0
+; GCN-NEXT: v_writelane_b32 v40, s30, 0
+; GCN-NEXT: v_writelane_b32 v40, s31, 1
+; GCN-NEXT: v_writelane_b32 v40, s34, 2
+; GCN-NEXT: s_mov_b32 s34, s1
+; GCN-NEXT: v_writelane_b32 v40, s35, 3
+; GCN-NEXT: s_and_b32 s35, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s35
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GCN-NEXT: s_xor_b32 s0, s35, s34
+; GCN-NEXT: v_readlane_b32 s35, v40, 3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_readlane_b32 s34, v40, 2
+; GCN-NEXT: v_readlane_b32 s31, v40, 1
+; GCN-NEXT: v_readlane_b32 s30, v40, 0
+; GCN-NEXT: s_mov_b32 s32, s33
+; GCN-NEXT: v_readlane_b32 s0, v40, 4
+; GCN-NEXT: s_or_saveexec_b32 s1, -1
+; GCN-NEXT: scratch_load_b32 v40, off, s33 ; 4-byte Folded Reload
+; GCN-NEXT: s_mov_b32 exec_lo, s1
+; GCN-NEXT: s_mov_b32 s33, s0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ call void @use32(i32 %n1)
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_multiuse_B(i32 inreg %x, i32 inreg %y, i32 inreg %z, i32 inreg %mask) nounwind {
+; GCN-LABEL: s_in_multiuse_B:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_mov_b32 s2, s33
+; GCN-NEXT: s_mov_b32 s33, s32
+; GCN-NEXT: s_or_saveexec_b32 s16, -1
+; GCN-NEXT: scratch_store_b32 off, v40, s33 ; 4-byte Folded Spill
+; GCN-NEXT: s_mov_b32 exec_lo, s16
+; GCN-NEXT: s_add_i32 s32, s32, 16
+; GCN-NEXT: s_getpc_b64 s[16:17]
+; GCN-NEXT: s_add_u32 s16, s16, use32@gotpcrel32@lo+4
+; GCN-NEXT: s_addc_u32 s17, s17, use32@gotpcrel32@hi+12
+; GCN-NEXT: v_writelane_b32 v40, s2, 4
+; GCN-NEXT: s_load_b64 s[16:17], s[16:17], 0x0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_writelane_b32 v40, s30, 0
+; GCN-NEXT: v_writelane_b32 v40, s31, 1
+; GCN-NEXT: v_writelane_b32 v40, s34, 2
+; GCN-NEXT: s_mov_b32 s34, s1
+; GCN-NEXT: v_writelane_b32 v40, s35, 3
+; GCN-NEXT: s_and_b32 s35, s0, s3
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GCN-NEXT: s_xor_b32 s0, s35, s34
+; GCN-NEXT: v_readlane_b32 s35, v40, 3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_readlane_b32 s34, v40, 2
+; GCN-NEXT: v_readlane_b32 s31, v40, 1
+; GCN-NEXT: v_readlane_b32 s30, v40, 0
+; GCN-NEXT: s_mov_b32 s32, s33
+; GCN-NEXT: v_readlane_b32 s0, v40, 4
+; GCN-NEXT: s_or_saveexec_b32 s1, -1
+; GCN-NEXT: scratch_load_b32 v40, off, s33 ; 4-byte Folded Reload
+; GCN-NEXT: s_mov_b32 exec_lo, s1
+; GCN-NEXT: s_mov_b32 s33, s0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ call void @use32(i32 %n0)
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+; Various bad variants
+define i32 @s_n0_badmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask, i32 inreg %mask2) {
+; GCN-LABEL: s_n0_badmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_and_not1_b32 s1, s1, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i32 %x, %mask
+ %notmask = xor i32 %mask2, -1
+ %my = and i32 %y, %notmask
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_n0_badxor(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_n0_badxor:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s3, s2, 1
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_and_b32 s1, s1, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i32 %x, %mask
+ %notmask = xor i32 %mask, 1
+ %my = and i32 %y, %notmask
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_n1_thirdvar(i32 inreg %x, i32 inreg %y, i32 inreg %z, i32 inreg %mask) {
+; GCN-LABEL: s_n1_thirdvar:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %z
+ ret i32 %r
+}