[NFC][AMDGPU] Add test for unfold-masked-merge-scalar-variablemask.ll #140093
Merged
Conversation
@llvm/pr-subscribers-backend-amdgpu
Author: Harrison Hao (harrisonGPU)
Changes
Patch is 29.34 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/140093.diff
1 file affected:
diff --git a/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
new file mode 100644
index 0000000000000..d580db53c253c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
@@ -0,0 +1,779 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck --check-prefix=GCN %s
+
+define i32 @s_out32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_and_not1_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i32 %x, %mask
+ %notmask = xor i32 %mask, -1
+ %my = and i32 %y, %notmask
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i64 @s_out64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_out64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[16:17]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i64 %x, %mask
+ %notmask = xor i64 %mask, -1
+ %my = and i64 %y, %notmask
+ %r = or i64 %mx, %my
+ ret i64 %r
+}
+
+define i32 @s_in32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i64 @s_in64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_in64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i64 %x, %y
+ %n1 = and i64 %n0, %mask
+ %r = xor i64 %n1, %y
+ ret i64 %r
+}
+; ============================================================================ ;
+; Commutativity tests.
+; ============================================================================ ;
+define i32 @s_in_commutativity_0_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_0_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @in_commutativity_0_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: in_commutativity_0_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Y is an 'and' too.
+; ============================================================================ ;
+define i32 @s_in_complex_y0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; M is an 'xor' too.
+; ============================================================================ ;
+define i32 @s_in_complex_m0(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_m1(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+; ============================================================================ ;
+; Both Y and M are complex.
+; ============================================================================ ;
+define i32 @s_in_complex_y0_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y0_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Various cases with %x and/or %y being a constant
+; ============================================================================ ;
+define i32 @s_out_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_not1_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s0, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s1, s2
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+define i32 @s_out_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_and_not1_b32 s1, 42, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @in_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: in_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_and_b32 s1, s2, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+define i32 @s_out_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, -1
+ %my = and i32 %notmask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s2, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 -1, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_mone_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary_invmask:
+; GCN: ...
[truncated]
what do you mean by "close" the three links?
Sorry, I made a mistake; I used 'References'.
shiltian reviewed May 19, 2025: llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll (comment outdated, resolved)
shiltian approved these changes May 19, 2025
This enables DAGCombine to recognize and form and_not + or patterns when hasAndNot is true, allowing better SALU codegen for masked merge idioms.
For example:
(A & (B ^ C)) ^ C → (A & B) | (~A & C)
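To make the rewrite concrete, below is a small standalone C++ check (an illustration added here, not part of the patch) that exhaustively verifies the identity over 8-bit values; A plays the role of %mask, B of %x, and C of %y in the IR tests above.

// Sanity check for the masked-merge identity (illustrative only, not part of
// the patch): (A & (B ^ C)) ^ C == (A & B) | (~A & C) for all 8-bit inputs.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned A = 0; A < 256; ++A)       // A corresponds to %mask
    for (unsigned B = 0; B < 256; ++B)     // B corresponds to %x
      for (unsigned C = 0; C < 256; ++C) { // C corresponds to %y
        uint8_t Folded = static_cast<uint8_t>((A & (B ^ C)) ^ C);
        uint8_t Unfolded = static_cast<uint8_t>((A & B) | (~A & C));
        assert(Folded == Unfolded && "masked-merge identity does not hold");
      }
  std::puts("masked-merge identity holds for all 8-bit inputs");
  return 0;
}

The unfolded right-hand side is the form that maps onto s_and_b32 / s_and_not1_b32 / s_or_b32 in the s_out* tests above, while the folded left-hand side matches the xor/and/xor sequence checked in the s_in* tests.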
References: