[RISCV] Add SEXT_INREG patterns for Xqcibm ext instruction #140192
Conversation
Handle sign_extend_inreg from i1/i8/i16
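The patterns in this patch map ISD::SIGN_EXTEND_INREG from i1/i8/i16 onto the Xqcibm `qc.ext` bit-field extract. Below is a minimal host-side sketch of the operation the patterns rely on, assuming `qc.ext rd, rs1, width, shift` sign-extends a `width`-bit field starting at bit `shift` (the new patterns only use `shift = 0`); the helper name `qc_ext_model` and the sample values are illustrative only and are not part of this patch.

```cpp
// Hedged sketch: a software model of the bit-field sign extension that
// qc.ext is assumed to perform. Only the combinations used by the new
// patterns (shift == 0, width in {1, 8, 16}) are exercised here.
// Assumes two's-complement arithmetic right shift on the host.
#include <cassert>
#include <cstdint>

static int32_t qc_ext_model(int32_t rs1, unsigned width, unsigned shift) {
  assert(width >= 1 && width <= 32 && shift + width <= 32);
  // Move the field to the top of the register, then arithmetic-shift it back
  // down so the field's top bit becomes the sign bit of the result.
  uint32_t up = static_cast<uint32_t>(rs1) << (32 - width - shift);
  return static_cast<int32_t>(up) >> (32 - width);
}

int main() {
  // sext_inreg X, i8  -> qc.ext X, 8, 0  (same result as slli 24 + srai 24).
  assert(qc_ext_model(0x000000F0, 8, 0) == -16);
  // sext_inreg X, i16 -> qc.ext X, 16, 0 (same result as slli 16 + srai 16).
  assert(qc_ext_model(0x00008000, 16, 0) == -32768);
  // sext_inreg X, i1  -> qc.ext X, 1, 0.
  assert(qc_ext_model(0x00000001, 1, 0) == -1);
  return 0;
}
```

Without the extension, RV32I lowers the same operation to the slli/srai pair that appears in the RV32I check lines of the new test below.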
@llvm/pr-subscribers-backend-risc-v

Author: Sudharsan Veeravalli (svs-quic)

Changes

Handle sign_extend_inreg from i1/i8/i16

Full diff: https://github.com/llvm/llvm-project/pull/140192.diff

4 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5564ca4154043..fae2cda13863d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -310,12 +310,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
- if (!Subtarget.hasVendorXTHeadBb())
+ if (!Subtarget.hasVendorXTHeadBb() && !Subtarget.hasVendorXqcibm())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb() &&
+ !Subtarget.hasVendorXqcibm() &&
!(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()))
setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 317c7d8d74801..923990c1927eb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2704,6 +2704,9 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
case RISCVOp::OPERAND_UIMM5_NONZERO:
Ok = isUInt<5>(Imm) && (Imm != 0);
break;
+ case RISCVOp::OPERAND_UIMM5_PLUS1:
+ Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);
+ break;
case RISCVOp::OPERAND_UIMM6_LSB0:
Ok = isShiftedUInt<5, 1>(Imm);
break;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index 5c109d03d78d5..5649e9b985366 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1434,6 +1434,12 @@ def : SelectQCbi<SETULT, uimm16nonzero, Select_GPR_Using_CC_Uimm16NonZero>;
def : SelectQCbi<SETUGE, uimm16nonzero, Select_GPR_Using_CC_Uimm16NonZero>;
} // let Predicates = [HasVendorXqcibi, IsRV32], AddedComplexity = 2
+let Predicates = [HasVendorXqcibm, IsRV32] in {
+def : Pat<(sext_inreg (i32 GPR:$rs1), i16), (QC_EXT GPR:$rs1, 16, 0)>;
+def : Pat<(sext_inreg (i32 GPR:$rs1), i8), (QC_EXT GPR:$rs1, 8, 0)>;
+def : Pat<(sext_inreg (i32 GPR:$rs1), i1), (QC_EXT GPR:$rs1, 1, 0)>;
+} // Predicates = [HasVendorXqcibm, IsRV32]
+
let Predicates = [HasVendorXqciint, IsRV32] in
def : Pat<(riscv_mileaveret_glue), (QC_C_MILEAVERET)>;
diff --git a/llvm/test/CodeGen/RISCV/xqcibm-extract.ll b/llvm/test/CodeGen/RISCV/xqcibm-extract.ll
new file mode 100644
index 0000000000000..3f5b949585fa3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqcibm-extract.ll
@@ -0,0 +1,233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcibm -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32XQCIBM
+
+define i32 @sexti1_i32(i1 %a) nounwind {
+; RV32I-LABEL: sexti1_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 31
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti1_i32:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 1, 0
+; RV32XQCIBM-NEXT: ret
+ %sext = sext i1 %a to i32
+ ret i32 %sext
+}
+
+define i32 @sexti1_i32_2(i32 %a) {
+; RV32I-LABEL: sexti1_i32_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 31
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti1_i32_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 1, 0
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i32 %a, 31
+ %shr = ashr exact i32 %shl, 31
+ ret i32 %shr
+}
+
+
+define i32 @sexti8_i32(i8 %a) nounwind {
+; RV32I-LABEL: sexti8_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti8_i32:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 8, 0
+; RV32XQCIBM-NEXT: ret
+ %sext = sext i8 %a to i32
+ ret i32 %sext
+}
+
+define i32 @sexti8_i32_2(i32 %a) {
+; RV32I-LABEL: sexti8_i32_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti8_i32_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 8, 0
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i32 %a, 24
+ %shr = ashr exact i32 %shl, 24
+ ret i32 %shr
+}
+
+define i32 @sexti16_i32(i16 %a) nounwind {
+; RV32I-LABEL: sexti16_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti16_i32:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 16, 0
+; RV32XQCIBM-NEXT: ret
+ %sext = sext i16 %a to i32
+ ret i32 %sext
+}
+
+define i32 @sexti16_i32_2(i32 %a) {
+; RV32I-LABEL: sexti16_i32_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti16_i32_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 16, 0
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i32 %a, 16
+ %shr = ashr exact i32 %shl, 16
+ ret i32 %shr
+}
+
+define i64 @sexti1_i64(i64 %a) {
+; RV32I-LABEL: sexti1_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 31
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti1_i64:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 1, 0
+; RV32XQCIBM-NEXT: mv a1, a0
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i64 %a, 63
+ %shr = ashr exact i64 %shl, 63
+ ret i64 %shr
+}
+
+define i64 @sexti1_i64_2(i1 %a) {
+; RV32I-LABEL: sexti1_i64_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 31
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti1_i64_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 1, 0
+; RV32XQCIBM-NEXT: mv a1, a0
+; RV32XQCIBM-NEXT: ret
+ %1 = sext i1 %a to i64
+ ret i64 %1
+}
+
+define i64 @sexti8_i64(i64 %a) {
+; RV32I-LABEL: sexti8_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a0, 24
+; RV32I-NEXT: srai a0, a1, 24
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti8_i64:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 8, 0
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i64 %a, 56
+ %shr = ashr exact i64 %shl, 56
+ ret i64 %shr
+}
+
+define i64 @sexti8_i64_2(i8 %a) {
+; RV32I-LABEL: sexti8_i64_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a0, 24
+; RV32I-NEXT: srai a0, a1, 24
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti8_i64_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 8, 0
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %1 = sext i8 %a to i64
+ ret i64 %1
+}
+
+define i64 @sexti16_i64(i64 %a) {
+; RV32I-LABEL: sexti16_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srai a0, a1, 16
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti16_i64:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 16, 0
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i64 %a, 48
+ %shr = ashr exact i64 %shl, 48
+ ret i64 %shr
+}
+
+define i64 @sexti16_i64_2(i16 %a) {
+; RV32I-LABEL: sexti16_i64_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srai a0, a1, 16
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti16_i64_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: qc.ext a0, a0, 16, 0
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %1 = sext i16 %a to i64
+ ret i64 %1
+}
+
+define i64 @sexti32_i64(i64 %a) {
+; RV32I-LABEL: sexti32_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srai a1, a0, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti32_i64:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %shl = shl i64 %a, 32
+ %shr = ashr exact i64 %shl, 32
+ ret i64 %shr
+}
+
+define i64 @sexti32_i64_2(i32 %a) {
+; RV32I-LABEL: sexti32_i64_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srai a1, a0, 31
+; RV32I-NEXT: ret
+;
+; RV32XQCIBM-LABEL: sexti32_i64_2:
+; RV32XQCIBM: # %bb.0:
+; RV32XQCIBM-NEXT: srai a1, a0, 31
+; RV32XQCIBM-NEXT: ret
+ %1 = sext i32 %a to i64
+ ret i64 %1
+}
LGTM
LGTM