diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td b/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
index d90fe2cd0e6f3..270066f815d8b 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
@@ -14,4 +14,9 @@ let TargetPrefix = "riscv" in {
   // Andes Vector Packed FP16 Extension
   defm nds_vfpmadt : RISCVBinaryAAXRoundingMode;
   defm nds_vfpmadb : RISCVBinaryAAXRoundingMode;
+
+  // Andes Vector Dot Product Extension
+  defm nds_vd4dots  : RISCVTernaryWide;
+  defm nds_vd4dotu  : RISCVTernaryWide;
+  defm nds_vd4dotsu : RISCVTernaryWide;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index 6afe88b805d35..158b62fb00659 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -388,6 +388,27 @@ multiclass VPatVFPMADBinaryV_VX_RM<string intrinsic,
   }
 }
 
+multiclass VPseudoVD4DOT_VV {
+  foreach m = [V_MF2, V_M1, V_M2, V_M4, V_M8] in {
+    defm "" : VPseudoBinaryV_VV<m>,
+              SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
+                          forcePassthruRead=true>;
+  }
+}
+
+multiclass VPatTernaryVD4DOT_VV<string intrinsic, string instruction,
+                                list<VTypeInfoToWide> vtilist> {
+  foreach vtiToWti = vtilist in {
+    defvar vti = vtiToWti.Vti;
+    defvar wti = vtiToWti.Wti;
+    let Predicates = GetVTypePredicates<wti>.Predicates in
+    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
+                                 wti.Vector, vti.Vector, vti.Vector,
+                                 wti.Mask, wti.Log2SEW, vti.LMul,
+                                 wti.RegClass, vti.RegClass, vti.RegClass>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // XAndesPerf
 //===----------------------------------------------------------------------===//
@@ -499,3 +520,25 @@ defm PseudoNDS_VFPMADB : VPseudoVFPMAD_VF_RM;
 
 defm : VPatVFPMADBinaryV_VX_RM<"int_riscv_nds_vfpmadt", "PseudoNDS_VFPMADT", AllFP16Vectors>;
 defm : VPatVFPMADBinaryV_VX_RM<"int_riscv_nds_vfpmadb", "PseudoNDS_VFPMADB", AllFP16Vectors>;
+
+let Predicates = [HasVendorXAndesVDot] in {
+defm PseudoNDS_VD4DOTS  : VPseudoVD4DOT_VV;
+defm PseudoNDS_VD4DOTU  : VPseudoVD4DOT_VV;
+defm PseudoNDS_VD4DOTSU : VPseudoVD4DOT_VV;
+}
+
+defset list<VTypeInfoToWide> AllQuadWidenableVD4DOTVectors = {
+  def : VTypeInfoToWide<VI8MF2, VI32MF2>;
+  def : VTypeInfoToWide<VI8M1, VI32M1>;
+  def : VTypeInfoToWide<VI8M2, VI32M2>;
+  def : VTypeInfoToWide<VI8M4, VI32M4>;
+  def : VTypeInfoToWide<VI8M8, VI32M8>;
+  def : VTypeInfoToWide<VI16M1, VI64M1>;
+  def : VTypeInfoToWide<VI16M2, VI64M2>;
+  def : VTypeInfoToWide<VI16M4, VI64M4>;
+  def : VTypeInfoToWide<VI16M8, VI64M8>;
+}
+
+defm : VPatTernaryVD4DOT_VV<"int_riscv_nds_vd4dots", "PseudoNDS_VD4DOTS", AllQuadWidenableVD4DOTVectors>;
+defm : VPatTernaryVD4DOT_VV<"int_riscv_nds_vd4dotu", "PseudoNDS_VD4DOTU", AllQuadWidenableVD4DOTVectors>;
+defm : VPatTernaryVD4DOT_VV<"int_riscv_nds_vd4dotsu", "PseudoNDS_VD4DOTSU", AllQuadWidenableVD4DOTVectors>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dots.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dots.ll
new file mode 100644
index 0000000000000..1a5abe352ced1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dots.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=ilp32 | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
+
+define <vscale x 1 x i32> @intrinsic_vd4dots_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dots.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vd4dots_mask_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dots.mask.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dots_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dots.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dots_mask_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dots.mask.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dots_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dots.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dots_mask_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dots.mask.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dots_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dots.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dots_mask_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dots.mask.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dots_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dots.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dots_mask_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dots.mask.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dots_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dots.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dots_mask_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dots.mask.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dots_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dots.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dots_mask_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dots.mask.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dots_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dots.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dots_mask_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dots.mask.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dots_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dots.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dots_mask_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dots_mask_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dots.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dots.mask.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotsu.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotsu.ll
new file mode 100644
index 0000000000000..01194ca2c9e4d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotsu.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=ilp32 | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
+
+define <vscale x 1 x i32> @intrinsic_vd4dotsu_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dotsu.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vd4dotsu_mask_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dotsu.mask.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dotsu_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dotsu.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dotsu_mask_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dotsu.mask.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dotsu_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dotsu.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dotsu_mask_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dotsu.mask.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dotsu_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dotsu.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dotsu_mask_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dotsu.mask.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dotsu_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dotsu.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dotsu_mask_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dotsu.mask.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dotsu_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dotsu.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dotsu_mask_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dotsu.mask.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dotsu_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dotsu.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dotsu_mask_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dotsu.mask.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dotsu_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dotsu.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dotsu_mask_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dotsu.mask.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dotsu_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dotsu.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dotsu_mask_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotsu_mask_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotsu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dotsu.mask.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotu.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotu.ll
new file mode 100644
index 0000000000000..9e29575ecdd85
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvdot-vd4dotu.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=ilp32 | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvdot \
+; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
+
+define <vscale x 1 x i32> @intrinsic_vd4dotu_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dotu.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vd4dotu_mask_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i32> @llvm.riscv.nds.vd4dotu.mask.nxv1i32.nxv4i8.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dotu_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dotu.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vd4dotu_mask_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i32> @llvm.riscv.nds.vd4dotu.mask.nxv2i32.nxv8i8.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dotu_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dotu.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vd4dotu_mask_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i32> @llvm.riscv.nds.vd4dotu.mask.nxv4i32.nxv16i8.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dotu_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dotu.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vd4dotu_mask_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i32> @llvm.riscv.nds.vd4dotu.mask.nxv8i32.nxv32i8.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dotu_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dotu.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vd4dotu_mask_vv_nxv16i32_nxv64i8_nxv64i8(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv16i32_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 16 x i32> @llvm.riscv.nds.vd4dotu.mask.nxv16i32.nxv64i8.nxv64i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dotu_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dotu.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vd4dotu_mask_vv_nxv1i64_nxv4i16_nxv4i16(<vscale x 1 x i64> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv1i64_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 1 x i64> @llvm.riscv.nds.vd4dotu.mask.nxv1i64.nxv4i16.nxv4i16(
+    <vscale x 1 x i64> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dotu_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dotu.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vd4dotu_mask_vv_nxv2i64_nxv8i16_nxv8i16(<vscale x 2 x i64> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv2i64_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 2 x i64> @llvm.riscv.nds.vd4dotu.mask.nxv2i64.nxv8i16.nxv8i16(
+    <vscale x 2 x i64> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dotu_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dotu.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vd4dotu_mask_vv_nxv4i64_nxv16i16_nxv16i16(<vscale x 4 x i64> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv4i64_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 4 x i64> @llvm.riscv.nds.vd4dotu.mask.nxv4i64.nxv16i16.nxv16i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dotu_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dotu.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    iXLen %3, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vd4dotu_mask_vv_nxv8i64_nxv32i16_nxv32i16(<vscale x 8 x i64> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vd4dotu_mask_vv_nxv8i64_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    nds.vd4dotu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = tail call <vscale x 8 x i64> @llvm.riscv.nds.vd4dotu.mask.nxv8i64.nxv32i16.nxv32i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 2)
+  ret <vscale x 8 x i64> %a
+}