diff --git a/llvm/include/llvm/CodeGen/SDNodeInfo.h b/llvm/include/llvm/CodeGen/SDNodeInfo.h index 3992db31638b8..ba6c343ee1838 100644 --- a/llvm/include/llvm/CodeGen/SDNodeInfo.h +++ b/llvm/include/llvm/CodeGen/SDNodeInfo.h @@ -55,12 +55,14 @@ struct SDTypeConstraint { MVT::SimpleValueType VT; }; +using SDNodeTSFlags = uint32_t; + struct SDNodeDesc { uint16_t NumResults; int16_t NumOperands; uint32_t Properties; uint32_t Flags; - uint32_t TSFlags; + SDNodeTSFlags TSFlags; unsigned NameOffset; unsigned ConstraintOffset; unsigned ConstraintCount; diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index b0456aa25f09e..e32d6eab3b977 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -16,6 +16,7 @@ tablegen(LLVM RISCVGenRegisterInfo.inc -gen-register-info) tablegen(LLVM RISCVGenSearchableTables.inc -gen-searchable-tables) tablegen(LLVM RISCVGenSubtargetInfo.inc -gen-subtarget) tablegen(LLVM RISCVGenExegesis.inc -gen-exegesis) +tablegen(LLVM RISCVGenSDNodeInfo.inc -gen-sd-node-info) set(LLVM_TARGET_DEFINITIONS RISCVGISel.td) tablegen(LLVM RISCVGenGlobalISel.inc -gen-global-isel) diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 86bdb4c7fd24c..9648349d2ab76 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -16,6 +16,7 @@ #include "MCTargetDesc/RISCVMatInt.h" #include "RISCVISelLowering.h" #include "RISCVInstrInfo.h" +#include "RISCVSelectionDAGInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/IR/IntrinsicsRISCV.h" #include "llvm/Support/Alignment.h" @@ -34,6 +35,9 @@ static cl::opt<bool> UsePseudoMovImm( "constant materialization"), cl::init(false)); +#define GET_DAGISEL_BODY RISCVDAGToDAGISel +#include "RISCVGenDAGISel.inc" + void RISCVDAGToDAGISel::PreprocessISelDAG() { SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); diff --git 
a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h index 0672b6ad8829e..af8d235c54012 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -191,6 +191,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel { } // Include the pieces autogenerated from the target description. +#define GET_DAGISEL_DECL #include "RISCVGenDAGISel.inc" private: diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index fd1d4d439fd7b..c53550ea3b23b 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -6920,50 +6920,6 @@ static unsigned getRISCVVLOp(SDValue Op) { #undef VP_CASE } -/// Return true if a RISC-V target specified op has a passthru operand. -static bool hasPassthruOp(unsigned Opcode) { - assert(Opcode > RISCVISD::FIRST_NUMBER && - Opcode <= RISCVISD::LAST_STRICTFP_OPCODE && - "not a RISC-V target specific op"); - static_assert( - RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 137 && - RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 && - "adding target specific op should update this function"); - if (Opcode >= RISCVISD::ADD_VL && Opcode <= RISCVISD::VFMAX_VL) - return true; - if (Opcode == RISCVISD::FCOPYSIGN_VL) - return true; - if (Opcode >= RISCVISD::VWMUL_VL && Opcode <= RISCVISD::VFWSUB_W_VL) - return true; - if (Opcode == RISCVISD::SETCC_VL) - return true; - if (Opcode >= RISCVISD::STRICT_FADD_VL && Opcode <= RISCVISD::STRICT_FDIV_VL) - return true; - if (Opcode == RISCVISD::VMERGE_VL) - return true; - return false; -} - -/// Return true if a RISC-V target specified op has a mask operand. 
-static bool hasMaskOp(unsigned Opcode) { - assert(Opcode > RISCVISD::FIRST_NUMBER && - Opcode <= RISCVISD::LAST_STRICTFP_OPCODE && - "not a RISC-V target specific op"); - static_assert( - RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 137 && - RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 && - "adding target specific op should update this function"); - if (Opcode >= RISCVISD::TRUNCATE_VECTOR_VL && Opcode <= RISCVISD::SETCC_VL) - return true; - if (Opcode >= RISCVISD::VRGATHER_VX_VL && - Opcode <= RISCVISD::LAST_VL_VECTOR_OP) - return true; - if (Opcode >= RISCVISD::STRICT_FADD_VL && - Opcode <= RISCVISD::STRICT_VFROUND_NOEXCEPT_VL) - return true; - return false; -} - static bool isPromotedOpNeedingSplit(SDValue Op, const RISCVSubtarget &Subtarget) { if (Op.getValueType() == MVT::nxv32f16 && @@ -12535,9 +12491,12 @@ SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const { + const auto &TSInfo = + static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo()); + unsigned NewOpc = getRISCVVLOp(Op); - bool HasPassthruOp = hasPassthruOp(NewOpc); - bool HasMask = hasMaskOp(NewOpc); + bool HasPassthruOp = TSInfo.hasPassthruOp(NewOpc); + bool HasMask = TSInfo.hasMaskOp(NewOpc); MVT VT = Op.getSimpleValueType(); MVT ContainerVT = getContainerForFixedLengthVector(VT); @@ -12588,8 +12547,11 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, // * Fixed-length vectors are converted to their scalable-vector container // types. 
SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const { + const auto &TSInfo = + static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo()); + unsigned RISCVISDOpc = getRISCVVLOp(Op); - bool HasPassthruOp = hasPassthruOp(RISCVISDOpc); + bool HasPassthruOp = TSInfo.hasPassthruOp(RISCVISDOpc); SDLoc DL(Op); MVT VT = Op.getSimpleValueType(); @@ -13564,7 +13526,7 @@ SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op, // Returns the opcode of the target-specific SDNode that implements the 32-bit // form of the given Opcode. -static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { +static unsigned getRISCVWOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("Unexpected opcode"); @@ -13595,7 +13557,7 @@ static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, unsigned ExtOpc = ISD::ANY_EXTEND) { SDLoc DL(N); - RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); + unsigned WOpcode = getRISCVWOpcode(N->getOpcode()); SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); @@ -18486,15 +18448,9 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG, if (AddMask != MulMask || AddVL != MulVL) return SDValue(); - unsigned Opc = RISCVISD::VWMACC_VL + MulOp.getOpcode() - RISCVISD::VWMUL_VL; - static_assert(RISCVISD::VWMACC_VL + 1 == RISCVISD::VWMACCU_VL, - "Unexpected opcode after VWMACC_VL"); - static_assert(RISCVISD::VWMACC_VL + 2 == RISCVISD::VWMACCSU_VL, - "Unexpected opcode after VWMACC_VL!"); - static_assert(RISCVISD::VWMUL_VL + 1 == RISCVISD::VWMULU_VL, - "Unexpected opcode after VWMUL_VL!"); - static_assert(RISCVISD::VWMUL_VL + 2 == RISCVISD::VWMULSU_VL, - "Unexpected opcode after VWMUL_VL!"); + const auto &TSInfo = + static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo()); + unsigned Opc = 
TSInfo.getMAccOpcode(MulOp.getOpcode()); SDLoc DL(N); EVT VT = N->getValueType(0); @@ -22241,286 +22197,6 @@ bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { return CI->isTailCall(); } -const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { -#define NODE_NAME_CASE(NODE) \ - case RISCVISD::NODE: \ - return "RISCVISD::" #NODE; - // clang-format off - switch ((RISCVISD::NodeType)Opcode) { - case RISCVISD::FIRST_NUMBER: - break; - NODE_NAME_CASE(RET_GLUE) - NODE_NAME_CASE(SRET_GLUE) - NODE_NAME_CASE(MRET_GLUE) - NODE_NAME_CASE(QC_C_MILEAVERET_GLUE) - NODE_NAME_CASE(CALL) - NODE_NAME_CASE(TAIL) - NODE_NAME_CASE(SELECT_CC) - NODE_NAME_CASE(BR_CC) - NODE_NAME_CASE(BuildGPRPair) - NODE_NAME_CASE(SplitGPRPair) - NODE_NAME_CASE(BuildPairF64) - NODE_NAME_CASE(SplitF64) - NODE_NAME_CASE(ADD_LO) - NODE_NAME_CASE(HI) - NODE_NAME_CASE(LLA) - NODE_NAME_CASE(ADD_TPREL) - NODE_NAME_CASE(MULHSU) - NODE_NAME_CASE(SHL_ADD) - NODE_NAME_CASE(SLLW) - NODE_NAME_CASE(SRAW) - NODE_NAME_CASE(SRLW) - NODE_NAME_CASE(DIVW) - NODE_NAME_CASE(DIVUW) - NODE_NAME_CASE(REMUW) - NODE_NAME_CASE(ROLW) - NODE_NAME_CASE(RORW) - NODE_NAME_CASE(CLZW) - NODE_NAME_CASE(CTZW) - NODE_NAME_CASE(ABSW) - NODE_NAME_CASE(FMV_H_X) - NODE_NAME_CASE(FMV_X_ANYEXTH) - NODE_NAME_CASE(FMV_X_SIGNEXTH) - NODE_NAME_CASE(FMV_W_X_RV64) - NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) - NODE_NAME_CASE(FCVT_X) - NODE_NAME_CASE(FCVT_XU) - NODE_NAME_CASE(FCVT_W_RV64) - NODE_NAME_CASE(FCVT_WU_RV64) - NODE_NAME_CASE(STRICT_FCVT_W_RV64) - NODE_NAME_CASE(STRICT_FCVT_WU_RV64) - NODE_NAME_CASE(FROUND) - NODE_NAME_CASE(FCLASS) - NODE_NAME_CASE(FSGNJX) - NODE_NAME_CASE(FMAX) - NODE_NAME_CASE(FMIN) - NODE_NAME_CASE(FLI) - NODE_NAME_CASE(READ_COUNTER_WIDE) - NODE_NAME_CASE(BREV8) - NODE_NAME_CASE(ORC_B) - NODE_NAME_CASE(ZIP) - NODE_NAME_CASE(UNZIP) - NODE_NAME_CASE(CLMUL) - NODE_NAME_CASE(CLMULH) - NODE_NAME_CASE(CLMULR) - NODE_NAME_CASE(MOPR) - NODE_NAME_CASE(MOPRR) - NODE_NAME_CASE(SHA256SIG0) - 
NODE_NAME_CASE(SHA256SIG1) - NODE_NAME_CASE(SHA256SUM0) - NODE_NAME_CASE(SHA256SUM1) - NODE_NAME_CASE(SM4KS) - NODE_NAME_CASE(SM4ED) - NODE_NAME_CASE(SM3P0) - NODE_NAME_CASE(SM3P1) - NODE_NAME_CASE(TH_LWD) - NODE_NAME_CASE(TH_LWUD) - NODE_NAME_CASE(TH_LDD) - NODE_NAME_CASE(TH_SWD) - NODE_NAME_CASE(TH_SDD) - NODE_NAME_CASE(VMV_V_V_VL) - NODE_NAME_CASE(VMV_V_X_VL) - NODE_NAME_CASE(VFMV_V_F_VL) - NODE_NAME_CASE(VMV_X_S) - NODE_NAME_CASE(VMV_S_X_VL) - NODE_NAME_CASE(VFMV_S_F_VL) - NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL) - NODE_NAME_CASE(READ_VLENB) - NODE_NAME_CASE(TRUNCATE_VECTOR_VL) - NODE_NAME_CASE(TRUNCATE_VECTOR_VL_SSAT) - NODE_NAME_CASE(TRUNCATE_VECTOR_VL_USAT) - NODE_NAME_CASE(VSLIDEUP_VL) - NODE_NAME_CASE(VSLIDE1UP_VL) - NODE_NAME_CASE(VSLIDEDOWN_VL) - NODE_NAME_CASE(VSLIDE1DOWN_VL) - NODE_NAME_CASE(VFSLIDE1UP_VL) - NODE_NAME_CASE(VFSLIDE1DOWN_VL) - NODE_NAME_CASE(VID_VL) - NODE_NAME_CASE(VFNCVT_ROD_VL) - NODE_NAME_CASE(VECREDUCE_ADD_VL) - NODE_NAME_CASE(VECREDUCE_UMAX_VL) - NODE_NAME_CASE(VECREDUCE_SMAX_VL) - NODE_NAME_CASE(VECREDUCE_UMIN_VL) - NODE_NAME_CASE(VECREDUCE_SMIN_VL) - NODE_NAME_CASE(VECREDUCE_AND_VL) - NODE_NAME_CASE(VECREDUCE_OR_VL) - NODE_NAME_CASE(VECREDUCE_XOR_VL) - NODE_NAME_CASE(VECREDUCE_FADD_VL) - NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL) - NODE_NAME_CASE(VECREDUCE_FMIN_VL) - NODE_NAME_CASE(VECREDUCE_FMAX_VL) - NODE_NAME_CASE(ADD_VL) - NODE_NAME_CASE(AND_VL) - NODE_NAME_CASE(MUL_VL) - NODE_NAME_CASE(OR_VL) - NODE_NAME_CASE(SDIV_VL) - NODE_NAME_CASE(SHL_VL) - NODE_NAME_CASE(SREM_VL) - NODE_NAME_CASE(SRA_VL) - NODE_NAME_CASE(SRL_VL) - NODE_NAME_CASE(ROTL_VL) - NODE_NAME_CASE(ROTR_VL) - NODE_NAME_CASE(SUB_VL) - NODE_NAME_CASE(UDIV_VL) - NODE_NAME_CASE(UREM_VL) - NODE_NAME_CASE(XOR_VL) - NODE_NAME_CASE(AVGFLOORS_VL) - NODE_NAME_CASE(AVGFLOORU_VL) - NODE_NAME_CASE(AVGCEILS_VL) - NODE_NAME_CASE(AVGCEILU_VL) - NODE_NAME_CASE(SADDSAT_VL) - NODE_NAME_CASE(UADDSAT_VL) - NODE_NAME_CASE(SSUBSAT_VL) - NODE_NAME_CASE(USUBSAT_VL) - NODE_NAME_CASE(FADD_VL) - 
NODE_NAME_CASE(FSUB_VL) - NODE_NAME_CASE(FMUL_VL) - NODE_NAME_CASE(FDIV_VL) - NODE_NAME_CASE(FNEG_VL) - NODE_NAME_CASE(FABS_VL) - NODE_NAME_CASE(FSQRT_VL) - NODE_NAME_CASE(FCLASS_VL) - NODE_NAME_CASE(VFMADD_VL) - NODE_NAME_CASE(VFNMADD_VL) - NODE_NAME_CASE(VFMSUB_VL) - NODE_NAME_CASE(VFNMSUB_VL) - NODE_NAME_CASE(VFWMADD_VL) - NODE_NAME_CASE(VFWNMADD_VL) - NODE_NAME_CASE(VFWMSUB_VL) - NODE_NAME_CASE(VFWNMSUB_VL) - NODE_NAME_CASE(FCOPYSIGN_VL) - NODE_NAME_CASE(SMIN_VL) - NODE_NAME_CASE(SMAX_VL) - NODE_NAME_CASE(UMIN_VL) - NODE_NAME_CASE(UMAX_VL) - NODE_NAME_CASE(BITREVERSE_VL) - NODE_NAME_CASE(BSWAP_VL) - NODE_NAME_CASE(CTLZ_VL) - NODE_NAME_CASE(CTTZ_VL) - NODE_NAME_CASE(CTPOP_VL) - NODE_NAME_CASE(VFMIN_VL) - NODE_NAME_CASE(VFMAX_VL) - NODE_NAME_CASE(MULHS_VL) - NODE_NAME_CASE(MULHU_VL) - NODE_NAME_CASE(VFCVT_RTZ_X_F_VL) - NODE_NAME_CASE(VFCVT_RTZ_XU_F_VL) - NODE_NAME_CASE(VFCVT_RM_X_F_VL) - NODE_NAME_CASE(VFCVT_RM_XU_F_VL) - NODE_NAME_CASE(VFROUND_NOEXCEPT_VL) - NODE_NAME_CASE(SINT_TO_FP_VL) - NODE_NAME_CASE(UINT_TO_FP_VL) - NODE_NAME_CASE(VFCVT_RM_F_XU_VL) - NODE_NAME_CASE(VFCVT_RM_F_X_VL) - NODE_NAME_CASE(FP_EXTEND_VL) - NODE_NAME_CASE(FP_ROUND_VL) - NODE_NAME_CASE(STRICT_FADD_VL) - NODE_NAME_CASE(STRICT_FSUB_VL) - NODE_NAME_CASE(STRICT_FMUL_VL) - NODE_NAME_CASE(STRICT_FDIV_VL) - NODE_NAME_CASE(STRICT_FSQRT_VL) - NODE_NAME_CASE(STRICT_VFMADD_VL) - NODE_NAME_CASE(STRICT_VFNMADD_VL) - NODE_NAME_CASE(STRICT_VFMSUB_VL) - NODE_NAME_CASE(STRICT_VFNMSUB_VL) - NODE_NAME_CASE(STRICT_FP_ROUND_VL) - NODE_NAME_CASE(STRICT_FP_EXTEND_VL) - NODE_NAME_CASE(STRICT_VFNCVT_ROD_VL) - NODE_NAME_CASE(STRICT_SINT_TO_FP_VL) - NODE_NAME_CASE(STRICT_UINT_TO_FP_VL) - NODE_NAME_CASE(STRICT_VFCVT_RM_X_F_VL) - NODE_NAME_CASE(STRICT_VFCVT_RTZ_X_F_VL) - NODE_NAME_CASE(STRICT_VFCVT_RTZ_XU_F_VL) - NODE_NAME_CASE(STRICT_FSETCC_VL) - NODE_NAME_CASE(STRICT_FSETCCS_VL) - NODE_NAME_CASE(STRICT_VFROUND_NOEXCEPT_VL) - NODE_NAME_CASE(VWMUL_VL) - NODE_NAME_CASE(VWMULU_VL) - NODE_NAME_CASE(VWMULSU_VL) - 
NODE_NAME_CASE(VWADD_VL) - NODE_NAME_CASE(VWADDU_VL) - NODE_NAME_CASE(VWSUB_VL) - NODE_NAME_CASE(VWSUBU_VL) - NODE_NAME_CASE(VWADD_W_VL) - NODE_NAME_CASE(VWADDU_W_VL) - NODE_NAME_CASE(VWSUB_W_VL) - NODE_NAME_CASE(VWSUBU_W_VL) - NODE_NAME_CASE(VWSLL_VL) - NODE_NAME_CASE(VFWMUL_VL) - NODE_NAME_CASE(VFWADD_VL) - NODE_NAME_CASE(VFWSUB_VL) - NODE_NAME_CASE(VFWADD_W_VL) - NODE_NAME_CASE(VFWSUB_W_VL) - NODE_NAME_CASE(VWMACC_VL) - NODE_NAME_CASE(VWMACCU_VL) - NODE_NAME_CASE(VWMACCSU_VL) - NODE_NAME_CASE(SETCC_VL) - NODE_NAME_CASE(VMERGE_VL) - NODE_NAME_CASE(VMAND_VL) - NODE_NAME_CASE(VMOR_VL) - NODE_NAME_CASE(VMXOR_VL) - NODE_NAME_CASE(VMCLR_VL) - NODE_NAME_CASE(VMSET_VL) - NODE_NAME_CASE(VRGATHER_VX_VL) - NODE_NAME_CASE(VRGATHER_VV_VL) - NODE_NAME_CASE(VRGATHEREI16_VV_VL) - NODE_NAME_CASE(VSEXT_VL) - NODE_NAME_CASE(VZEXT_VL) - NODE_NAME_CASE(VCPOP_VL) - NODE_NAME_CASE(VFIRST_VL) - NODE_NAME_CASE(RI_VINSERT_VL) - NODE_NAME_CASE(RI_VZIPEVEN_VL) - NODE_NAME_CASE(RI_VZIPODD_VL) - NODE_NAME_CASE(RI_VZIP2A_VL) - NODE_NAME_CASE(RI_VZIP2B_VL) - NODE_NAME_CASE(RI_VUNZIP2A_VL) - NODE_NAME_CASE(RI_VUNZIP2B_VL) - NODE_NAME_CASE(RI_VEXTRACT) - NODE_NAME_CASE(VQDOT_VL) - NODE_NAME_CASE(VQDOTU_VL) - NODE_NAME_CASE(VQDOTSU_VL) - NODE_NAME_CASE(READ_CSR) - NODE_NAME_CASE(WRITE_CSR) - NODE_NAME_CASE(SWAP_CSR) - NODE_NAME_CASE(CZERO_EQZ) - NODE_NAME_CASE(CZERO_NEZ) - NODE_NAME_CASE(SW_GUARDED_BRIND) - NODE_NAME_CASE(SW_GUARDED_CALL) - NODE_NAME_CASE(SW_GUARDED_TAIL) - NODE_NAME_CASE(TUPLE_INSERT) - NODE_NAME_CASE(TUPLE_EXTRACT) - NODE_NAME_CASE(SF_VC_XV_SE) - NODE_NAME_CASE(SF_VC_IV_SE) - NODE_NAME_CASE(SF_VC_VV_SE) - NODE_NAME_CASE(SF_VC_FV_SE) - NODE_NAME_CASE(SF_VC_XVV_SE) - NODE_NAME_CASE(SF_VC_IVV_SE) - NODE_NAME_CASE(SF_VC_VVV_SE) - NODE_NAME_CASE(SF_VC_FVV_SE) - NODE_NAME_CASE(SF_VC_XVW_SE) - NODE_NAME_CASE(SF_VC_IVW_SE) - NODE_NAME_CASE(SF_VC_VVW_SE) - NODE_NAME_CASE(SF_VC_FVW_SE) - NODE_NAME_CASE(SF_VC_V_X_SE) - NODE_NAME_CASE(SF_VC_V_I_SE) - NODE_NAME_CASE(SF_VC_V_XV_SE) - 
NODE_NAME_CASE(SF_VC_V_IV_SE) - NODE_NAME_CASE(SF_VC_V_VV_SE) - NODE_NAME_CASE(SF_VC_V_FV_SE) - NODE_NAME_CASE(SF_VC_V_XVV_SE) - NODE_NAME_CASE(SF_VC_V_IVV_SE) - NODE_NAME_CASE(SF_VC_V_VVV_SE) - NODE_NAME_CASE(SF_VC_V_FVV_SE) - NODE_NAME_CASE(SF_VC_V_XVW_SE) - NODE_NAME_CASE(SF_VC_V_IVW_SE) - NODE_NAME_CASE(SF_VC_V_VVW_SE) - NODE_NAME_CASE(SF_VC_V_FVW_SE) - NODE_NAME_CASE(PROBED_ALLOCA) - } - // clang-format on - return nullptr; -#undef NODE_NAME_CASE -} - /// getConstraintType - Given a constraint letter, return the type of /// constraint it is for this target. RISCVTargetLowering::ConstraintType diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 65d433e045423..c865da80bcdf6 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -26,512 +26,6 @@ class InstructionCost; class RISCVSubtarget; struct RISCVRegisterInfo; -namespace RISCVISD { -// clang-format off -enum NodeType : unsigned { - FIRST_NUMBER = ISD::BUILTIN_OP_END, - RET_GLUE, - SRET_GLUE, - MRET_GLUE, - QC_C_MILEAVERET_GLUE, - CALL, - TAIL, - /// Select with condition operator - This selects between a true value and - /// a false value (ops #3 and #4) based on the boolean result of comparing - /// the lhs and rhs (ops #0 and #1) of a conditional expression with the - /// condition code in op #2, a XLenVT constant from the ISD::CondCode enum. - /// The lhs and rhs are XLenVT integers. The true and false values can be - /// integer or floating point. - SELECT_CC, - BR_CC, - - /// Turn a pair of `i`s into an even-odd register pair (`untyped`). - /// - Output: `untyped` even-odd register pair - /// - Input 0: `i` low-order bits, for even register. - /// - Input 1: `i` high-order bits, for odd register. - BuildGPRPair, - - /// Turn an even-odd register pair (`untyped`) into a pair of `i`s. - /// - Output 0: `i` low-order bits, from even register. - /// - Output 1: `i` high-order bits, from odd register. 
- /// - Input: `untyped` even-odd register pair - SplitGPRPair, - - /// Turns a pair of `i32`s into an `f64`. Needed for rv32d/ilp32. - /// - Output: `f64`. - /// - Input 0: low-order bits (31-0) (as `i32`), for even register. - /// - Input 1: high-order bits (63-32) (as `i32`), for odd register. - BuildPairF64, - - /// Turns a `f64` into a pair of `i32`s. Needed for rv32d/ilp32. - /// - Output 0: low-order bits (31-0) (as `i32`), from even register. - /// - Output 1: high-order bits (63-32) (as `i32`), from odd register. - /// - Input 0: `f64`. - SplitF64, - - // Add the Lo 12 bits from an address. Selected to ADDI. - ADD_LO, - // Get the Hi 20 bits from an address. Selected to LUI. - HI, - - // Represents an AUIPC+ADDI pair. Selected to PseudoLLA. - LLA, - - // Selected as PseudoAddTPRel. Used to emit a TP-relative relocation. - ADD_TPREL, - - // Multiply high for signedxunsigned. - MULHSU, - - // Represents (ADD (SHL a, b), c) with the arguments appearing in the order - // a, b, c. 'b' must be a constant. Maps to sh1add/sh2add/sh3add with zba - // or addsl with XTheadBa. - SHL_ADD, - - // RV64I shifts, directly matching the semantics of the named RISC-V - // instructions. - SLLW, - SRAW, - SRLW, - // 32-bit operations from RV64M that can't be simply matched with a pattern - // at instruction selection time. These have undefined behavior for division - // by 0 or overflow (divw) like their target independent counterparts. - DIVW, - DIVUW, - REMUW, - // RV64IB rotates, directly matching the semantics of the named RISC-V - // instructions. - ROLW, - RORW, - // RV64IZbb bit counting instructions directly matching the semantics of the - // named RISC-V instructions. - CLZW, - CTZW, - - // RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel. - ABSW, - - // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as - // XLEN is the only legal integer width. - // - // FMV_H_X matches the semantics of the FMV.H.X. 
- // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result. - // FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result. - // FMV_W_X_RV64 matches the semantics of the FMV.W.X. - // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result. - // - // This is a more convenient semantic for producing dagcombines that remove - // unnecessary GPR->FPR->GPR moves. - FMV_H_X, - FMV_X_ANYEXTH, - FMV_X_SIGNEXTH, - FMV_W_X_RV64, - FMV_X_ANYEXTW_RV64, - // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and - // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of - // range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode - // is passed as a TargetConstant operand using the RISCVFPRndMode enum. - FCVT_X, - FCVT_XU, - // FP to 32 bit int conversions for RV64. These are used to keep track of the - // result being sign extended to 64 bit. These saturate out of range inputs. - // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode - // is passed as a TargetConstant operand using the RISCVFPRndMode enum. - FCVT_W_RV64, - FCVT_WU_RV64, - - // Rounds an FP value to its corresponding integer in the same FP format. - // First operand is the value to round, the second operand is the largest - // integer that can be represented exactly in the FP format. This will be - // expanded into multiple instructions and basic blocks with a custom - // inserter. - FROUND, - - FCLASS, - FSGNJX, - - // Floating point fmax and fmin matching the RISC-V instruction semantics. - FMAX, FMIN, - - // Zfa fli instruction for constant materialization. - FLI, - - // A read of the 64-bit counter CSR on a 32-bit target (returns (Lo, Hi)). - // It takes a chain operand and another two target constant operands (the - // CSR numbers of the low and high parts of the counter). - READ_COUNTER_WIDE, - - // brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or - // XLenVT. 
- BREV8, - ORC_B, - ZIP, - UNZIP, - - // Scalar cryptography - CLMUL, CLMULH, CLMULR, - SHA256SIG0, SHA256SIG1, SHA256SUM0, SHA256SUM1, - SM4KS, SM4ED, - SM3P0, SM3P1, - - // May-Be-Operations - MOPR, MOPRR, - - // Vector Extension - FIRST_VL_VECTOR_OP, - // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand - // for the VL value to be used for the operation. The first operand is - // passthru operand. - VMV_V_V_VL = FIRST_VL_VECTOR_OP, - // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand - // for the VL value to be used for the operation. The first operand is - // passthru operand. - VMV_V_X_VL, - // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand - // for the VL value to be used for the operation. The first operand is - // passthru operand. - VFMV_V_F_VL, - // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign - // extended from the vector element size. - VMV_X_S, - // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand. - VMV_S_X_VL, - // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand. - VFMV_S_F_VL, - // Splats an 64-bit value that has been split into two i32 parts. This is - // expanded late to two scalar stores and a stride 0 vector load. - // The first operand is passthru operand. - SPLAT_VECTOR_SPLIT_I64_VL, - // Truncates a RVV integer vector by one power-of-two. Carries both an extra - // mask and VL operand. - TRUNCATE_VECTOR_VL, - // Truncates a RVV integer vector by one power-of-two. If the value doesn't - // fit in the destination type, the result is saturated. These correspond to - // vnclip and vnclipu with a shift of 0. Carries both an extra mask and VL - // operand. - TRUNCATE_VECTOR_VL_SSAT, - TRUNCATE_VECTOR_VL_USAT, - // Matches the semantics of vslideup/vslidedown. 
The first operand is the - // pass-thru operand, the second is the source vector, the third is the XLenVT - // index (either constant or non-constant), the fourth is the mask, the fifth - // is the VL and the sixth is the policy. - VSLIDEUP_VL, - VSLIDEDOWN_VL, - // Matches the semantics of vslide1up/slide1down. The first operand is - // passthru operand, the second is source vector, third is the XLenVT scalar - // value. The fourth and fifth operands are the mask and VL operands. - VSLIDE1UP_VL, - VSLIDE1DOWN_VL, - // Matches the semantics of vfslide1up/vfslide1down. The first operand is - // passthru operand, the second is source vector, third is a scalar value - // whose type matches the element type of the vectors. The fourth and fifth - // operands are the mask and VL operands. - VFSLIDE1UP_VL, - VFSLIDE1DOWN_VL, - // Matches the semantics of the vid.v instruction, with a mask and VL - // operand. - VID_VL, - // Matches the semantics of the vfcnvt.rod function (Convert double-width - // float to single-width float, rounding towards odd). Takes a double-width - // float vector and produces a single-width float vector. Also has a mask and - // VL operand. - VFNCVT_ROD_VL, - // These nodes match the semantics of the corresponding RVV vector reduction - // instructions. They produce a vector result which is the reduction - // performed over the second vector operand plus the first element of the - // third vector operand. The first operand is the pass-thru operand. The - // second operand is an unconstrained vector type, and the result, first, and - // third operand's types are expected to be the corresponding full-width - // LMUL=1 type for the second operand: - // nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8 - // nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32 - // The different in types does introduce extra vsetvli instructions but - // similarly it reduces the number of registers consumed per reduction. - // Also has a mask and VL operand. 
- VECREDUCE_ADD_VL, - VECREDUCE_UMAX_VL, - VECREDUCE_SMAX_VL, - VECREDUCE_UMIN_VL, - VECREDUCE_SMIN_VL, - VECREDUCE_AND_VL, - VECREDUCE_OR_VL, - VECREDUCE_XOR_VL, - VECREDUCE_FADD_VL, - VECREDUCE_SEQ_FADD_VL, - VECREDUCE_FMIN_VL, - VECREDUCE_FMAX_VL, - - // Vector binary ops with a passthru as a third operand, a mask as a fourth - // operand, and VL as a fifth operand. - ADD_VL, - AND_VL, - MUL_VL, - OR_VL, - SDIV_VL, - SHL_VL, - SREM_VL, - SRA_VL, - SRL_VL, - ROTL_VL, - ROTR_VL, - SUB_VL, - UDIV_VL, - UREM_VL, - XOR_VL, - SMIN_VL, - SMAX_VL, - UMIN_VL, - UMAX_VL, - - BITREVERSE_VL, - BSWAP_VL, - CTLZ_VL, - CTTZ_VL, - CTPOP_VL, - - SADDSAT_VL, - UADDSAT_VL, - SSUBSAT_VL, - USUBSAT_VL, - - // Averaging adds of signed integers. - AVGFLOORS_VL, - // Averaging adds of unsigned integers. - AVGFLOORU_VL, - // Rounding averaging adds of signed integers. - AVGCEILS_VL, - // Rounding averaging adds of unsigned integers. - AVGCEILU_VL, - - MULHS_VL, - MULHU_VL, - FADD_VL, - FSUB_VL, - FMUL_VL, - FDIV_VL, - VFMIN_VL, - VFMAX_VL, - - // Vector unary ops with a mask as a second operand and VL as a third operand. - FNEG_VL, - FABS_VL, - FSQRT_VL, - FCLASS_VL, - FCOPYSIGN_VL, // Has a passthru operand - VFCVT_RTZ_X_F_VL, - VFCVT_RTZ_XU_F_VL, - VFROUND_NOEXCEPT_VL, - VFCVT_RM_X_F_VL, // Has a rounding mode operand. - VFCVT_RM_XU_F_VL, // Has a rounding mode operand. - SINT_TO_FP_VL, - UINT_TO_FP_VL, - VFCVT_RM_F_X_VL, // Has a rounding mode operand. - VFCVT_RM_F_XU_VL, // Has a rounding mode operand. - FP_ROUND_VL, - FP_EXTEND_VL, - - // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand. - VFMADD_VL, - VFNMADD_VL, - VFMSUB_VL, - VFNMSUB_VL, - - // Vector widening FMA ops with a mask as a fourth operand and VL as a fifth - // operand. - VFWMADD_VL, - VFWNMADD_VL, - VFWMSUB_VL, - VFWNMSUB_VL, - - // Widening instructions with a passthru value a third operand, a mask as a - // fourth operand, and VL as a fifth operand. 
- VWMUL_VL, - VWMULU_VL, - VWMULSU_VL, - VWADD_VL, - VWADDU_VL, - VWSUB_VL, - VWSUBU_VL, - VWADD_W_VL, - VWADDU_W_VL, - VWSUB_W_VL, - VWSUBU_W_VL, - VWSLL_VL, - - VFWMUL_VL, - VFWADD_VL, - VFWSUB_VL, - VFWADD_W_VL, - VFWSUB_W_VL, - - // Widening ternary operations with a mask as the fourth operand and VL as the - // fifth operand. - VWMACC_VL, - VWMACCU_VL, - VWMACCSU_VL, - - // Vector compare producing a mask. Fourth operand is input mask. Fifth - // operand is VL. - SETCC_VL, - - // General vmerge node with mask, true, false, passthru, and vl operands. - // Tail agnostic vselect can be implemented by setting passthru to undef. - VMERGE_VL, - - // Mask binary operators. - VMAND_VL, - VMOR_VL, - VMXOR_VL, - - // Set mask vector to all zeros or ones. - VMCLR_VL, - VMSET_VL, - - // Matches the semantics of vrgather.vx and vrgather.vv with extra operands - // for passthru and VL, except that out of bound indices result in a poison - // result not zero. Operands are (src, index, mask, passthru, vl). - VRGATHER_VX_VL, - VRGATHER_VV_VL, - VRGATHEREI16_VV_VL, - - // Vector sign/zero extend with additional mask & VL operands. - VSEXT_VL, - VZEXT_VL, - - // vcpop.m with additional mask and VL operands. - VCPOP_VL, - - // vfirst.m with additional mask and VL operands. - VFIRST_VL, - - // XRivosVisni - // VINSERT matches the semantics of ri.vinsert.v.x. It carries a VL operand. - RI_VINSERT_VL, - - // XRivosVizip - RI_VZIPEVEN_VL, - RI_VZIPODD_VL, - RI_VZIP2A_VL, - RI_VZIP2B_VL, - RI_VUNZIP2A_VL, - RI_VUNZIP2B_VL, - - // zvqdot instructions with additional passthru, mask and VL operands - VQDOT_VL, - VQDOTU_VL, - VQDOTSU_VL, - - LAST_VL_VECTOR_OP = VQDOTSU_VL, - - // XRivosVisni - // VEXTRACT matches the semantics of ri.vextract.x.v. The result is always - // XLenVT sign extended from the vector element size. VEXTRACT does *not* - // have a VL operand. - RI_VEXTRACT, - - // Read VLENB CSR - READ_VLENB, - // Reads value of CSR. - // The first operand is a chain pointer. 
The second specifies address of the - // required CSR. Two results are produced, the read value and the new chain - // pointer. - READ_CSR, - // Write value to CSR. - // The first operand is a chain pointer, the second specifies address of the - // required CSR and the third is the value to write. The result is the new - // chain pointer. - WRITE_CSR, - // Read and write value of CSR. - // The first operand is a chain pointer, the second specifies address of the - // required CSR and the third is the value to write. Two results are produced, - // the value read before the modification and the new chain pointer. - SWAP_CSR, - - // Branchless select operations, matching the semantics of the instructions - // defined in Zicond or XVentanaCondOps. - CZERO_EQZ, // vt.maskc for XVentanaCondOps. - CZERO_NEZ, // vt.maskcn for XVentanaCondOps. - - // Software guarded BRIND node. Operand 0 is the chain operand and - // operand 1 is the target address. - SW_GUARDED_BRIND, - // Software guarded calls for large code model - SW_GUARDED_CALL, - SW_GUARDED_TAIL, - - SF_VC_XV_SE, - SF_VC_IV_SE, - SF_VC_VV_SE, - SF_VC_FV_SE, - SF_VC_XVV_SE, - SF_VC_IVV_SE, - SF_VC_VVV_SE, - SF_VC_FVV_SE, - SF_VC_XVW_SE, - SF_VC_IVW_SE, - SF_VC_VVW_SE, - SF_VC_FVW_SE, - SF_VC_V_X_SE, - SF_VC_V_I_SE, - SF_VC_V_XV_SE, - SF_VC_V_IV_SE, - SF_VC_V_VV_SE, - SF_VC_V_FV_SE, - SF_VC_V_XVV_SE, - SF_VC_V_IVV_SE, - SF_VC_V_VVV_SE, - SF_VC_V_FVV_SE, - SF_VC_V_XVW_SE, - SF_VC_V_IVW_SE, - SF_VC_V_VVW_SE, - SF_VC_V_FVW_SE, - - // To avoid stack clash, allocation is performed by block and each block is - // probed. - PROBED_ALLOCA, - - // RISC-V vector tuple type version of INSERT_SUBVECTOR/EXTRACT_SUBVECTOR. - TUPLE_INSERT, - TUPLE_EXTRACT, - - // FP to 32 bit int conversions for RV64. These are used to keep track of the - // result being sign extended to 64 bit. These saturate out of range inputs. 
- FIRST_STRICTFP_OPCODE, - STRICT_FCVT_W_RV64 = FIRST_STRICTFP_OPCODE, - STRICT_FCVT_WU_RV64, - STRICT_FADD_VL, - STRICT_FSUB_VL, - STRICT_FMUL_VL, - STRICT_FDIV_VL, - STRICT_FSQRT_VL, - STRICT_VFMADD_VL, - STRICT_VFNMADD_VL, - STRICT_VFMSUB_VL, - STRICT_VFNMSUB_VL, - STRICT_FP_ROUND_VL, - STRICT_FP_EXTEND_VL, - STRICT_VFNCVT_ROD_VL, - STRICT_SINT_TO_FP_VL, - STRICT_UINT_TO_FP_VL, - STRICT_VFCVT_RM_X_F_VL, - STRICT_VFCVT_RTZ_X_F_VL, - STRICT_VFCVT_RTZ_XU_F_VL, - STRICT_FSETCC_VL, - STRICT_FSETCCS_VL, - STRICT_VFROUND_NOEXCEPT_VL, - LAST_STRICTFP_OPCODE = STRICT_VFROUND_NOEXCEPT_VL, - - FIRST_MEMORY_OPCODE, - TH_LWD = FIRST_MEMORY_OPCODE, - TH_LWUD, - TH_LDD, - TH_SWD, - TH_SDD, - LAST_MEMORY_OPCODE = TH_SDD, -}; -// clang-format on -} // namespace RISCVISD - class RISCVTargetLowering : public TargetLowering { const RISCVSubtarget &Subtarget; @@ -667,9 +161,6 @@ class RISCVTargetLowering : public TargetLowering { const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override; - // This method returns the name of a target specific DAG node. - const char *getTargetNodeName(unsigned Opcode) const override; - MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override; diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td index ac5f4f0ca6cc5..bd3ecd737bede 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td +++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td @@ -145,14 +145,12 @@ def G_VMSET_VL : RISCVGenericInstruction { } def : GINodeEquiv; -// Pseudo equivalent to a RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL. There is no -// record to mark as equivalent to using GINodeEquiv because it gets lowered -// before instruction selection. 
def G_SPLAT_VECTOR_SPLIT_I64_VL : RISCVGenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$passthru, type1:$hi, type1:$lo, type2:$vl); let hasSideEffects = false; } +def : GINodeEquiv; // Pseudo equivalent to a RISCVISD::VSLIDEDOWN_VL def G_VSLIDEDOWN_VL : RISCVGenericInstruction { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 91903a9ea1f78..e9bdeb88e4ca8 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -14,6 +14,15 @@ // RISC-V specific DAG Nodes. //===----------------------------------------------------------------------===// +class RVSDNode properties = []> + : SDNode<"RISCVISD::" # opcode, type_profile, properties> { + bit HasPassthruOp = false; + bit HasMaskOp = false; + let TSFlags{0} = HasPassthruOp; + let TSFlags{1} = HasMaskOp; +} + // Target-independent type requirements, but with target-specific formats. def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; @@ -29,14 +38,6 @@ def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>, def SDT_RISCVBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>]>; -def SDT_RISCVReadCSR : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>; -def SDT_RISCVWriteCSR : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisInt<1>]>; -def SDT_RISCVSwapCSR : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, - SDTCisInt<2>]>; -def SDT_RISCVReadCounterWide : SDTypeProfile<2, 2, [SDTCisVT<0, i32>, - SDTCisVT<1, i32>, - SDTCisInt<2>, - SDTCisInt<3>]>; def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [ SDTCisSameAs<0, 1>, SDTCisVT<0, i64> ]>; @@ -53,58 +54,124 @@ def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart, def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; -// Target-dependent nodes. 
-def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def riscv_sw_guarded_call : SDNode<"RISCVISD::SW_GUARDED_CALL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def riscv_ret_glue : SDNode<"RISCVISD::RET_GLUE", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; -def riscv_sret_glue : SDNode<"RISCVISD::SRET_GLUE", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def riscv_mret_glue : SDNode<"RISCVISD::MRET_GLUE", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def riscv_mileaveret_glue : SDNode<"RISCVISD::QC_C_MILEAVERET_GLUE", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC>; -def riscv_brcc : SDNode<"RISCVISD::BR_CC", SDT_RISCVBrCC, - [SDNPHasChain]>; -def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def riscv_sw_guarded_tail : SDNode<"RISCVISD::SW_GUARDED_TAIL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def riscv_sw_guarded_brind : SDNode<"RISCVISD::SW_GUARDED_BRIND", - SDTBrind, [SDNPHasChain]>; -def riscv_sllw : SDNode<"RISCVISD::SLLW", SDT_RISCVIntBinOpW>; -def riscv_sraw : SDNode<"RISCVISD::SRAW", SDT_RISCVIntBinOpW>; -def riscv_srlw : SDNode<"RISCVISD::SRLW", SDT_RISCVIntBinOpW>; -def riscv_read_csr : SDNode<"RISCVISD::READ_CSR", SDT_RISCVReadCSR, - [SDNPHasChain]>; -def riscv_write_csr : SDNode<"RISCVISD::WRITE_CSR", SDT_RISCVWriteCSR, - [SDNPHasChain]>; -def riscv_swap_csr : SDNode<"RISCVISD::SWAP_CSR", SDT_RISCVSwapCSR, - [SDNPHasChain]>; - -def riscv_read_counter_wide : SDNode<"RISCVISD::READ_COUNTER_WIDE", - SDT_RISCVReadCounterWide, - [SDNPHasChain, SDNPSideEffect]>; - -def riscv_add_lo : SDNode<"RISCVISD::ADD_LO", SDTIntBinOp>; -def riscv_hi : SDNode<"RISCVISD::HI", SDTIntUnaryOp>; -def riscv_lla : SDNode<"RISCVISD::LLA", SDTIntUnaryOp>; -def riscv_add_tprel : 
SDNode<"RISCVISD::ADD_TPREL", - SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, - SDTCisSameAs<0, 2>, - SDTCisSameAs<0, 3>, - SDTCisInt<0>]>>; - -def riscv_probed_alloca : SDNode<"RISCVISD::PROBED_ALLOCA", - SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>, - [SDNPHasChain, SDNPMayStore]>; +def riscv_call : RVSDNode<"CALL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; +def riscv_tail : RVSDNode<"TAIL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; + +// Software guarded calls for large code model +def riscv_sw_guarded_call : RVSDNode<"SW_GUARDED_CALL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; +def riscv_sw_guarded_tail : RVSDNode<"SW_GUARDED_TAIL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; + +def riscv_ret_glue : RVSDNode<"RET_GLUE", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def riscv_sret_glue : RVSDNode<"SRET_GLUE", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; +def riscv_mret_glue : RVSDNode<"MRET_GLUE", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; +def riscv_mileaveret_glue : RVSDNode<"QC_C_MILEAVERET_GLUE", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; + +// Select with condition operator - This selects between a true value and +// a false value (ops #3 and #4) based on the boolean result of comparing +// the lhs and rhs (ops #0 and #1) of a conditional expression with the +// condition code in op #2, a XLenVT constant from the ISD::CondCode enum. +// The lhs and rhs are XLenVT integers. The true and false values can be +// integer or floating point. +def riscv_selectcc : RVSDNode<"SELECT_CC", SDT_RISCVSelectCC>; +def riscv_brcc : RVSDNode<"BR_CC", SDT_RISCVBrCC, + [SDNPHasChain]>; + +// Software guarded BRIND node. Operand is the target address. +def riscv_sw_guarded_brind : RVSDNode<"SW_GUARDED_BRIND", + SDTBrind, [SDNPHasChain]>; + +// RV64I shifts, directly matching the semantics of the named RISC-V +// instructions. 
+def riscv_sllw : RVSDNode<"SLLW", SDT_RISCVIntBinOpW>; +def riscv_sraw : RVSDNode<"SRAW", SDT_RISCVIntBinOpW>; +def riscv_srlw : RVSDNode<"SRLW", SDT_RISCVIntBinOpW>; + +// Reads value of CSR. The first operand is the address of the required CSR. +// The result is the read value. +def riscv_read_csr : RVSDNode<"READ_CSR", + SDTypeProfile<1, 1, [SDTCisInt<0>, + SDTCisInt<1>]>, + [SDNPHasChain]>; +// Write value to CSR. The first operand is the address of the required CSR, +// the second is the value to write. +def riscv_write_csr : RVSDNode<"WRITE_CSR", + SDTypeProfile<0, 2, [SDTCisInt<0>, + SDTCisInt<1>]>, + [SDNPHasChain]>; + +// Read and write value of CSR. The first operand is the address of the +// required CSR, the second is the value to write, and the result is the +// read value (before modification). +def riscv_swap_csr : RVSDNode<"SWAP_CSR", + SDTypeProfile<1, 2, [SDTCisInt<0>, + SDTCisInt<1>, + SDTCisInt<2>]>, + [SDNPHasChain]>; + +// A read of the 64-bit counter CSR on a 32-bit target (returns (Lo, Hi)). +// It takes a chain operand and another two target constant operands (the +// CSR numbers of the low and high parts of the counter). +def riscv_read_counter_wide : RVSDNode<"READ_COUNTER_WIDE", + SDTypeProfile<2, 2, [SDTCisVT<0, i32>, + SDTCisVT<1, i32>, + SDTCisInt<2>, + SDTCisInt<3>]>, + [SDNPHasChain, SDNPSideEffect]>; + +// Add the Lo 12 bits from an address. Selected to ADDI. +def riscv_add_lo : RVSDNode<"ADD_LO", SDTIntBinOp>; + +// Get the Hi 20 bits from an address. Selected to LUI. +def riscv_hi : RVSDNode<"HI", SDTIntUnaryOp>; + +// Represents an AUIPC+ADDI pair. Selected to PseudoLLA. +def riscv_lla : RVSDNode<"LLA", SDTIntUnaryOp>; + +// Selected as PseudoAddTPRel. Used to emit a TP-relative relocation. +def riscv_add_tprel : RVSDNode<"ADD_TPREL", + SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, + SDTCisSameAs<0, 2>, + SDTCisSameAs<0, 3>, + SDTCisInt<0>]>>; + +// To avoid stack clash, allocation is performed by block and each block is +// probed. 
+def riscv_probed_alloca : RVSDNode<"PROBED_ALLOCA", + SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>, + [SDNPHasChain, SDNPMayStore]>; + + +defvar GPRPairVT = untyped; + +/// Turn a pair of `i`s into an even-odd register pair (`untyped`). +/// - Output: `untyped` even-odd register pair +/// - Input 0: `i` low-order bits, for even register. +/// - Input 1: `i` high-order bits, for odd register. +def riscv_build_gpr_pair : RVSDNode<"BuildGPRPair", + SDTypeProfile<1, 2, [SDTCisVT<0, GPRPairVT>, + SDTCisVT<1, XLenVT>, + SDTCisVT<2, XLenVT>]>>; + +/// Turn an even-odd register pair (`untyped`) into a pair of `i`s. +/// - Output 0: `i` low-order bits, from even register. +/// - Output 1: `i` high-order bits, from odd register. +/// - Input: `untyped` even-odd register pair +def riscv_split_gpr_pair : RVSDNode<"SplitGPRPair", + SDTypeProfile<2, 1, [SDTCisVT<0, XLenVT>, + SDTCisVT<1, XLenVT>, + SDTCisVT<2, GPRPairVT>]>>; //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td index 01f6c4db0598b..0c584daf45b14 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -22,9 +22,9 @@ def SDT_RISCVSplitF64 : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, f64>]>; -def RISCVBuildPairF64 : SDNode<"RISCVISD::BuildPairF64", SDT_RISCVBuildPairF64>; +def RISCVBuildPairF64 : RVSDNode<"BuildPairF64", SDT_RISCVBuildPairF64>; def : GINodeEquiv; -def RISCVSplitF64 : SDNode<"RISCVISD::SplitF64", SDT_RISCVSplitF64>; +def RISCVSplitF64 : RVSDNode<"SplitF64", SDT_RISCVSplitF64>; def : GINodeEquiv; def AddrRegImmINX : ComplexPattern; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td index 97550381a6fb7..360191f03ddf7 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -35,36 +35,64 @@ def SDT_RISCVFSGNJX : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>]>; def riscv_fclass - : SDNode<"RISCVISD::FCLASS", SDT_RISCVFCLASS>; + : RVSDNode<"FCLASS", SDT_RISCVFCLASS>; +// Rounds an FP value to its corresponding integer in the same FP format. +// First operand is the value to round, the second operand is the largest +// integer that can be represented exactly in the FP format. This will be +// expanded into multiple instructions and basic blocks with a custom +// inserter. def riscv_fround - : SDNode<"RISCVISD::FROUND", SDT_RISCVFROUND>; + : RVSDNode<"FROUND", SDT_RISCVFROUND>; def riscv_fsgnjx - : SDNode<"RISCVISD::FSGNJX", SDT_RISCVFSGNJX>; + : RVSDNode<"FSGNJX", SDT_RISCVFSGNJX>; +// FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as +// XLEN is the only legal integer width. +// +// FMV_W_X_RV64 matches the semantics of the FMV.W.X. +// FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result. 
+// +// This is a more convenient semantic for producing dagcombines that remove +// unnecessary GPR->FPR->GPR moves. def riscv_fmv_w_x_rv64 - : SDNode<"RISCVISD::FMV_W_X_RV64", SDT_RISCVFMV_W_X_RV64>; + : RVSDNode<"FMV_W_X_RV64", SDT_RISCVFMV_W_X_RV64>; def riscv_fmv_x_anyextw_rv64 - : SDNode<"RISCVISD::FMV_X_ANYEXTW_RV64", SDT_RISCVFMV_X_ANYEXTW_RV64>; + : RVSDNode<"FMV_X_ANYEXTW_RV64", SDT_RISCVFMV_X_ANYEXTW_RV64>; + +// FP to 32 bit int conversions for RV64. These are used to keep track of the +// result being sign extended to 64 bit. These saturate out of range inputs. +// Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode +// is passed as a TargetConstant operand using the RISCVFPRndMode enum. def riscv_fcvt_w_rv64 - : SDNode<"RISCVISD::FCVT_W_RV64", SDT_RISCVFCVT_W_RV64>; + : RVSDNode<"FCVT_W_RV64", SDT_RISCVFCVT_W_RV64>; def riscv_fcvt_wu_rv64 - : SDNode<"RISCVISD::FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64>; + : RVSDNode<"FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64>; + +// FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and +// fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of +// range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode +// is passed as a TargetConstant operand using the RISCVFPRndMode enum. def riscv_fcvt_x - : SDNode<"RISCVISD::FCVT_X", SDT_RISCVFCVT_X>; + : RVSDNode<"FCVT_X", SDT_RISCVFCVT_X>; def riscv_fcvt_xu - : SDNode<"RISCVISD::FCVT_XU", SDT_RISCVFCVT_X>; - -def riscv_fmin : SDNode<"RISCVISD::FMIN", SDTFPBinOp>; -def riscv_fmax : SDNode<"RISCVISD::FMAX", SDTFPBinOp>; - -def riscv_strict_fcvt_w_rv64 - : SDNode<"RISCVISD::STRICT_FCVT_W_RV64", SDT_RISCVFCVT_W_RV64, - [SDNPHasChain]>; -def riscv_strict_fcvt_wu_rv64 - : SDNode<"RISCVISD::STRICT_FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64, - [SDNPHasChain]>; + : RVSDNode<"FCVT_XU", SDT_RISCVFCVT_X>; + +// Floating point fmax and fmin matching the RISC-V instruction semantics. 
+def riscv_fmin : RVSDNode<"FMIN", SDTFPBinOp>; +def riscv_fmax : RVSDNode<"FMAX", SDTFPBinOp>; + +let IsStrictFP = true in { + // FP to 32 bit int conversions for RV64. These are used to keep track of the + // result being sign extended to 64 bit. These saturate out of range inputs. + def riscv_strict_fcvt_w_rv64 + : RVSDNode<"STRICT_FCVT_W_RV64", SDT_RISCVFCVT_W_RV64, + [SDNPHasChain]>; + def riscv_strict_fcvt_wu_rv64 + : RVSDNode<"STRICT_FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64, + [SDNPHasChain]>; +} def riscv_any_fcvt_w_rv64 : PatFrags<(ops node:$src, node:$frm), [(riscv_strict_fcvt_w_rv64 node:$src, node:$frm), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td index 046e655c4a8c0..c1f67f77d7b84 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td @@ -15,10 +15,15 @@ // RISC-V specific DAG Nodes. //===----------------------------------------------------------------------===// -def riscv_mulhsu : SDNode<"RISCVISD::MULHSU", SDTIntBinOp>; -def riscv_divw : SDNode<"RISCVISD::DIVW", SDT_RISCVIntBinOpW>; -def riscv_divuw : SDNode<"RISCVISD::DIVUW", SDT_RISCVIntBinOpW>; -def riscv_remuw : SDNode<"RISCVISD::REMUW", SDT_RISCVIntBinOpW>; +// Multiply high for signed x unsigned. +def riscv_mulhsu : RVSDNode<"MULHSU", SDTIntBinOp>; + +// 32-bit operations from RV64M that can't be simply matched with a pattern +// at instruction selection time. These have undefined behavior for division +// by 0 or overflow (divw) like their target independent counterparts.
+def riscv_divw : RVSDNode<"DIVW", SDT_RISCVIntBinOpW>; +def riscv_divuw : RVSDNode<"DIVUW", SDT_RISCVIntBinOpW>; +def riscv_remuw : RVSDNode<"REMUW", SDT_RISCVIntBinOpW>; //===----------------------------------------------------------------------===// // Instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 5edcfdf2654a4..b4495c49b005a 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -70,11 +70,15 @@ /// //===----------------------------------------------------------------------===// -def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S", - SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>, - SDTCisInt<1>]>>; -def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB", - SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>; +// VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign +// extended from the vector element size. +def riscv_vmv_x_s : RVSDNode<"VMV_X_S", + SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>, + SDTCisInt<1>]>>; + +// Read VLENB CSR +def riscv_read_vlenb : RVSDNode<"READ_VLENB", + SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>; // Operand that is allowed to be a register other than X0, a 5 bit unsigned // immediate, or -1. -1 means VLMAX. This allows us to pick between VSETIVLI and diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 2b0b31c79c7a7..99cb5da700dc3 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -17,6 +17,32 @@ /// //===----------------------------------------------------------------------===// +// Splats a 64-bit value that has been split into two i32 parts. This is +// expanded late to two scalar stores and a stride 0 vector load. +// The first operand is the passthru operand.
+// +// This is only present to generate the correct TableGen SDNode description, +// it is lowered before instruction selection. +// FIXME: I'm not sure the types here are entirely correct. +// Returns a vector. Operand 0 is a passthru, operand 1 and 2 are i32 scalars, operand 3 is VL +def riscv_splat_vector_split_i64_vl : RVSDNode<"SPLAT_VECTOR_SPLIT_I64_VL", + SDTypeProfile<1, 4, [SDTCisVec<0>, + SDTCVecEltisVT<0, i64>, + SDTCisSameAs<1, 0>, + SDTCisVT<2, i32>, + SDTCisVT<3, i32>, + SDTCisVT<4, XLenVT>]>>; + +// RISC-V vector tuple type version of INSERT_SUBVECTOR/EXTRACT_SUBVECTOR. +def riscv_tuple_insert : RVSDNode<"TUPLE_INSERT", + SDTypeProfile<1, 3, [SDTCisSameAs<1, 0>, + SDTCisVec<2>, + SDTCisVT<3, i32>]>>; +def riscv_tuple_extract : RVSDNode<"TUPLE_EXTRACT", + SDTypeProfile<1, 2, [SDTCisVec<0>, + SDTCisVT<2, i32>]>>; + + //===----------------------------------------------------------------------===// // Helpers to define the VL patterns. //===----------------------------------------------------------------------===// @@ -69,85 +95,119 @@ def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>; -def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL", - SDTypeProfile<1, 3, [SDTCisVec<0>, - SDTCisSameAs<0, 1>, - SDTCisSameAs<0, 2>, - SDTCisVT<3, XLenVT>]>>; -def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", - SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, - SDTCisSameAs<0, 1>, - SDTCisVT<2, XLenVT>, - SDTCisVT<3, XLenVT>]>>; -def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", - SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, - SDTCisSameAs<0, 1>, - SDTCisEltOfVec<2, 0>, - SDTCisVT<3, XLenVT>]>>; -def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", - SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, - SDTCisInt<0>, - SDTCisVT<2, XLenVT>, - SDTCisVT<3, XLenVT>]>>; -def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", - SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, - SDTCisFP<0>, - SDTCisEltOfVec<2, 
0>, - SDTCisVT<3, XLenVT>]>>; - -def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; -def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; -def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; -def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; -def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; -def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; -def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; -def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; -def riscv_rotl_vl : SDNode<"RISCVISD::ROTL_VL", SDT_RISCVIntBinOp_VL>; -def riscv_rotr_vl : SDNode<"RISCVISD::ROTR_VL", SDT_RISCVIntBinOp_VL>; -def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; - -def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>; -def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>; -def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>; -def riscv_cttz_vl : 
SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>; -def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>; - -def riscv_avgfloors_vl : SDNode<"RISCVISD::AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_avgceils_vl : SDNode<"RISCVISD::AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; -def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; -def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; - -def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; -def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; -def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; -def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; -def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; -def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; -def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; -def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; -def riscv_vfmin_vl : SDNode<"RISCVISD::VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; -def riscv_vfmax_vl : SDNode<"RISCVISD::VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; - -def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; -def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; -def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, 
SDNPHasChain]>; -def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; -def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; +// VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand +// for the VL value to be used for the operation. The first operand is +// passthru operand. +def riscv_vmv_v_v_vl : RVSDNode<"VMV_V_V_VL", + SDTypeProfile<1, 3, [SDTCisVec<0>, + SDTCisSameAs<0, 1>, + SDTCisSameAs<0, 2>, + SDTCisVT<3, XLenVT>]>>; + +// VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand +// for the VL value to be used for the operation. The first operand is +// passthru operand. +def riscv_vmv_v_x_vl : RVSDNode<"VMV_V_X_VL", + SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, + SDTCisSameAs<0, 1>, + SDTCisVT<2, XLenVT>, + SDTCisVT<3, XLenVT>]>>; + +// VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand +// for the VL value to be used for the operation. The first operand is +// passthru operand. +def riscv_vfmv_v_f_vl : RVSDNode<"VFMV_V_F_VL", + SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, + SDTCisSameAs<0, 1>, + SDTCisEltOfVec<2, 0>, + SDTCisVT<3, XLenVT>]>>; + +// VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand. +def riscv_vmv_s_x_vl : RVSDNode<"VMV_S_X_VL", + SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, + SDTCisInt<0>, + SDTCisVT<2, XLenVT>, + SDTCisVT<3, XLenVT>]>>; + +// VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand. +def riscv_vfmv_s_f_vl : RVSDNode<"VFMV_S_F_VL", + SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, + SDTCisFP<0>, + SDTCisEltOfVec<2, 0>, + SDTCisVT<3, XLenVT>]>>; + +// Vector binary ops with a passthru as a third operand, a mask as a fourth +// operand, and VL as a fifth operand. 
+let HasPassthruOp = true, HasMaskOp = true in { + def riscv_add_vl : RVSDNode<"ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_sub_vl : RVSDNode<"SUB_VL", SDT_RISCVIntBinOp_VL>; + def riscv_mul_vl : RVSDNode<"MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_mulhs_vl : RVSDNode<"MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_mulhu_vl : RVSDNode<"MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_and_vl : RVSDNode<"AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_or_vl : RVSDNode<"OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_xor_vl : RVSDNode<"XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_sdiv_vl : RVSDNode<"SDIV_VL", SDT_RISCVIntBinOp_VL>; + def riscv_srem_vl : RVSDNode<"SREM_VL", SDT_RISCVIntBinOp_VL>; + def riscv_udiv_vl : RVSDNode<"UDIV_VL", SDT_RISCVIntBinOp_VL>; + def riscv_urem_vl : RVSDNode<"UREM_VL", SDT_RISCVIntBinOp_VL>; + def riscv_shl_vl : RVSDNode<"SHL_VL", SDT_RISCVIntBinOp_VL>; + def riscv_sra_vl : RVSDNode<"SRA_VL", SDT_RISCVIntBinOp_VL>; + def riscv_srl_vl : RVSDNode<"SRL_VL", SDT_RISCVIntBinOp_VL>; + def riscv_rotl_vl : RVSDNode<"ROTL_VL", SDT_RISCVIntBinOp_VL>; + def riscv_rotr_vl : RVSDNode<"ROTR_VL", SDT_RISCVIntBinOp_VL>; + def riscv_smin_vl : RVSDNode<"SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_smax_vl : RVSDNode<"SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_umin_vl : RVSDNode<"UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_umax_vl : RVSDNode<"UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + + def riscv_bitreverse_vl : RVSDNode<"BITREVERSE_VL", SDT_RISCVIntUnOp_VL>; + def riscv_bswap_vl : RVSDNode<"BSWAP_VL", SDT_RISCVIntUnOp_VL>; + def riscv_ctlz_vl : RVSDNode<"CTLZ_VL", SDT_RISCVIntUnOp_VL>; + def riscv_cttz_vl : RVSDNode<"CTTZ_VL", SDT_RISCVIntUnOp_VL>; + def riscv_ctpop_vl : RVSDNode<"CTPOP_VL", SDT_RISCVIntUnOp_VL>; + + // Averaging adds of signed integers. 
+ def riscv_avgfloors_vl : RVSDNode<"AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + // Averaging adds of unsigned integers. + def riscv_avgflooru_vl : RVSDNode<"AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + // Rounding averaging adds of signed integers. + def riscv_avgceils_vl : RVSDNode<"AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + // Rounding averaging adds of unsigned integers. + def riscv_avgceilu_vl : RVSDNode<"AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_saddsat_vl : RVSDNode<"SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_uaddsat_vl : RVSDNode<"UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; + def riscv_ssubsat_vl : RVSDNode<"SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; + def riscv_usubsat_vl : RVSDNode<"USUBSAT_VL", SDT_RISCVIntBinOp_VL>; + + def riscv_fadd_vl : RVSDNode<"FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; + def riscv_fsub_vl : RVSDNode<"FSUB_VL", SDT_RISCVFPBinOp_VL>; + def riscv_fmul_vl : RVSDNode<"FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; + def riscv_fdiv_vl : RVSDNode<"FDIV_VL", SDT_RISCVFPBinOp_VL>; +} // let HasPassthruOp = true, HasMaskOp = true + +// Vector unary ops with a mask as a second operand and VL as a third operand. 
+let HasMaskOp = true in { + def riscv_fneg_vl : RVSDNode<"FNEG_VL", SDT_RISCVFPUnOp_VL>; + def riscv_fabs_vl : RVSDNode<"FABS_VL", SDT_RISCVFPUnOp_VL>; + def riscv_fsqrt_vl : RVSDNode<"FSQRT_VL", SDT_RISCVFPUnOp_VL>; +} // let HasMaskOp = true + +let HasPassthruOp = true, HasMaskOp = true in { + def riscv_fcopysign_vl : RVSDNode<"FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; + def riscv_vfmin_vl : RVSDNode<"VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; + def riscv_vfmax_vl : RVSDNode<"VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; +} // let HasPassthruOp = true, HasMaskOp = true + +let IsStrictFP = true, HasPassthruOp = true, HasMaskOp = true in { + def riscv_strict_fadd_vl : RVSDNode<"STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; + def riscv_strict_fsub_vl : RVSDNode<"STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; + def riscv_strict_fmul_vl : RVSDNode<"STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; + def riscv_strict_fdiv_vl : RVSDNode<"STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; +} // let IsStrictFP = true, HasPassthruOp = true, HasMaskOp = true + +let IsStrictFP = true, HasMaskOp = true in +def riscv_strict_fsqrt_vl : RVSDNode<"STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), [(riscv_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), @@ -165,14 +225,15 @@ def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>; -def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL", - SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, - SDTCisFP<1>, SDTCisVec<1>, - SDTCisSameSizeAs<0, 1>, - SDTCisSameNumEltsAs<0, 1>, - SDTCVecEltisVT<2, i1>, - SDTCisSameNumEltsAs<0, 2>, - SDTCisVT<3, XLenVT>]>>; +let HasMaskOp = true in +def riscv_fclass_vl : RVSDNode<"FCLASS_VL", + 
SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, + SDTCisFP<1>, SDTCisVec<1>, + SDTCisSameSizeAs<0, 1>, + SDTCisSameNumEltsAs<0, 1>, + SDTCVecEltisVT<2, i1>, + SDTCisSameNumEltsAs<0, 2>, + SDTCisVT<3, XLenVT>]>>; def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, @@ -181,10 +242,14 @@ def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>; -def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; -def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; -def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; -def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; + +let HasMaskOp = true in { + // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand. + def riscv_vfmadd_vl : RVSDNode<"VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; + def riscv_vfnmadd_vl : RVSDNode<"VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; + def riscv_vfmsub_vl : RVSDNode<"VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; + def riscv_vfnmsub_vl : RVSDNode<"VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; +} def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisVec<1>, SDTCisFP<1>, @@ -195,15 +260,22 @@ def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>; -def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; -def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; -def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; -def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; -def riscv_strict_vfmadd_vl : 
SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; -def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; -def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; -def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; +let HasMaskOp = true in { + // Vector widening FMA ops with a mask as a fourth operand and VL as a fifth + // operand. + def riscv_vfwmadd_vl : RVSDNode<"VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; + def riscv_vfwnmadd_vl : RVSDNode<"VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; + def riscv_vfwmsub_vl : RVSDNode<"VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; + def riscv_vfwnmsub_vl : RVSDNode<"VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; + + let IsStrictFP = true in { + def riscv_strict_vfmadd_vl : RVSDNode<"STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; + def riscv_strict_vfnmadd_vl : RVSDNode<"STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; + def riscv_strict_vfmsub_vl : RVSDNode<"STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; + def riscv_strict_vfnmsub_vl : RVSDNode<"STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; + } // let IsStrictFP = true +} // let HasMaskOp = true def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), @@ -227,12 +299,22 @@ def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> ]>; -def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; -def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; -def 
riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; -def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>; -def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; -def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; +let HasMaskOp = true in { + def riscv_fpround_vl : RVSDNode<"FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; + def riscv_fpextend_vl : RVSDNode<"FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; + + // Matches the semantics of the vfcnvt.rod function (Convert double-width + // float to single-width float, rounding towards odd). Takes a double-width + // float vector and produces a single-width float vector. Also has a mask and + // VL operand. + def riscv_fncvt_rod_vl : RVSDNode<"VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; + + let IsStrictFP = true in { + def riscv_strict_fpround_vl : RVSDNode<"STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; + def riscv_strict_fpextend_vl : RVSDNode<"STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>; + def riscv_strict_fncvt_rod_vl : RVSDNode<"STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; + } // let IsStrictFP = true +} // let HasMaskOp = true def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_fpround_vl node:$src, node:$mask, node:$vl), @@ -270,15 +352,20 @@ def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [ SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>; // Float -> Int -def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>; -def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; -def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; -def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; +let HasMaskOp = true in { + def riscv_vfcvt_rm_xu_f_vl : RVSDNode<"VFCVT_RM_XU_F_VL", 
SDT_RISCVFP2IOp_RM_VL>; + def riscv_vfcvt_rm_x_f_vl : RVSDNode<"VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; + + def riscv_vfcvt_rtz_xu_f_vl : RVSDNode<"VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; + def riscv_vfcvt_rtz_x_f_vl : RVSDNode<"VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; -def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; -def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; -def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; + let IsStrictFP = true in { + def riscv_strict_vfcvt_rm_x_f_vl : RVSDNode<"STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; + def riscv_strict_vfcvt_rtz_xu_f_vl : RVSDNode<"STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; + def riscv_strict_vfcvt_rtz_x_f_vl : RVSDNode<"STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; + } // let IsStrictFP = true +} // let HasMaskOp = true def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm), [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm), @@ -291,13 +378,18 @@ def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>; // Int -> Float -def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; -def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; -def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; -def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; -def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; -def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; +let HasMaskOp = true in { + def 
riscv_sint_to_fp_vl : RVSDNode<"SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; + def riscv_uint_to_fp_vl : RVSDNode<"UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; + def riscv_vfcvt_rm_f_xu_vl : RVSDNode<"VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; + def riscv_vfcvt_rm_f_x_vl : RVSDNode<"VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; + + let IsStrictFP = true in { + def riscv_strict_sint_to_fp_vl : RVSDNode<"STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; + def riscv_strict_uint_to_fp_vl : RVSDNode<"STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; + } // let IsStrictFP = true +} // let HasMaskOp = true def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl), @@ -306,16 +398,27 @@ def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl), (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>; -def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; -def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; +let HasMaskOp = true in { + def riscv_vfround_noexcept_vl: RVSDNode<"VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; + + let IsStrictFP = true in + def riscv_strict_vfround_noexcept_vl: RVSDNode<"STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; +} def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl), (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>; -def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>; -def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; -def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; +// Vector compare producing a mask. 
Fourth operand is input mask. Fifth +// operand is VL. +let HasPassthruOp = true, HasMaskOp = true in +def riscv_setcc_vl : RVSDNode<"SETCC_VL", SDT_RISCVSETCCOP_VL>; + +let IsStrictFP = true, HasMaskOp = true in { + def riscv_strict_fsetcc_vl : RVSDNode<"STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; + def riscv_strict_fsetccs_vl : RVSDNode<"STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; +} // let IsStrictFP = true, HasMaskOp = true + def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl), [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl), (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>; @@ -323,34 +426,39 @@ def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$p [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl), (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>; -def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", - SDTypeProfile<1, 5, [SDTCisVec<0>, - SDTCisSameAs<0, 1>, - SDTCisVT<2, XLenVT>, - SDTCisSameAs<0, 3>, - SDTCVecEltisVT<4, i1>, - SDTCisSameNumEltsAs<0, 4>, - SDTCisVT<5, XLenVT>]>>; -def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", - SDTypeProfile<1, 5, [SDTCisVec<0>, - SDTCisSameAs<0, 1>, - SDTCisInt<2>, - SDTCisSameNumEltsAs<0, 2>, - SDTCisSameSizeAs<0, 2>, - SDTCisSameAs<0, 3>, - SDTCVecEltisVT<4, i1>, - SDTCisSameNumEltsAs<0, 4>, - SDTCisVT<5, XLenVT>]>>; -def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", +let HasMaskOp = true in { + // Matches the semantics of vrgather.vx and vrgather.vv with extra operands + // for passthru and VL, except that out of bound indices result in a poison + // result not zero. Operands are (src, index, mask, passthru, vl). 
+ def riscv_vrgather_vx_vl : RVSDNode<"VRGATHER_VX_VL", + SDTypeProfile<1, 5, [SDTCisVec<0>, + SDTCisSameAs<0, 1>, + SDTCisVT<2, XLenVT>, + SDTCisSameAs<0, 3>, + SDTCVecEltisVT<4, i1>, + SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT>]>>; + def riscv_vrgather_vv_vl : RVSDNode<"VRGATHER_VV_VL", SDTypeProfile<1, 5, [SDTCisVec<0>, - SDTCisSameAs<0, 1>, - SDTCisInt<2>, - SDTCVecEltisVT<2, i16>, - SDTCisSameNumEltsAs<0, 2>, - SDTCisSameAs<0, 3>, - SDTCVecEltisVT<4, i1>, - SDTCisSameNumEltsAs<0, 4>, - SDTCisVT<5, XLenVT>]>>; + SDTCisSameAs<0, 1>, + SDTCisInt<2>, + SDTCisSameNumEltsAs<0, 2>, + SDTCisSameSizeAs<0, 2>, + SDTCisSameAs<0, 3>, + SDTCVecEltisVT<4, i1>, + SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT>]>>; + def riscv_vrgatherei16_vv_vl : RVSDNode<"VRGATHEREI16_VV_VL", + SDTypeProfile<1, 5, [SDTCisVec<0>, + SDTCisSameAs<0, 1>, + SDTCisInt<2>, + SDTCVecEltisVT<2, i16>, + SDTCisSameNumEltsAs<0, 2>, + SDTCisSameAs<0, 3>, + SDTCVecEltisVT<4, i1>, + SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT>]>>; +} // let HasMaskOp = true def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [ SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, @@ -358,47 +466,63 @@ def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [ SDTCisVT<5, XLenVT> ]>; -def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>; +// General vmerge node with mask, true, false, passthru, and vl operands. +// Tail agnostic vselect can be implemented by setting passthru to undef. +let HasPassthruOp = true in +def riscv_vmerge_vl : RVSDNode<"VMERGE_VL", SDT_RISCVVMERGE_VL>; def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, SDTCisVT<1, XLenVT>]>; -def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; -def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; + +// Set mask vector to all zeros or ones. 
+def riscv_vmclr_vl : RVSDNode<"VMCLR_VL", SDT_RISCVVMSETCLR_VL>; +def riscv_vmset_vl : RVSDNode<"VMSET_VL", SDT_RISCVVMSETCLR_VL>; def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCVecEltisVT<0, i1>, SDTCisVT<3, XLenVT>]>; -def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; -def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; -def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; + +// Mask binary operators. +def riscv_vmand_vl : RVSDNode<"VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; +def riscv_vmor_vl : RVSDNode<"VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; +def riscv_vmxor_vl : RVSDNode<"VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; -def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", - SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, - SDTCisVec<1>, SDTCisInt<1>, - SDTCVecEltisVT<2, i1>, - SDTCisSameNumEltsAs<1, 2>, - SDTCisVT<3, XLenVT>]>>; +let HasMaskOp = true in { + // vcpop.m with additional mask and VL operands. + def riscv_vcpop_vl : RVSDNode<"VCPOP_VL", + SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, + SDTCisVec<1>, SDTCisInt<1>, + SDTCVecEltisVT<2, i1>, + SDTCisSameNumEltsAs<1, 2>, + SDTCisVT<3, XLenVT>]>>; -def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL", - SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, - SDTCisVec<1>, SDTCisInt<1>, - SDTCVecEltisVT<2, i1>, - SDTCisSameNumEltsAs<1, 2>, - SDTCisVT<3, XLenVT>]>>; + // vfirst.m with additional mask and VL operands. 
+ def riscv_vfirst_vl : RVSDNode<"VFIRST_VL", + SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, + SDTCisVec<1>, SDTCisInt<1>, + SDTCVecEltisVT<2, i1>, + SDTCisSameNumEltsAs<1, 2>, + SDTCisVT<3, XLenVT>]>>; +} // let HasMaskOp = true def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameNumEltsAs<1, 2>, SDTCVecEltisVT<2, i1>, SDTCisVT<3, XLenVT>]>; -def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; -def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; + +let HasMaskOp = true in { + // Vector sign/zero extend with additional mask & VL operands. + def riscv_sext_vl : RVSDNode<"VSEXT_VL", SDT_RISCVVEXTEND_VL>; + def riscv_zext_vl : RVSDNode<"VZEXT_VL", SDT_RISCVVEXTEND_VL>; +} // let HasMaskOp = true + def riscv_ext_vl : PatFrags<(ops node:$A, node:$B, node:$C), [(riscv_sext_vl node:$A, node:$B, node:$C), (riscv_zext_vl node:$A, node:$B, node:$C)]>; @@ -408,12 +532,22 @@ def SDT_RISCVVTRUNCATE_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameNumEltsAs<0, 2>, SDTCVecEltisVT<2, i1>, SDTCisVT<3, XLenVT>]>; -def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", - SDT_RISCVVTRUNCATE_VL>; -def riscv_trunc_vector_vl_ssat : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL_SSAT", - SDT_RISCVVTRUNCATE_VL>; -def riscv_trunc_vector_vl_usat : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL_USAT", - SDT_RISCVVTRUNCATE_VL>; + +let HasMaskOp = true in { + // Truncates a RVV integer vector by one power-of-two. Carries both an extra + // mask and VL operand. + def riscv_trunc_vector_vl : RVSDNode<"TRUNCATE_VECTOR_VL", + SDT_RISCVVTRUNCATE_VL>; + + // Truncates a RVV integer vector by one power-of-two. If the value doesn't + // fit in the destination type, the result is saturated. These correspond to + // vnclip and vnclipu with a shift of 0. Carries both an extra mask and VL + // operand. 
+ def riscv_trunc_vector_vl_ssat : RVSDNode<"TRUNCATE_VECTOR_VL_SSAT", + SDT_RISCVVTRUNCATE_VL>; + def riscv_trunc_vector_vl_usat : RVSDNode<"TRUNCATE_VECTOR_VL_USAT", + SDT_RISCVVTRUNCATE_VL>; +} // let HasMaskOp = true def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisInt<1>, @@ -424,14 +558,19 @@ def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; -def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; -def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; -def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>; -def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; -def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; -def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>; -def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>; -def riscv_vwsll_vl : SDNode<"RISCVISD::VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>; + +let HasPassthruOp = true, HasMaskOp = true in { + // Widening instructions with a passthru value a third operand, a mask as a + // fourth operand, and VL as a fifth operand. 
+ def riscv_vwmul_vl : RVSDNode<"VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; + def riscv_vwmulu_vl : RVSDNode<"VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; + def riscv_vwmulsu_vl : RVSDNode<"VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>; + def riscv_vwadd_vl : RVSDNode<"VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; + def riscv_vwaddu_vl : RVSDNode<"VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; + def riscv_vwsub_vl : RVSDNode<"VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>; + def riscv_vwsubu_vl : RVSDNode<"VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>; + def riscv_vwsll_vl : RVSDNode<"VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>; +} // let HasPassthruOp = true, HasMaskOp = true def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisInt<1>, @@ -442,9 +581,14 @@ def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; -def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; -def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; -def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; + +let HasMaskOp = true in { + // Widening ternary operations with a mask as the fourth operand and VL as the + // fifth operand. 
+ def riscv_vwmacc_vl : RVSDNode<"VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; + def riscv_vwmaccu_vl : RVSDNode<"VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; + def riscv_vwmaccsu_vl : RVSDNode<"VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; +} // let HasMaskOp = true def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisFP<1>, @@ -455,9 +599,12 @@ def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; -def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; -def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; -def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; + +let HasPassthruOp = true, HasMaskOp = true in { + def riscv_vfwmul_vl : RVSDNode<"VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; + def riscv_vfwadd_vl : RVSDNode<"VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; + def riscv_vfwsub_vl : RVSDNode<"VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; +} // let HasPassthruOp = true, HasMaskOp = true def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameAs<0, 1>, @@ -468,10 +615,13 @@ def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; -def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; -def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; -def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; -def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; + +let HasPassthruOp = true, HasMaskOp = true in { + def riscv_vwadd_w_vl : RVSDNode<"VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; + def riscv_vwaddu_w_vl : RVSDNode<"VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; + def riscv_vwsub_w_vl 
: RVSDNode<"VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; + def riscv_vwsubu_w_vl : RVSDNode<"VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; +} // let HasPassthruOp = true, HasMaskOp = true def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisSameAs<0, 1>, @@ -483,8 +633,10 @@ def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; -def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; -def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; +let HasPassthruOp = true, HasMaskOp = true in { + def riscv_vfwadd_w_vl : RVSDNode<"VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; + def riscv_vfwsub_w_vl : RVSDNode<"VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; +} // let HasPassthruOp = true, HasMaskOp = true def SDTRVVVecReduce : SDTypeProfile<1, 6, [ SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, @@ -558,9 +710,22 @@ def riscv_fpextend_vl_sameuser : PatFrag<(ops node:$A, node:$B, node:$C), return !N->use_empty() && all_equal(N->users()); }]>; +// These nodes match the semantics of the corresponding RVV vector reduction +// instructions. They produce a vector result which is the reduction +// performed over the second vector operand plus the first element of the +// third vector operand. The first operand is the pass-thru operand. The +// second operand is an unconstrained vector type, and the result, first, and +// third operand's types are expected to be the corresponding full-width +// LMUL=1 type for the second operand: +// nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8 +// nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32 +// The different in types does introduce extra vsetvli instructions but +// similarly it reduces the number of registers consumed per reduction. +// Also has a mask and VL operand. 
+let HasMaskOp = true in foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", "FADD", "SEQ_FADD", "FMIN", "FMAX"] in - def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; + def rvv_vecreduce_#kind#_vl : RVSDNode<"VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; // Give explicit Complexity to prefer simm5/uimm5. def SplatPat : ComplexPattern; @@ -2835,9 +3000,13 @@ foreach vti = !listconcat(AllFloatVectors, AllBFloatVectors) in { // Miscellaneous RISCVISD SDNodes //===----------------------------------------------------------------------===// -def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2, - [SDTCisVec<0>, SDTCVecEltisVT<1, i1>, - SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>; +// Matches the semantics of the vid.v instruction, with a mask and VL +// operand. +let HasMaskOp = true in +def riscv_vid_vl : RVSDNode<"VID_VL", SDTypeProfile<1, 2, [SDTCisVec<0>, + SDTCVecEltisVT<1, i1>, + SDTCisSameNumEltsAs<0, 1>, + SDTCisVT<2, XLenVT>]>>; def SDTRVVSlide : SDTypeProfile<1, 6, [ SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, @@ -2855,12 +3024,27 @@ def SDTRVVFSlide1 : SDTypeProfile<1, 5, [ SDTCisVT<5, XLenVT> ]>; -def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; -def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>; -def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>; -def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>; -def riscv_fslide1up_vl : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>; -def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>; +let HasMaskOp = true in { + // Matches the semantics of vslideup/vslidedown. 
The first operand is the + // pass-thru operand, the second is the source vector, the third is the XLenVT + // index (either constant or non-constant), the fourth is the mask, the fifth + // is the VL and the sixth is the policy. + def riscv_slideup_vl : RVSDNode<"VSLIDEUP_VL", SDTRVVSlide, []>; + def riscv_slidedown_vl : RVSDNode<"VSLIDEDOWN_VL", SDTRVVSlide, []>; + + // Matches the semantics of vslide1up/slide1down. The first operand is + // passthru operand, the second is source vector, third is the XLenVT scalar + // value. The fourth and fifth operands are the mask and VL operands. + def riscv_slide1up_vl : RVSDNode<"VSLIDE1UP_VL", SDTRVVSlide1, []>; + def riscv_slide1down_vl : RVSDNode<"VSLIDE1DOWN_VL", SDTRVVSlide1, []>; + + // Matches the semantics of vfslide1up/vfslide1down. The first operand is + // passthru operand, the second is source vector, third is a scalar value + // whose type matches the element type of the vectors. The fourth and fifth + // operands are the mask and VL operands. 
+ def riscv_fslide1up_vl : RVSDNode<"VFSLIDE1UP_VL", SDTRVVFSlide1, []>; + def riscv_fslide1down_vl : RVSDNode<"VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>; +} // let HasMaskOp = true foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td index eb594c876bd12..3912eb0d16c59 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td @@ -68,12 +68,14 @@ defm RI_VUNZIP2B_V : VALU_IV_V<"ri.vunzip2b", 0b011000>; } // These are modeled after the int binop VL nodes -def ri_vzipeven_vl : SDNode<"RISCVISD::RI_VZIPEVEN_VL", SDT_RISCVIntBinOp_VL>; -def ri_vzipodd_vl : SDNode<"RISCVISD::RI_VZIPODD_VL", SDT_RISCVIntBinOp_VL>; -def ri_vzip2a_vl : SDNode<"RISCVISD::RI_VZIP2A_VL", SDT_RISCVIntBinOp_VL>; -def ri_vzip2b_vl : SDNode<"RISCVISD::RI_VZIP2B_VL", SDT_RISCVIntBinOp_VL>; -def ri_vunzip2a_vl : SDNode<"RISCVISD::RI_VUNZIP2A_VL", SDT_RISCVIntBinOp_VL>; -def ri_vunzip2b_vl : SDNode<"RISCVISD::RI_VUNZIP2B_VL", SDT_RISCVIntBinOp_VL>; +let HasMaskOp = true in { + def ri_vzipeven_vl : RVSDNode<"RI_VZIPEVEN_VL", SDT_RISCVIntBinOp_VL>; + def ri_vzipodd_vl : RVSDNode<"RI_VZIPODD_VL", SDT_RISCVIntBinOp_VL>; + def ri_vzip2a_vl : RVSDNode<"RI_VZIP2A_VL", SDT_RISCVIntBinOp_VL>; + def ri_vzip2b_vl : RVSDNode<"RI_VZIP2B_VL", SDT_RISCVIntBinOp_VL>; + def ri_vunzip2a_vl : RVSDNode<"RI_VUNZIP2A_VL", SDT_RISCVIntBinOp_VL>; + def ri_vunzip2b_vl : RVSDNode<"RI_VUNZIP2B_VL", SDT_RISCVIntBinOp_VL>; +} multiclass RIVPseudoVALU_VV { foreach m = MxList in @@ -129,18 +131,21 @@ def RI_VEXTRACT : CustomRivosXVI<0b010111, OPMVV, (outs GPR:$rd), "ri.vextract.x.v", "$rd, $vs2, $imm">; } - -def ri_vextract : SDNode<"RISCVISD::RI_VEXTRACT", - SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<1>, - SDTCisInt<2>, - SDTCisInt<1>]>>; - -def ri_vinsert_vl : SDNode<"RISCVISD::RI_VINSERT_VL", - SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, - 
SDTCisInt<0>, - SDTCisVT<2, XLenVT>, - SDTCisVT<3, XLenVT>, - SDTCisVT<4, XLenVT>]>>; +// RI_VEXTRACT matches the semantics of ri.vextract.x.v. The result is always +// XLenVT sign extended from the vector element size. VEXTRACT does *not* +// have a VL operand. +def ri_vextract : RVSDNode<"RI_VEXTRACT", + SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<1>, + SDTCisInt<2>, + SDTCisInt<1>]>>; + +// RI_VINSERT_VL matches the semantics of ri.vinsert.v.x. It carries a VL operand. +def ri_vinsert_vl : RVSDNode<"RI_VINSERT_VL", + SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, + SDTCisInt<0>, + SDTCisVT<2, XLenVT>, + SDTCisVT<3, XLenVT>, + SDTCisVT<4, XLenVT>]>>; let Predicates = [HasVendorXRivosVisni], mayLoad = 0, mayStore = 0, hasSideEffects = 0, HasSEWOp = 1 in diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td index fbf4c1de35234..6981daa75d092 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td @@ -495,32 +495,32 @@ def SDT_SF_VC_V_XVW : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisVec<3>, SDTCisSameAs<1, 5>]>; -def sf_vc_v_x_se : SDNode<"RISCVISD::SF_VC_V_X_SE", SDT_SF_VC_V_X, [SDNPHasChain]>; -def sf_vc_v_i_se : SDNode<"RISCVISD::SF_VC_V_I_SE", SDT_SF_VC_V_X, [SDNPHasChain]>; -def sf_vc_vv_se : SDNode<"RISCVISD::SF_VC_VV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; -def sf_vc_xv_se : SDNode<"RISCVISD::SF_VC_XV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; -def sf_vc_iv_se : SDNode<"RISCVISD::SF_VC_IV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; -def sf_vc_fv_se : SDNode<"RISCVISD::SF_VC_FV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; -def sf_vc_v_vv_se : SDNode<"RISCVISD::SF_VC_V_VV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; -def sf_vc_v_xv_se : SDNode<"RISCVISD::SF_VC_V_XV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; -def sf_vc_v_iv_se : SDNode<"RISCVISD::SF_VC_V_IV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; -def sf_vc_v_fv_se : SDNode<"RISCVISD::SF_VC_V_FV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; -def sf_vc_vvv_se : 
SDNode<"RISCVISD::SF_VC_VVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; -def sf_vc_xvv_se : SDNode<"RISCVISD::SF_VC_XVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; -def sf_vc_ivv_se : SDNode<"RISCVISD::SF_VC_IVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; -def sf_vc_fvv_se : SDNode<"RISCVISD::SF_VC_FVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; -def sf_vc_v_vvv_se : SDNode<"RISCVISD::SF_VC_V_VVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; -def sf_vc_v_xvv_se : SDNode<"RISCVISD::SF_VC_V_XVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; -def sf_vc_v_ivv_se : SDNode<"RISCVISD::SF_VC_V_IVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; -def sf_vc_v_fvv_se : SDNode<"RISCVISD::SF_VC_V_FVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; -def sf_vc_vvw_se : SDNode<"RISCVISD::SF_VC_VVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; -def sf_vc_xvw_se : SDNode<"RISCVISD::SF_VC_XVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; -def sf_vc_ivw_se : SDNode<"RISCVISD::SF_VC_IVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; -def sf_vc_fvw_se : SDNode<"RISCVISD::SF_VC_FVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; -def sf_vc_v_vvw_se : SDNode<"RISCVISD::SF_VC_V_VVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; -def sf_vc_v_xvw_se : SDNode<"RISCVISD::SF_VC_V_XVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; -def sf_vc_v_ivw_se : SDNode<"RISCVISD::SF_VC_V_IVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; -def sf_vc_v_fvw_se : SDNode<"RISCVISD::SF_VC_V_FVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; +def sf_vc_v_x_se : RVSDNode<"SF_VC_V_X_SE", SDT_SF_VC_V_X, [SDNPHasChain]>; +def sf_vc_v_i_se : RVSDNode<"SF_VC_V_I_SE", SDT_SF_VC_V_X, [SDNPHasChain]>; +def sf_vc_vv_se : RVSDNode<"SF_VC_VV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; +def sf_vc_xv_se : RVSDNode<"SF_VC_XV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; +def sf_vc_iv_se : RVSDNode<"SF_VC_IV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; +def sf_vc_fv_se : RVSDNode<"SF_VC_FV_SE", SDT_SF_VC_XV, [SDNPHasChain]>; +def sf_vc_v_vv_se : RVSDNode<"SF_VC_V_VV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; +def sf_vc_v_xv_se : RVSDNode<"SF_VC_V_XV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; 
+def sf_vc_v_iv_se : RVSDNode<"SF_VC_V_IV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; +def sf_vc_v_fv_se : RVSDNode<"SF_VC_V_FV_SE", SDT_SF_VC_V_XV, [SDNPHasChain]>; +def sf_vc_vvv_se : RVSDNode<"SF_VC_VVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; +def sf_vc_xvv_se : RVSDNode<"SF_VC_XVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; +def sf_vc_ivv_se : RVSDNode<"SF_VC_IVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; +def sf_vc_fvv_se : RVSDNode<"SF_VC_FVV_SE", SDT_SF_VC_XVV, [SDNPHasChain]>; +def sf_vc_v_vvv_se : RVSDNode<"SF_VC_V_VVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; +def sf_vc_v_xvv_se : RVSDNode<"SF_VC_V_XVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; +def sf_vc_v_ivv_se : RVSDNode<"SF_VC_V_IVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; +def sf_vc_v_fvv_se : RVSDNode<"SF_VC_V_FVV_SE", SDT_SF_VC_V_XVV, [SDNPHasChain]>; +def sf_vc_vvw_se : RVSDNode<"SF_VC_VVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; +def sf_vc_xvw_se : RVSDNode<"SF_VC_XVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; +def sf_vc_ivw_se : RVSDNode<"SF_VC_IVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; +def sf_vc_fvw_se : RVSDNode<"SF_VC_FVW_SE", SDT_SF_VC_XVW, [SDNPHasChain]>; +def sf_vc_v_vvw_se : RVSDNode<"SF_VC_V_VVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; +def sf_vc_v_xvw_se : RVSDNode<"SF_VC_V_XVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; +def sf_vc_v_ivw_se : RVSDNode<"SF_VC_V_IVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; +def sf_vc_v_fvw_se : RVSDNode<"SF_VC_V_FVW_SE", SDT_SF_VC_V_XVW, [SDNPHasChain]>; class VPatVC_OP4_ISD, SDTCisPtrTy<2>, SDTCisVT<3, XLenVT>]>; -def th_lwud : SDNode<"RISCVISD::TH_LWUD", SDT_LoadPair, - [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; -def th_lwd : SDNode<"RISCVISD::TH_LWD", SDT_LoadPair, - [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; -def th_ldd : SDNode<"RISCVISD::TH_LDD", SDT_LoadPair, - [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; -def th_swd : SDNode<"RISCVISD::TH_SWD", SDT_StorePair, - [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; -def th_sdd : SDNode<"RISCVISD::TH_SDD", SDT_StorePair, - [SDNPHasChain, 
SDNPMayStore, SDNPMemOperand]>; +def th_lwud : RVSDNode<"TH_LWUD", SDT_LoadPair, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def th_lwd : RVSDNode<"TH_LWD", SDT_LoadPair, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def th_ldd : RVSDNode<"TH_LDD", SDT_LoadPair, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def th_swd : RVSDNode<"TH_SWD", SDT_StorePair, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; +def th_sdd : RVSDNode<"TH_SDD", SDT_StorePair, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; def ImmThreeAsmOperand : AsmOperandClass { let Name = "ImmThree"; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td index 2ce909c5d0e21..9227c1b1fc18c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td @@ -31,19 +31,32 @@ def SDTIntShiftAddOp : SDTypeProfile<1, 3, [ // shl_add SDTCisInt<3> ]>; -def riscv_shl_add : SDNode<"RISCVISD::SHL_ADD", SDTIntShiftAddOp>; -def riscv_clzw : SDNode<"RISCVISD::CLZW", SDT_RISCVIntUnaryOpW>; -def riscv_ctzw : SDNode<"RISCVISD::CTZW", SDT_RISCVIntUnaryOpW>; -def riscv_rolw : SDNode<"RISCVISD::ROLW", SDT_RISCVIntBinOpW>; -def riscv_rorw : SDNode<"RISCVISD::RORW", SDT_RISCVIntBinOpW>; -def riscv_brev8 : SDNode<"RISCVISD::BREV8", SDTIntUnaryOp>; -def riscv_orc_b : SDNode<"RISCVISD::ORC_B", SDTIntUnaryOp>; -def riscv_zip : SDNode<"RISCVISD::ZIP", SDTIntUnaryOp>; -def riscv_unzip : SDNode<"RISCVISD::UNZIP", SDTIntUnaryOp>; -def riscv_absw : SDNode<"RISCVISD::ABSW", SDTIntUnaryOp>; -def riscv_clmul : SDNode<"RISCVISD::CLMUL", SDTIntBinOp>; -def riscv_clmulh : SDNode<"RISCVISD::CLMULH", SDTIntBinOp>; -def riscv_clmulr : SDNode<"RISCVISD::CLMULR", SDTIntBinOp>; +def riscv_shl_add : RVSDNode<"SHL_ADD", SDTIntShiftAddOp>; + +// RV64IB rotates, directly matching the semantics of the named RISC-V +// instructions. 
+def riscv_rolw : RVSDNode<"ROLW", SDT_RISCVIntBinOpW>; +def riscv_rorw : RVSDNode<"RORW", SDT_RISCVIntBinOpW>; + +// RV64IZbb bit counting instructions directly matching the semantics of the +// named RISC-V instructions. +def riscv_clzw : RVSDNode<"CLZW", SDT_RISCVIntUnaryOpW>; +def riscv_ctzw : RVSDNode<"CTZW", SDT_RISCVIntUnaryOpW>; + +// brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or +// XLenVT. +def riscv_brev8 : RVSDNode<"BREV8", SDTIntUnaryOp>; +def riscv_orc_b : RVSDNode<"ORC_B", SDTIntUnaryOp>; +def riscv_zip : RVSDNode<"ZIP", SDTIntUnaryOp>; +def riscv_unzip : RVSDNode<"UNZIP", SDTIntUnaryOp>; + +// RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel. +def riscv_absw : RVSDNode<"ABSW", SDTIntUnaryOp>; + +// Scalar cryptography +def riscv_clmul : RVSDNode<"CLMUL", SDTIntBinOp>; +def riscv_clmulh : RVSDNode<"CLMULH", SDTIntBinOp>; +def riscv_clmulr : RVSDNode<"CLMULR", SDTIntBinOp>; def BCLRXForm : SDNodeXForm, SDTCisVT<1, XLenVT>]>; -def riscv_fli : SDNode<"RISCVISD::FLI", SDT_RISCVFLI>; +// Zfa fli instruction for constant materialization. +def riscv_fli : RVSDNode<"FLI", SDT_RISCVFLI>; //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td index 48b7ca516a820..7c7e106e868c1 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -20,12 +20,21 @@ def SDT_RISCVFMV_H_X def SDT_RISCVFMV_X_EXTH : SDTypeProfile<1, 1, [SDTCisVT<0, XLenVT>, SDTCisFP<1>]>; +// FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as +// XLEN is the only legal integer width. +// +// FMV_H_X matches the semantics of the FMV.H.X. +// FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result. +// FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result. 
+// +// This is a more convenient semantic for producing dagcombines that remove +// unnecessary GPR->FPR->GPR moves. def riscv_fmv_h_x - : SDNode<"RISCVISD::FMV_H_X", SDT_RISCVFMV_H_X>; + : RVSDNode<"FMV_H_X", SDT_RISCVFMV_H_X>; def riscv_fmv_x_anyexth - : SDNode<"RISCVISD::FMV_X_ANYEXTH", SDT_RISCVFMV_X_EXTH>; + : RVSDNode<"FMV_X_ANYEXTH", SDT_RISCVFMV_X_EXTH>; def riscv_fmv_x_signexth - : SDNode<"RISCVISD::FMV_X_SIGNEXTH", SDT_RISCVFMV_X_EXTH>; + : RVSDNode<"FMV_X_SIGNEXTH", SDT_RISCVFMV_X_EXTH>; //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td index 35d3fdae0bd79..fcd3987bc10ab 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td @@ -15,8 +15,10 @@ // RISC-V specific DAG Nodes. //===----------------------------------------------------------------------===// -def riscv_czero_eqz : SDNode<"RISCVISD::CZERO_EQZ", SDTIntBinOp>; -def riscv_czero_nez : SDNode<"RISCVISD::CZERO_NEZ", SDTIntBinOp>; +// Branchless select operations, matching the semantics of the instructions +// defined in Zicond or XVentanaCondOps +def riscv_czero_eqz : RVSDNode<"CZERO_EQZ", SDTIntBinOp>; // vt.maskc for XVentanaCondOps. +def riscv_czero_nez : RVSDNode<"CZERO_NEZ", SDTIntBinOp>; // vt.maskcn for XVentanaCondOps. 
//===----------------------------------------------------------------------===// // Instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td index 6b26550a29026..960f5669b488c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td @@ -32,13 +32,14 @@ class RVInstRMoprr imm4, bits<3> imm3, bits<3> funct3, RISCVOpcode opcod let Inst{25} = imm4{0}; } -def riscv_mopr : SDNode<"RISCVISD::MOPR", - SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>, - SDTCisSameAs<0, 2>]>>; -def riscv_moprr : SDNode<"RISCVISD::MOPRR", - SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>, - SDTCisSameAs<0, 2>, - SDTCisSameAs<0, 3>]>>; +// May-Be-Operations +def riscv_mopr : RVSDNode<"MOPR", + SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>, + SDTCisSameAs<0, 2>]>>; +def riscv_moprr : RVSDNode<"MOPRR", + SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>, + SDTCisSameAs<0, 2>, + SDTCisSameAs<0, 3>]>>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in class RVMopr imm7, bits<5> imm5, bits<3> funct3, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td index 34f8b006b9c04..522b05761b41c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td @@ -15,20 +15,23 @@ // Operand and SDNode transformation definitions. 
//===----------------------------------------------------------------------===// -def riscv_sha256sig0 : SDNode<"RISCVISD::SHA256SIG0", SDTIntUnaryOp>; -def riscv_sha256sig1 : SDNode<"RISCVISD::SHA256SIG1", SDTIntUnaryOp>; -def riscv_sha256sum0 : SDNode<"RISCVISD::SHA256SUM0", SDTIntUnaryOp>; -def riscv_sha256sum1 : SDNode<"RISCVISD::SHA256SUM1", SDTIntUnaryOp>; +// Scalar cryptography +def riscv_sha256sig0 : RVSDNode<"SHA256SIG0", SDTIntUnaryOp>; +def riscv_sha256sig1 : RVSDNode<"SHA256SIG1", SDTIntUnaryOp>; +def riscv_sha256sum0 : RVSDNode<"SHA256SUM0", SDTIntUnaryOp>; +def riscv_sha256sum1 : RVSDNode<"SHA256SUM1", SDTIntUnaryOp>; def SDT_RISCVZkByteSelect : SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, SDTCisVT<1, XLenVT>, SDTCisVT<2, XLenVT>, SDTCisVT<3, i32>]>; -def riscv_sm4ks : SDNode<"RISCVISD::SM4KS", SDT_RISCVZkByteSelect>; -def riscv_sm4ed : SDNode<"RISCVISD::SM4ED", SDT_RISCVZkByteSelect>; +// Scalar cryptography +def riscv_sm4ks : RVSDNode<"SM4KS", SDT_RISCVZkByteSelect>; +def riscv_sm4ed : RVSDNode<"SM4ED", SDT_RISCVZkByteSelect>; -def riscv_sm3p0 : SDNode<"RISCVISD::SM3P0", SDTIntUnaryOp>; -def riscv_sm3p1 : SDNode<"RISCVISD::SM3P1", SDTIntUnaryOp>; +// Scalar cryptography +def riscv_sm3p0 : RVSDNode<"SM3P0", SDTIntUnaryOp>; +def riscv_sm3p1 : RVSDNode<"SM3P1", SDTIntUnaryOp>; def RnumArg : AsmOperandClass { let Name = "RnumArg"; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td index 6018958f6eb27..ea3c53cb0a5dd 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td @@ -28,9 +28,11 @@ let Predicates = [HasStdExtZvqdotq] in { } // Predicates = [HasStdExtZvqdotq] -def riscv_vqdot_vl : SDNode<"RISCVISD::VQDOT_VL", SDT_RISCVIntBinOp_VL>; -def riscv_vqdotu_vl : SDNode<"RISCVISD::VQDOTU_VL", SDT_RISCVIntBinOp_VL>; -def riscv_vqdotsu_vl : SDNode<"RISCVISD::VQDOTSU_VL", SDT_RISCVIntBinOp_VL>; +let HasPassthruOp = true, HasMaskOp = true in { + def 
riscv_vqdot_vl : RVSDNode<"VQDOT_VL", SDT_RISCVIntBinOp_VL>; + def riscv_vqdotu_vl : RVSDNode<"VQDOTU_VL", SDT_RISCVIntBinOp_VL>; + def riscv_vqdotsu_vl : RVSDNode<"VQDOTSU_VL", SDT_RISCVIntBinOp_VL>; +} // let HasPassthruOp = true, HasMaskOp = true multiclass VPseudoVQDOT_VV_VX { foreach m = MxSet<32>.m in { diff --git a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp index ab1ade89a76d1..4ebf13f5bc538 100644 --- a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp @@ -7,18 +7,13 @@ //===----------------------------------------------------------------------===// #include "RISCVSelectionDAGInfo.h" -#include "RISCVISelLowering.h" -using namespace llvm; +#define GET_SDNODE_DESC +#include "RISCVGenSDNodeInfo.inc" -RISCVSelectionDAGInfo::~RISCVSelectionDAGInfo() = default; +using namespace llvm; -bool RISCVSelectionDAGInfo::isTargetMemoryOpcode(unsigned Opcode) const { - return Opcode >= RISCVISD::FIRST_MEMORY_OPCODE && - Opcode <= RISCVISD::LAST_MEMORY_OPCODE; -} +RISCVSelectionDAGInfo::RISCVSelectionDAGInfo() + : SelectionDAGGenTargetInfo(RISCVGenSDNodeInfo) {} -bool RISCVSelectionDAGInfo::isTargetStrictFPOpcode(unsigned Opcode) const { - return Opcode >= RISCVISD::FIRST_STRICTFP_OPCODE && - Opcode <= RISCVISD::LAST_STRICTFP_OPCODE; -} +RISCVSelectionDAGInfo::~RISCVSelectionDAGInfo() = default; diff --git a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.h b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.h index 6977d8507a960..4757571fb3de4 100644 --- a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.h +++ b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.h @@ -9,17 +9,48 @@ #ifndef LLVM_LIB_TARGET_RISCV_RISCVSELECTIONDAGINFO_H #define LLVM_LIB_TARGET_RISCV_RISCVSELECTIONDAGINFO_H +#include "llvm/CodeGen/SDNodeInfo.h" #include "llvm/CodeGen/SelectionDAGTargetInfo.h" +#define GET_SDNODE_ENUM +#include "RISCVGenSDNodeInfo.inc" + namespace llvm { -class RISCVSelectionDAGInfo : public 
SelectionDAGTargetInfo { +namespace RISCVISD { +// RISCVISD Node TSFlags +enum : llvm::SDNodeTSFlags { + HasPassthruOpMask = 1 << 0, + HasMaskOpMask = 1 << 1, +}; +} // namespace RISCVISD + +class RISCVSelectionDAGInfo : public SelectionDAGGenTargetInfo { public: - ~RISCVSelectionDAGInfo() override; + RISCVSelectionDAGInfo(); - bool isTargetMemoryOpcode(unsigned Opcode) const override; + ~RISCVSelectionDAGInfo() override; - bool isTargetStrictFPOpcode(unsigned Opcode) const override; + bool hasPassthruOp(unsigned Opcode) const { + return GenNodeInfo.getDesc(Opcode).TSFlags & RISCVISD::HasPassthruOpMask; + } + + bool hasMaskOp(unsigned Opcode) const { + return GenNodeInfo.getDesc(Opcode).TSFlags & RISCVISD::HasMaskOpMask; + } + + unsigned getMAccOpcode(unsigned MulOpcode) const { + switch (static_cast(MulOpcode)) { + default: + llvm_unreachable("Unexpected opcode"); + case RISCVISD::VWMUL_VL: + return RISCVISD::VWMACC_VL; + case RISCVISD::VWMULU_VL: + return RISCVISD::VWMACCU_VL; + case RISCVISD::VWMULSU_VL: + return RISCVISD::VWMACCSU_VL; + } + } }; } // namespace llvm