diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 6f6084b99dda2..3e9c0416d4b65 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -465,10 +465,14 @@ bool NVPTXDAGToDAGISel::tryUNPACK_VECTOR(SDNode *N) { bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) { SDValue Vector = N->getOperand(0); - // We only care about 16x2 as it's the only real vector type we - // need to deal with. + // We only care about packed vector types: 16x2 and 32x2. MVT VT = Vector.getSimpleValueType(); - if (!Isv2x16VT(VT)) + unsigned NewOpcode = 0; + if (Isv2x16VT(VT)) + NewOpcode = NVPTX::I32toV2I16; + else if (VT == MVT::v2f32) + NewOpcode = NVPTX::I64toV2F32; + else return false; // Find and record all uses of this vector that extract element 0 or 1. SmallVector E0, E1; @@ -488,16 +492,19 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) { } } - // There's no point scattering f16x2 if we only ever access one + // There's no point scattering f16x2 or f32x2 if we only ever access one // element of it. if (E0.empty() || E1.empty()) return false; - // Merge (f16 extractelt(V, 0), f16 extractelt(V,1)) - // into f16,f16 SplitF16x2(V) + // Merge: + // (f16 extractelt(V, 0), f16 extractelt(V,1)) + // -> f16,f16 SplitF16x2(V) + // (f32 extractelt(V, 0), f32 extractelt(V,1)) + // -> f32,f32 SplitF32x2(V) MVT EltVT = VT.getVectorElementType(); SDNode *ScatterOp = - CurDAG->getMachineNode(NVPTX::I32toV2I16, SDLoc(N), EltVT, EltVT, Vector); + CurDAG->getMachineNode(NewOpcode, SDLoc(N), EltVT, EltVT, Vector); for (auto *Node : E0) ReplaceUses(SDValue(Node, 0), SDValue(ScatterOp, 0)); for (auto *Node : E1) @@ -1026,6 +1033,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, unsigned Opcode_i8, case MVT::i32: return Opcode_i32; case MVT::i64: + case MVT::v2f32: return Opcode_i64; case MVT::f16: case MVT::bf16: @@ -1051,6 +1059,7 @@ static int getLdStRegType(EVT VT) { case MVT::bf16: case MVT::v2f16: case MVT::v2bf16: + case MVT::v2f32: return NVPTX::PTXLdStInstCode::Untyped; default: return NVPTX::PTXLdStInstCode::Float; @@ -1089,20 +1098,27 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) { // Float : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float MVT SimpleVT = LoadedVT.getSimpleVT(); MVT ScalarVT = SimpleVT.getScalarType(); - // Read at least 8 bits (predicates are stored as 8-bit values) - unsigned FromTypeWidth = std::max(8U, (unsigned)ScalarVT.getSizeInBits()); - unsigned int FromType; // Vector Setting unsigned VecType = NVPTX::PTXLdStInstCode::Scalar; if (SimpleVT.isVector()) { - assert((Isv2x16VT(LoadedVT) || LoadedVT == MVT::v4i8) && - "Unexpected vector type"); - // v2f16/v2bf16/v2i16 is loaded using ld.b32 - FromTypeWidth = 32; + switch (LoadedVT.getSimpleVT().SimpleTy) { + case MVT::v2f16: + case MVT::v2bf16: + case MVT::v2i16: + case MVT::v4i8: + case MVT::v2f32: + ScalarVT = LoadedVT.getSimpleVT(); + break; + default: + llvm_unreachable("Unsupported vector type for non-vector load"); + } } - if (PlainLoad && (PlainLoad->getExtensionType() == ISD::SEXTLOAD)) + // Read at least 8 bits (predicates are stored as 8-bit values) + unsigned FromTypeWidth = std::max(8U, (unsigned)ScalarVT.getSizeInBits()); + unsigned int FromType; + if (PlainLoad && PlainLoad->getExtensionType() == ISD::SEXTLOAD) FromType = NVPTX::PTXLdStInstCode::Signed; else FromType = getLdStRegType(ScalarVT); @@ -1142,7 +1158,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) { return true; } -static bool 
isSubVectorPackedInI32(EVT EltVT) { +static bool isSubVectorPackedInInteger(EVT EltVT) { // Despite vectors like v8i8, v16i8, v8i16 being within the bit-limit for // total load/store size, PTX syntax only supports v2/v4. Thus, we can't use // vectorized loads/stores with the actual element type for i8/i16 as that @@ -1150,7 +1166,9 @@ static bool isSubVectorPackedInI32(EVT EltVT) { // In order to load/store such vectors efficiently, in Type Legalization // we split the vector into word-sized chunks (v2x16/v4i8). Now, we will // lower to PTX as vectors of b32. - return Isv2x16VT(EltVT) || EltVT == MVT::v4i8; + // We also consider v2f32 as an upsized type, which may be used in packed + // (f32x2) instructions. + return Isv2x16VT(EltVT) || EltVT == MVT::v4i8 || EltVT == MVT::v2f32; } bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { @@ -1199,9 +1217,24 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { return false; } - if (isSubVectorPackedInI32(EltVT)) { - EltVT = MVT::i32; + LLVM_DEBUG({ + dbgs() << "tryLoadVector on " << TLI->getTargetNodeName(N->getOpcode()) + << ":\n"; + dbgs() << " load type: " << MemVT << "\n"; + dbgs() << " total load width: " << TotalWidth << " bits\n"; + dbgs() << " from type width: " << FromTypeWidth << " bits\n"; + dbgs() << " element type: " << EltVT << "\n"; + }); + + if (isSubVectorPackedInInteger(EltVT)) { + FromTypeWidth = EltVT.getSizeInBits(); + EltVT = MVT::getIntegerVT(FromTypeWidth); FromType = NVPTX::PTXLdStInstCode::Untyped; + LLVM_DEBUG({ + dbgs() << " packed integers detected:\n"; + dbgs() << " from type width: " << FromTypeWidth << " (new)\n"; + dbgs() << " element type: " << EltVT << " (new)\n"; + }); } assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 && @@ -1262,17 +1295,39 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { EltVT = MVT::i64; NumElts = 2; } + + std::optional Opcode; + if (EltVT.isVector()) { NumElts = EltVT.getVectorNumElements(); EltVT = EltVT.getVectorElementType(); // vectors of 8/16bits type are loaded/stored as multiples of v4i8/v2x16 // elements. - if ((EltVT == MVT::f16 && OrigType == MVT::v2f16) || + if ((EltVT == MVT::f32 && OrigType == MVT::v2f32) || + (EltVT == MVT::f16 && OrigType == MVT::v2f16) || (EltVT == MVT::bf16 && OrigType == MVT::v2bf16) || (EltVT == MVT::i16 && OrigType == MVT::v2i16) || (EltVT == MVT::i8 && OrigType == MVT::v4i8)) { assert(NumElts % OrigType.getVectorNumElements() == 0 && "NumElts must be divisible by the number of elts in subvectors"); + if (N->getOpcode() == ISD::LOAD || + N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { + switch (OrigType.getSimpleVT().SimpleTy) { + case MVT::v2f32: + Opcode = N->getOpcode() == ISD::LOAD ? NVPTX::INT_PTX_LDG_GLOBAL_b64 + : NVPTX::INT_PTX_LDU_GLOBAL_b64; + break; + case MVT::v2f16: + case MVT::v2bf16: + case MVT::v2i16: + case MVT::v4i8: + Opcode = N->getOpcode() == ISD::LOAD ? 
NVPTX::INT_PTX_LDG_GLOBAL_b32 + : NVPTX::INT_PTX_LDU_GLOBAL_b32; + break; + default: + llvm_unreachable("Unhandled packed vector type"); + } + } EltVT = OrigType; NumElts /= OrigType.getVectorNumElements(); } @@ -1292,50 +1347,51 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { SelectADDR(Op1, Base, Offset); SDValue Ops[] = {Base, Offset, Chain}; - std::optional Opcode; - switch (N->getOpcode()) { - default: - return false; - case ISD::LOAD: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_GLOBAL_i8, - NVPTX::INT_PTX_LDG_GLOBAL_i16, NVPTX::INT_PTX_LDG_GLOBAL_i32, - NVPTX::INT_PTX_LDG_GLOBAL_i64, NVPTX::INT_PTX_LDG_GLOBAL_f32, - NVPTX::INT_PTX_LDG_GLOBAL_f64); - break; - case ISD::INTRINSIC_W_CHAIN: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_GLOBAL_i8, - NVPTX::INT_PTX_LDU_GLOBAL_i16, NVPTX::INT_PTX_LDU_GLOBAL_i32, - NVPTX::INT_PTX_LDU_GLOBAL_i64, NVPTX::INT_PTX_LDU_GLOBAL_f32, - NVPTX::INT_PTX_LDU_GLOBAL_f64); - break; - case NVPTXISD::LoadV2: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v2i8_ELE, - NVPTX::INT_PTX_LDG_G_v2i16_ELE, NVPTX::INT_PTX_LDG_G_v2i32_ELE, - NVPTX::INT_PTX_LDG_G_v2i64_ELE, NVPTX::INT_PTX_LDG_G_v2f32_ELE, - NVPTX::INT_PTX_LDG_G_v2f64_ELE); - break; - case NVPTXISD::LDUV2: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v2i8_ELE, - NVPTX::INT_PTX_LDU_G_v2i16_ELE, NVPTX::INT_PTX_LDU_G_v2i32_ELE, - NVPTX::INT_PTX_LDU_G_v2i64_ELE, NVPTX::INT_PTX_LDU_G_v2f32_ELE, - NVPTX::INT_PTX_LDU_G_v2f64_ELE); - break; - case NVPTXISD::LoadV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE, - NVPTX::INT_PTX_LDG_G_v4i16_ELE, NVPTX::INT_PTX_LDG_G_v4i32_ELE, - std::nullopt, NVPTX::INT_PTX_LDG_G_v4f32_ELE, std::nullopt); - break; - case NVPTXISD::LDUV4: - Opcode = pickOpcodeForVT( - EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE, - NVPTX::INT_PTX_LDU_G_v4i16_ELE, NVPTX::INT_PTX_LDU_G_v4i32_ELE, - std::nullopt, NVPTX::INT_PTX_LDU_G_v4f32_ELE, std::nullopt); - break; + if (!Opcode) { + switch (N->getOpcode()) { + default: + return false; + case ISD::LOAD: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_GLOBAL_i8, + NVPTX::INT_PTX_LDG_GLOBAL_i16, NVPTX::INT_PTX_LDG_GLOBAL_i32, + NVPTX::INT_PTX_LDG_GLOBAL_i64, NVPTX::INT_PTX_LDG_GLOBAL_f32, + NVPTX::INT_PTX_LDG_GLOBAL_f64); + break; + case ISD::INTRINSIC_W_CHAIN: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_GLOBAL_i8, + NVPTX::INT_PTX_LDU_GLOBAL_i16, NVPTX::INT_PTX_LDU_GLOBAL_i32, + NVPTX::INT_PTX_LDU_GLOBAL_i64, NVPTX::INT_PTX_LDU_GLOBAL_f32, + NVPTX::INT_PTX_LDU_GLOBAL_f64); + break; + case NVPTXISD::LoadV2: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v2i8_ELE, + NVPTX::INT_PTX_LDG_G_v2i16_ELE, NVPTX::INT_PTX_LDG_G_v2i32_ELE, + NVPTX::INT_PTX_LDG_G_v2i64_ELE, NVPTX::INT_PTX_LDG_G_v2f32_ELE, + NVPTX::INT_PTX_LDG_G_v2f64_ELE); + break; + case NVPTXISD::LDUV2: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v2i8_ELE, + NVPTX::INT_PTX_LDU_G_v2i16_ELE, NVPTX::INT_PTX_LDU_G_v2i32_ELE, + NVPTX::INT_PTX_LDU_G_v2i64_ELE, NVPTX::INT_PTX_LDU_G_v2f32_ELE, + NVPTX::INT_PTX_LDU_G_v2f64_ELE); + break; + case NVPTXISD::LoadV4: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDG_G_v4i8_ELE, + NVPTX::INT_PTX_LDG_G_v4i16_ELE, NVPTX::INT_PTX_LDG_G_v4i32_ELE, + std::nullopt, NVPTX::INT_PTX_LDG_G_v4f32_ELE, 
std::nullopt); + break; + case NVPTXISD::LDUV4: + Opcode = pickOpcodeForVT( + EltVT.getSimpleVT().SimpleTy, NVPTX::INT_PTX_LDU_G_v4i8_ELE, + NVPTX::INT_PTX_LDU_G_v4i16_ELE, NVPTX::INT_PTX_LDU_G_v4i32_ELE, + std::nullopt, NVPTX::INT_PTX_LDU_G_v4f32_ELE, std::nullopt); + break; + } } if (!Opcode) return false; @@ -1411,14 +1467,21 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) { // Type Setting: toType + toTypeWidth // - for integer type, always use 'u' MVT ScalarVT = SimpleVT.getScalarType(); - unsigned ToTypeWidth = ScalarVT.getSizeInBits(); if (SimpleVT.isVector()) { - assert((Isv2x16VT(StoreVT) || StoreVT == MVT::v4i8) && - "Unexpected vector type"); - // v2x16 is stored using st.b32 - ToTypeWidth = 32; + switch (StoreVT.getSimpleVT().SimpleTy) { + case MVT::v2f16: + case MVT::v2bf16: + case MVT::v2i16: + case MVT::v4i8: + case MVT::v2f32: + ScalarVT = StoreVT.getSimpleVT(); + break; + default: + llvm_unreachable("Unsupported vector type for non-vector store"); + } } + unsigned ToTypeWidth = ScalarVT.getSizeInBits(); unsigned int ToType = getLdStRegType(ScalarVT); // Create the machine instruction DAG @@ -1506,9 +1569,24 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { return false; } - if (isSubVectorPackedInI32(EltVT)) { - EltVT = MVT::i32; + LLVM_DEBUG({ + dbgs() << "tryStoreVector on " << TLI->getTargetNodeName(N->getOpcode()) + << ":\n"; + dbgs() << " store type: " << StoreVT << "\n"; + dbgs() << " total store width: " << TotalWidth << " bits\n"; + dbgs() << " to type width: " << ToTypeWidth << " bits\n"; + dbgs() << " element type: " << EltVT << "\n"; + }); + + if (isSubVectorPackedInInteger(EltVT)) { + ToTypeWidth = EltVT.getSizeInBits(); + EltVT = MVT::getIntegerVT(ToTypeWidth); ToType = NVPTX::PTXLdStInstCode::Untyped; + LLVM_DEBUG({ + dbgs() << " packed integers detected:\n"; + dbgs() << " to type width: " << ToTypeWidth << " (new)\n"; + dbgs() << " element type: " << EltVT << " (new)\n"; + }); } assert(isPowerOf2_32(ToTypeWidth) && ToTypeWidth >= 8 && ToTypeWidth <= 128 && diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 3769aae7b620f..8984d00b7c471 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -295,8 +295,8 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, // TargetLoweringBase::getVectorTypeBreakdown() which is invoked in // ComputePTXValueVTs() cannot currently break down non-power-of-2 sized // vectors. - if ((Is16bitsType(EltVT.getSimpleVT())) && NumElts % 2 == 0 && - isPowerOf2_32(NumElts)) { + if ((Is16bitsType(EltVT.getSimpleVT()) || EltVT == MVT::f32) && + NumElts % 2 == 0 && isPowerOf2_32(NumElts)) { // Vectors with an even number of f16 elements will be passed to // us as an array of v2f16/v2bf16 elements. We must match this so we // stay in sync with Ins/Outs. @@ -310,6 +310,9 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, case MVT::i16: EltVT = MVT::v2i16; break; + case MVT::f32: + EltVT = MVT::v2f32; + break; default: llvm_unreachable("Unexpected type"); } @@ -576,6 +579,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, addRegisterClass(MVT::v2f16, &NVPTX::Int32RegsRegClass); addRegisterClass(MVT::bf16, &NVPTX::Int16RegsRegClass); addRegisterClass(MVT::v2bf16, &NVPTX::Int32RegsRegClass); + addRegisterClass(MVT::v2f32, &NVPTX::Int64RegsRegClass); // Conversion to/from FP16/FP16x2 is always legal. 
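Illustrative note (not part of the patch): once v2f32 is registered in Int64Regs, a <2 x float> value travels in a single 64-bit register, so plain loads and stores of it select the .b64 forms handled by tryLoad/tryStore earlier in the patch. A minimal LLVM IR sketch with a made-up function name; the registers in the actual emitted PTX will differ:

  ; copy a <2 x float> through one 64-bit register
  define void @copy_v2f32(ptr %in, ptr %out) {
    %v = load <2 x float>, ptr %in, align 8     ; expected to select ld.b64
    store <2 x float> %v, ptr %out, align 8     ; expected to select st.b64
    ret void
  }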
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); @@ -612,6 +616,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand); + // Custom conversions to/from v2i8. setOperationAction(ISD::BITCAST, MVT::v2i8, Custom); @@ -637,8 +645,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Operations not directly supported by NVPTX. for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32, - MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8, - MVT::i32, MVT::i64}) { + MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, + MVT::v4i8, MVT::i32, MVT::i64}) { + setOperationAction(ISD::VSELECT, VT, Expand); setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction(ISD::BR_CC, VT, Expand); } @@ -820,7 +829,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // We have some custom DAG combine patterns for these nodes setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD, ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT, - ISD::BUILD_VECTOR, ISD::ADDRSPACECAST}); + ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::FP_ROUND, + ISD::TRUNCATE, ISD::LOAD, ISD::STORE, ISD::BITCAST}); // setcc for f16x2 and bf16x2 needs special handling to prevent // legalizer's attempt to scalarize it due to v2i1 not being legal. @@ -841,6 +851,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setBF16OperationAction(Op, MVT::bf16, Legal, Promote); if (getOperationAction(Op, MVT::bf16) == Promote) AddPromotedToType(Op, MVT::bf16, MVT::f32); + setOperationAction(Op, MVT::v2f32, + STI.hasF32x2Instructions() ? Legal : Expand); } // On SM80, we select add/mul/sub as fma to avoid promotion to float @@ -886,6 +898,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::FP_ROUND, VT, Custom); } } + setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); // sm_80 only has conversions between f32 and bf16. Custom lower all other // bf16 conversions. @@ -923,6 +936,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS}) { setOperationAction(Op, MVT::f16, Promote); setOperationAction(Op, MVT::f32, Legal); + setOperationAction(Op, MVT::v2f32, Expand); setOperationAction(Op, MVT::f64, Legal); setOperationAction(Op, MVT::v2f16, Expand); setOperationAction(Op, MVT::v2bf16, Expand); @@ -978,6 +992,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand); setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote); setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand); + setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); // FLOG2 supports f32 only // f16/bf16 types aren't supported, but they are promoted/expanded to f32. 
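For context (illustrative, not from the patch): with the actions above, packed f32 add/sub/mul/fma on v2f32 are treated as Legal only when the subtarget reports hasF32x2Instructions() (sm_100 and PTX 8.6, per the NVPTXSubtarget.h change later in the patch), while the remaining v2f32 operations (div, sqrt, exp2, log2, ...) are expanded into two scalar f32 operations. A rough IR sketch with made-up function names:

  define <2 x float> @mul_add(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
    ; may select mul.f32x2 / add.f32x2 on sm_100+, otherwise two scalar f32 ops each
    %m = fmul <2 x float> %a, %b
    %r = fadd <2 x float> %m, %c
    ret <2 x float> %r
  }

  define <2 x float> @div_pair(<2 x float> %a, <2 x float> %b) {
    ; FDIV stays Expand for v2f32, so this is always split into two scalar f32 divides
    %r = fdiv <2 x float> %a, %b
    ret <2 x float> %r
  }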
@@ -985,7 +1000,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::FLOG2, MVT::f32, Legal); setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32); setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32); - setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16}, Expand); + setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, + Expand); } setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom); @@ -2257,7 +2273,8 @@ SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, return Op; // Extract individual elements and select one of them. - assert(Isv2x16VT(VectorVT) && "Unexpected vector type."); + assert((Isv2x16VT(VectorVT) || VectorVT == MVT::v2f32) && + "Unexpected vector type."); EVT EltVT = VectorVT.getVectorElementType(); SDLoc dl(Op.getNode()); @@ -3127,10 +3144,10 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { if (Op.getValueType() == MVT::i1) return LowerLOADi1(Op, DAG); - // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle - // unaligned loads and have to handle it here. + // v2f16/v2bf16/v2i16/v4i8/v2f32 are legal, so we can't rely on legalizer to + // handle unaligned loads and have to handle it here. EVT VT = Op.getValueType(); - if (Isv2x16VT(VT) || VT == MVT::v4i8) { + if (Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32) { LoadSDNode *Load = cast(Op); EVT MemVT = Load->getMemoryVT(); if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), @@ -3174,22 +3191,22 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { if (VT == MVT::i1) return LowerSTOREi1(Op, DAG); - // v2f16 is legal, so we can't rely on legalizer to handle unaligned - // stores and have to handle it here. - if ((Isv2x16VT(VT) || VT == MVT::v4i8) && + // v2f16/v2bf16/v2i16/v4i8/v2f32 are legal, so we can't rely on legalizer to + // handle unaligned stores and have to handle it here. + if ((Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32) && !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), VT, *Store->getMemOperand())) return expandUnalignedStore(Store, DAG); - // v2f16, v2bf16 and v2i16 don't need special handling. - if (Isv2x16VT(VT) || VT == MVT::v4i8) + // v2f16/v2bf16/v2i16/v4i8/v2f32 don't need special handling. + if (Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32) return SDValue(); return LowerSTOREVector(Op, DAG); } -SDValue -NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { +static SDValue convertVectorStore(SDValue Op, SelectionDAG &DAG, + const SmallVectorImpl &Elements) { MemSDNode *N = cast(Op.getNode()); SDValue Val = N->getOperand(1); SDLoc DL(N); @@ -3250,6 +3267,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { NumEltsPerSubVector); Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts)); } + } else if (!Elements.empty()) { + Ops.insert(Ops.end(), Elements.begin(), Elements.end()); } else { SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val); for (const unsigned I : llvm::seq(NumElts)) { @@ -3273,10 +3292,19 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops, N->getMemoryVT(), N->getMemOperand()); - // return DCI.CombineTo(N, NewSt, true); return NewSt; } +// Default variant where we don't pass in elements. 
+static SDValue convertVectorStore(SDValue Op, SelectionDAG &DAG) { + return convertVectorStore(Op, DAG, SmallVector{}); +} + +SDValue NVPTXTargetLowering::LowerSTOREVector(SDValue Op, + SelectionDAG &DAG) const { + return convertVectorStore(Op, DAG); +} + // st i1 v, addr // => // v1 = zxt v to i16 @@ -3465,6 +3493,8 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( // vectors which contain v2f16 or v2bf16 elements. So we must load // using i32 here and then bitcast back. LoadVT = MVT::i32; + else if (EltVT == MVT::v2f32) + LoadVT = MVT::i64; EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts); SDValue VecAddr = @@ -5071,26 +5101,427 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, return SDValue(); } -static SDValue PerformStoreCombineHelper(SDNode *N, std::size_t Front, - std::size_t Back) { +/// OverrideVT - allows overriding result and memory type +static std::optional> +convertVectorLoad(SDNode *N, SelectionDAG &DAG, bool BuildVector, + std::optional OverrideVT = std::nullopt) { + LoadSDNode *LD = cast(N); + const EVT ResVT = OverrideVT.value_or(LD->getValueType(0)); + const EVT MemVT = OverrideVT.value_or(LD->getMemoryVT()); + + // If we're doing sign/zero extension as part of the load, avoid lowering to + // a LoadV node. TODO: consider relaxing this restriction. + if (ResVT != MemVT) + return {}; + + const auto NumEltsAndEltVT = getVectorLoweringShape(ResVT); + if (!NumEltsAndEltVT) + return {}; + const auto [NumElts, EltVT] = NumEltsAndEltVT.value(); + + Align Alignment = LD->getAlign(); + const auto &TD = DAG.getDataLayout(); + Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext())); + if (Alignment < PrefAlign) { + // This load is not sufficiently aligned, so bail out and let this vector + // load be scalarized. Note that we may still be able to emit smaller + // vector loads. For example, if we are loading a <4 x float> with an + // alignment of 8, this check will fail but the legalizer will try again + // with 2 x <2 x float>, which will succeed with an alignment of 8. + return {}; + } + + // Since LoadV2 is a target node, we cannot rely on DAG type legalization. + // Therefore, we must ensure the type is legal. For i1 and i8, we set the + // loaded type to i16 and propagate the "real" type as the memory type. + const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT; + + unsigned Opcode; + SDVTList LdResVTs; + switch (NumElts) { + default: + return {}; + case 2: + Opcode = NVPTXISD::LoadV2; + LdResVTs = DAG.getVTList(LoadEltVT, LoadEltVT, MVT::Other); + break; + case 4: { + Opcode = NVPTXISD::LoadV4; + LdResVTs = + DAG.getVTList({LoadEltVT, LoadEltVT, LoadEltVT, LoadEltVT, MVT::Other}); + break; + } + } + SDLoc DL(LD); + + // Copy regular operands + SmallVector OtherOps(LD->ops()); + + // The select routine does not have access to the LoadSDNode instance, so + // pass along the extension information + OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL)); + + SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT, + LD->getMemOperand()); + SDValue LoadChain = NewLD.getValue(NumElts); + + if (BuildVector) { + SmallVector ScalarRes; + if (EltVT.isVector()) { + assert(EVT(EltVT.getVectorElementType()) == ResVT.getVectorElementType()); + assert(NumElts * EltVT.getVectorNumElements() == + ResVT.getVectorNumElements()); + // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back + // into individual elements. 
+ for (const unsigned I : llvm::seq(NumElts)) { + SDValue SubVector = NewLD.getValue(I); + DAG.ExtractVectorElements(SubVector, ScalarRes); + } + } else { + for (const unsigned I : llvm::seq(NumElts)) { + SDValue Res = NewLD.getValue(I); + if (LoadEltVT != EltVT) + Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res); + ScalarRes.push_back(Res); + } + } + + const MVT BuildVecVT = + MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size()); + SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes); + SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec); + + return {{LoadValue, LoadChain}}; + } + + return {{NewLD, LoadChain}}; +} + +static MachineMemOperand * +getMachineMemOperandForType(const SelectionDAG &DAG, + const MachineMemOperand *MMO, + const MachinePointerInfo &PointerInfo, MVT VT) { + return DAG.getMachineFunction().getMachineMemOperand(MMO, PointerInfo, + LLT(VT)); +} + +static SDValue PerformLoadCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + if (DCI.DAG.getOptLevel() == CodeGenOptLevel::None) + return {}; + + auto *MemN = cast(N); + // only operate on vectors of f32s / i64s + if (EVT MemVT = MemN->getMemoryVT(); + !(MemVT == MVT::i64 || + (MemVT.isVector() && (MemVT.getVectorElementType() == MVT::f32 || + MemVT.getVectorElementType() == MVT::i64)))) + return SDValue(); + + const unsigned OrigNumResults = + llvm::count_if(N->values(), [](const auto &VT) { + return VT == MVT::i64 || VT == MVT::f32 || VT.isVector(); + }); + + SmallDenseMap ExtractElts; + SmallVector ProxyRegs(OrigNumResults, nullptr); + SmallVector> WorkList{{N, {}}}; + bool ProcessingInitialLoad = true; + while (!WorkList.empty()) { + auto [V, Offset] = WorkList.pop_back_val(); + + // follow users of this to an extractelt, along the way collecting proxy + // regs and bitcasts + for (SDUse &U : V->uses()) { + if (U.getValueType() == MVT::Other || U.getValueType() == MVT::Glue) + continue; // we'll process chain/glue later + + if (ProcessingInitialLoad) + Offset = U.getResNo(); + + SDNode *User = U.getUser(); + if (User->getOpcode() == NVPTXISD::ProxyReg) { + SDNode *&ProxyReg = ProxyRegs[Offset]; + + // We shouldn't have multiple proxy regs for the same value from the + // load, but bail out anyway since we don't handle this. + if (ProxyReg) + return SDValue(); + + ProxyReg = User; + } else if (User->getOpcode() == ISD::BITCAST && + User->getValueType(0) == MVT::v2f32 && + U.getValueType() == MVT::i64) { + // match v2f32 = bitcast i64 + // continue and push the instruction + } else if (User->getOpcode() == ISD::EXTRACT_VECTOR_ELT && + User->getValueType(0) == MVT::f32) { + // match f32 = extractelt v2f32 + if (auto *CI = dyn_cast(User->getOperand(1))) { + unsigned Index = CI->getZExtValue(); + ExtractElts[User] = 2 * Offset + Index; + continue; // don't search + } + return SDValue(); // could not match + } else + return SDValue(); // couldn't match + + // enqueue this to visit its uses + WorkList.push_back({User, Offset}); + } + + // After we're done with the load, propagate the result offsets. + ProcessingInitialLoad = false; + } + + // (2) If the load's value is only used as f32 elements, replace all + // extractelts with individual elements of the newly-created load. If there's + // a ProxyReg, handle that too. After this check, we'll proceed in the + // following way: + // 1. Determine which type of load to create, which will split the results + // of the original load into f32 components. + // 2. If there's a ProxyReg, split that too. + // 3. 
Replace all extractelts with references to the new load / proxy reg. + // 4. Replace all glue/chain references with references to the new load / + // proxy reg. + if (ExtractElts.empty()) + return SDValue(); + + // Do we have to tweak the opcode for an NVPTXISD::Load* or do we have to + // rewrite an ISD::LOAD? + std::optional NewOpcode; + + // LoadV's are handled slightly different in ISelDAGToDAG. + bool IsLoadV = false; + switch (N->getOpcode()) { + case NVPTXISD::LoadV2: + NewOpcode = NVPTXISD::LoadV4; + IsLoadV = true; + break; + case NVPTXISD::LoadParam: + NewOpcode = NVPTXISD::LoadParamV2; + break; + case NVPTXISD::LoadParamV2: + NewOpcode = NVPTXISD::LoadParamV4; + break; + } + + SDValue OldChain, OldGlue; + for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) { + if (N->getValueType(I) == MVT::Other) + OldChain = SDValue(N, I); + else if (N->getValueType(I) == MVT::Glue) + OldGlue = SDValue(N, I); + } + + SDValue NewLoad, NewChain, NewGlue /* (optional) */; + unsigned NumElts = 0; + if (NewOpcode) { // tweak NVPTXISD::Load* opcode + SmallVector VTs; + + // should always be non-null after this + std::optional NewChainIdx; + std::optional NewGlueIdx; + for (const EVT &V : N->values()) { + if (V == MVT::i64 || V == MVT::v2f32) { + VTs.append({MVT::f32, MVT::f32}); + NumElts += 2; + } else { + assert((V == MVT::Other || V == MVT::Glue) && + "expected i64,...,ch,glue = load or v2f32,ch = load"); + if (V == MVT::Other) + NewChainIdx = VTs.size(); + else + NewGlueIdx = VTs.size(); + VTs.push_back(V); + } + } + + MVT LoadVT = MVT::f32; + MachineMemOperand *MMO = MemN->getMemOperand(); + + if (IsLoadV) { + // Some loads must have an operand type that matches the number of results + // and the type of each result. Because we changed a vNi64 to v(N*2)f32 we + // have to update it here. Note that LoadParam is not handled the same way + // in NVPXISelDAGToDAG so we only do this for LoadV*. 
+ LoadVT = MVT::getVectorVT(MVT::f32, NumElts); + MMO = getMachineMemOperandForType(DCI.DAG, MMO, MemN->getPointerInfo(), + LoadVT); + } + + NewLoad = DCI.DAG.getMemIntrinsicNode( + *NewOpcode, SDLoc(N), DCI.DAG.getVTList(VTs), + SmallVector(N->ops()), LoadVT, MMO); + NewChain = NewLoad.getValue(*NewChainIdx); + if (NewGlueIdx) + NewGlue = NewLoad.getValue(*NewGlueIdx); + } else if (N->getOpcode() == ISD::LOAD) { // rewrite a load + std::optional CastToType; + EVT ResVT = N->getValueType(0); + if (ResVT == MVT::i64) { + // ld.b64 is treated as a vector by subsequent code + CastToType = MVT::v2f32; + } + if (auto Result = + convertVectorLoad(N, DCI.DAG, /*BuildVector=*/false, CastToType)) { + std::tie(NewLoad, NewChain) = *Result; + NumElts = + CastToType.value_or(cast(NewLoad.getNode())->getMemoryVT()) + .getVectorNumElements(); + if (NewLoad->getValueType(NewLoad->getNumValues() - 1) == MVT::Glue) + NewGlue = NewLoad.getValue(NewLoad->getNumValues() - 1); + } + } + + if (!NewLoad) + return SDValue(); // could not match pattern + + // (3) begin rewriting uses + SmallVector NewOutputsF32; + + if (llvm::any_of(ProxyRegs, [](const SDNode *PR) { return PR != nullptr; })) { + // scalarize proxy regs, but first rewrite all uses of chain and glue from + // the old load to the new load + DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); + DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue); + + for (unsigned ProxyI = 0, ProxyE = ProxyRegs.size(); ProxyI != ProxyE; + ++ProxyI) { + SDNode *ProxyReg = ProxyRegs[ProxyI]; + + // no proxy reg might mean this result is unused + if (!ProxyReg) + continue; + + // Update the new chain and glue to be old inputs to the proxyreg, if they + // came from an intervening instruction between this proxyreg and the + // original load (ex: callseq_end). Other than bitcasts and extractelts, + // we followed all other nodes by chain and glue accesses. 
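Illustrative aside (not from the patch): this ProxyReg handling matters mainly for call results, where a returned <2 x float> reaches its users through LoadParam/ProxyReg nodes. When every use is an f32 extractelement, as in the sketch below (made-up names), the combine can keep the two lanes in separate .f32 registers instead of packing them into one .b64 value first:

  declare <2 x float> @ext(<2 x float>)

  define float @sum_call_lanes(<2 x float> %x) {
    %r = call <2 x float> @ext(<2 x float> %x)
    %lo = extractelement <2 x float> %r, i32 0
    %hi = extractelement <2 x float> %r, i32 1
    %s = fadd float %lo, %hi
    ret float %s
  }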
+ if (SDValue OldInChain = ProxyReg->getOperand(0); + OldInChain.getNode() != N) + NewChain = OldInChain; + if (SDValue OldInGlue = ProxyReg->getOperand(2); OldInGlue.getNode() != N) + NewGlue = OldInGlue; + + // update OldChain, OldGlue to the outputs of ProxyReg, which we will + // replace later + OldChain = SDValue(ProxyReg, 1); + OldGlue = SDValue(ProxyReg, 2); + + // generate the scalar proxy regs + for (unsigned I = 0, E = 2; I != E; ++I) { + SDValue ProxyRegElem = DCI.DAG.getNode( + NVPTXISD::ProxyReg, SDLoc(ProxyReg), + DCI.DAG.getVTList(MVT::f32, MVT::Other, MVT::Glue), + {NewChain, NewLoad.getValue(ProxyI * 2 + I), NewGlue}); + NewChain = ProxyRegElem.getValue(1); + NewGlue = ProxyRegElem.getValue(2); + NewOutputsF32.push_back(ProxyRegElem); + } + + // replace all uses of the glue and chain from the old proxy reg + DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); + DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue); + } + } else { + for (unsigned I = 0, E = NumElts; I != E; ++I) + if (NewLoad->getValueType(I) == MVT::f32) + NewOutputsF32.push_back(NewLoad.getValue(I)); + + // replace all glue and chain nodes + DCI.DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); + if (OldGlue) + DCI.DAG.ReplaceAllUsesOfValueWith(OldGlue, NewGlue); + } + + // replace all extractelts with the new outputs + for (auto &[Extract, Index] : ExtractElts) + DCI.CombineTo(Extract, NewOutputsF32[Index], false); + + return SDValue(); +} + +static SDValue PerformStoreCombineHelper(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + std::size_t Front, std::size_t Back) { if (all_of(N->ops().drop_front(Front).drop_back(Back), [](const SDUse &U) { return U.get()->isUndef(); })) // Operand 0 is the previous value in the chain. Cannot return EntryToken // as the previous value will become unused and eliminated later. return N->getOperand(0); + if (DCI.DAG.getOptLevel() == CodeGenOptLevel::None) + return {}; + + auto *MemN = cast(N); + if (MemN->getMemoryVT() == MVT::v2f32) { + // try to fold, and expand: + // c: v2f32 = BUILD_VECTOR (a: f32, b: f32) + // StoreRetval c + // --> + // StoreRetvalV2 {a, b} + // likewise for V2 -> V4 case + // + // We also handle target-independent stores, which require us to first + // convert to StoreV2. 
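As a concrete shape of the fold described above (illustrative, made-up names, not part of the patch): a <2 x float> return value assembled from two scalars no longer has to be packed into a 64-bit register before the retval store; the two f32 components can be stored directly, e.g. as st.param.v2.f32 rather than a mov.b64 pack followed by st.param.b64:

  define <2 x float> @pack(float %a, float %b) {
    %v0 = insertelement <2 x float> poison, float %a, i32 0
    %v1 = insertelement <2 x float> %v0, float %b, i32 1
    ret <2 x float> %v1
  }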
+ + std::optional NewOpcode; + switch (N->getOpcode()) { + case NVPTXISD::StoreParam: + NewOpcode = NVPTXISD::StoreParamV2; + break; + case NVPTXISD::StoreParamV2: + NewOpcode = NVPTXISD::StoreParamV4; + break; + case NVPTXISD::StoreRetval: + NewOpcode = NVPTXISD::StoreRetvalV2; + break; + case NVPTXISD::StoreRetvalV2: + NewOpcode = NVPTXISD::StoreRetvalV4; + break; + } + + if (NewOpcode) { + // copy chain, offset from existing store + SmallVector NewOps = {N->getOperand(0), N->getOperand(1)}; + unsigned NumElts = 0; + // gather all operands to expand + for (unsigned I = 2, E = N->getNumOperands(); I < E; ++I) { + SDValue CurrentOp = N->getOperand(I); + if (CurrentOp->getOpcode() == ISD::BUILD_VECTOR) { + assert(CurrentOp.getValueType() == MVT::v2f32); + NewOps.push_back(CurrentOp.getOperand(0)); + NewOps.push_back(CurrentOp.getOperand(1)); + NumElts += 2; + } else { + NewOps.clear(); + break; + } + } + + if (!NewOps.empty()) { + return DCI.DAG.getMemIntrinsicNode(*NewOpcode, SDLoc(N), N->getVTList(), + NewOps, MVT::f32, + MemN->getMemOperand()); + } + } + } + return SDValue(); } -static SDValue PerformStoreParamCombine(SDNode *N) { +static SDValue PerformStoreParamCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { // Operands from the 3rd to the 2nd last one are the values to be stored. // {Chain, ArgID, Offset, Val, Glue} - return PerformStoreCombineHelper(N, 3, 1); + return PerformStoreCombineHelper(N, DCI, 3, 1); } -static SDValue PerformStoreRetvalCombine(SDNode *N) { +static SDValue PerformStoreRetvalCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { // Operands from the 2nd to the last one are the values to be stored - return PerformStoreCombineHelper(N, 2, 0); + return PerformStoreCombineHelper(N, DCI, 2, 0); } /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. @@ -5553,10 +5984,10 @@ static SDValue PerformEXTRACTCombine(SDNode *N, IsPTXVectorType(VectorVT.getSimpleVT())) return SDValue(); // Native vector loads already combine nicely w/ // extract_vector_elt. - // Don't mess with singletons or v2*16, v4i8 and v8i8 types, we already + // Don't mess with singletons or v2*16, v2f32, v4i8 and v8i8 types, we already // handle them OK. if (VectorVT.getVectorNumElements() == 1 || Isv2x16VT(VectorVT) || - VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8) + VectorVT == MVT::v2f32 || VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8) return SDValue(); // Don't mess with undef values as sra may be simplified to 0, not undef. 
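Side note (illustrative, not part of the patch): v2f32 joins the packed types that PerformEXTRACTCombine deliberately leaves alone, because lane reads of a packed pair are already cheap. When both lanes are used, the value is split once via the mov.b64 unpack patterns added in NVPTXInstrInfo.td later in the patch, and the uses stay in .f32 registers. Sketch with a made-up name:

  define float @sum_lanes(<2 x float> %v) {
    ; expected: a single mov.b64 {%lo, %hi} unpack feeding a scalar f32 add
    %a = extractelement <2 x float> %v, i32 0
    %b = extractelement <2 x float> %v, i32 1
    %s = fadd float %a, %b
    ret float %s
  }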
@@ -5701,6 +6132,150 @@ static SDValue combineADDRSPACECAST(SDNode *N, return SDValue(); } +static SDValue PerformFP_ROUNDCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + SDLoc DL(N); + SDValue Op = N->getOperand(0); + SDValue Trunc = N->getOperand(1); + EVT NarrowVT = N->getValueType(0); + EVT WideVT = Op.getValueType(); + + // v2[b]f16 = fp_round (v2f32 A) + // -> v2[b]f16 = (build_vector ([b]f16 = fp_round (extractelt A, 0)), + // ([b]f16 = fp_round (extractelt A, 1))) + if ((NarrowVT == MVT::v2bf16 || NarrowVT == MVT::v2f16) && + WideVT == MVT::v2f32) { + SDValue F32Op0, F32Op1; + if (Op.getOpcode() == ISD::BUILD_VECTOR) { + F32Op0 = Op.getOperand(0); + F32Op1 = Op.getOperand(1); + } else { + F32Op0 = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op, + DCI.DAG.getIntPtrConstant(0, DL)); + F32Op1 = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op, + DCI.DAG.getIntPtrConstant(1, DL)); + } + return DCI.DAG.getBuildVector( + NarrowVT, DL, + {DCI.DAG.getNode(ISD::FP_ROUND, DL, NarrowVT.getScalarType(), F32Op0, + Trunc), + DCI.DAG.getNode(ISD::FP_ROUND, DL, NarrowVT.getScalarType(), F32Op1, + Trunc)}); + } + + return SDValue(); +} + +static SDValue PerformTRUNCATECombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + SDLoc DL(N); + SDValue Op = N->getOperand(0); + EVT FromVT = Op.getValueType(); + EVT ResultVT = N->getValueType(0); + + if (FromVT == MVT::i64 && ResultVT == MVT::i32) { + // i32 = truncate (i64 = bitcast (v2f32 = BUILD_VECTOR (f32 A, f32 B))) + // -> i32 = bitcast (f32 A) + if (Op.getOpcode() == ISD::BITCAST) { + SDValue BV = Op.getOperand(0); + if (BV.getOpcode() == ISD::BUILD_VECTOR && + BV.getValueType() == MVT::v2f32) { + // get lower + return DCI.DAG.getNode(ISD::BITCAST, DL, ResultVT, BV.getOperand(0)); + } + } + + // i32 = truncate (i64 = srl + // (i64 = bitcast + // (v2f32 = BUILD_VECTOR (f32 A, f32 B))), 32) + // -> i32 = bitcast (f32 B) + if (Op.getOpcode() == ISD::SRL) { + if (auto *ShAmt = dyn_cast(Op.getOperand(1)); + ShAmt && ShAmt->getAsAPIntVal() == 32) { + SDValue Cast = Op.getOperand(0); + if (Cast.getOpcode() == ISD::BITCAST) { + SDValue BV = Cast.getOperand(0); + if (BV.getOpcode() == ISD::BUILD_VECTOR && + BV.getValueType() == MVT::v2f32) { + // get upper + return DCI.DAG.getNode(ISD::BITCAST, DL, ResultVT, + BV.getOperand(1)); + } + } + } + } + } + + return SDValue(); +} + +static SDValue PerformBITCASTCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + if (N->getValueType(0) != MVT::v2f32) + return SDValue(); + + SDValue Operand = N->getOperand(0); + if (Operand.getValueType() != MVT::i64) + return SDValue(); + + // DAGCombiner handles bitcast(ISD::LOAD) already. For these, we'll do the + // same thing, by changing their output values from i64 to v2f32. Then the + // rule for combining loads (see PerformLoadCombine) may split these loads + // further. 
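For orientation (illustrative, not from the patch): the simplest shape of this pattern is an i64 load whose only use is a bitcast to <2 x float>; the generic DAGCombiner already re-types that case, and this hook extends the same idea to NVPTX's own load nodes (LoadV2, LoadParam, LoadParamV2), which is where such bitcasts appear after call and parameter lowering. Sketch with a made-up name:

  define float @lane_of_i64(ptr %p) {
    %i = load i64, ptr %p, align 8
    %v = bitcast i64 %i to <2 x float>
    %lo = extractelement <2 x float> %v, i32 0
    ret float %lo
  }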
+ if (Operand.getOpcode() == NVPTXISD::LoadV2 || + Operand.getOpcode() == NVPTXISD::LoadParam || + Operand.getOpcode() == NVPTXISD::LoadParamV2) { + // check for all bitcasts + SmallVector> OldUses; + for (SDUse &U : Operand->uses()) { + SDNode *User = U.getUser(); + if (!(User->getOpcode() == ISD::BITCAST && + User->getValueType(0) == MVT::v2f32 && + U.getValueType() == MVT::i64)) + return SDValue(); // unhandled pattern + OldUses.push_back({User, U.getResNo()}); + } + + auto *MemN = cast(Operand); + SmallVector VTs; + for (const auto &VT : Operand->values()) { + if (VT == MVT::i64) + VTs.push_back(MVT::v2f32); + else + VTs.push_back(VT); + } + + SDValue NewLoad = DCI.DAG.getMemIntrinsicNode( + Operand.getOpcode(), SDLoc(Operand), DCI.DAG.getVTList(VTs), + SmallVector(Operand->ops()), MemN->getMemoryVT(), + MemN->getMemOperand()); + + // replace all chain/glue uses of the old load + for (unsigned I = 0, E = Operand->getNumValues(); I != E; ++I) + if (Operand->getValueType(I) != MVT::i64) + DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(MemN, I), + NewLoad.getValue(I)); + + // replace all bitcasts with values from the new load + for (auto &[BC, ResultNum] : OldUses) + DCI.CombineTo(BC, NewLoad.getValue(ResultNum), false); + } + + return SDValue(); +} + +static SDValue PerformStoreCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + // check if the store'd value can be scalarized + SDValue StoredVal = N->getOperand(1); + if (StoredVal.getValueType() == MVT::v2f32 && + StoredVal.getOpcode() == ISD::BUILD_VECTOR) { + SmallVector Elements(StoredVal->op_values()); + return convertVectorStore(SDValue(N, 0), DCI.DAG, Elements); + } + return SDValue(); +} + SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel(); @@ -5724,11 +6299,18 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N, case NVPTXISD::StoreRetval: case NVPTXISD::StoreRetvalV2: case NVPTXISD::StoreRetvalV4: - return PerformStoreRetvalCombine(N); + return PerformStoreRetvalCombine(N, DCI); + case ISD::LOAD: + case NVPTXISD::LoadV2: + case NVPTXISD::LoadParam: + case NVPTXISD::LoadParamV2: + return PerformLoadCombine(N, DCI); + case ISD::STORE: + return PerformStoreCombine(N, DCI); case NVPTXISD::StoreParam: case NVPTXISD::StoreParamV2: case NVPTXISD::StoreParamV4: - return PerformStoreParamCombine(N); + return PerformStoreParamCombine(N, DCI); case ISD::EXTRACT_VECTOR_ELT: return PerformEXTRACTCombine(N, DCI); case ISD::VSELECT: @@ -5737,6 +6319,12 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N, return PerformBUILD_VECTORCombine(N, DCI); case ISD::ADDRSPACECAST: return combineADDRSPACECAST(N, DCI); + case ISD::FP_ROUND: + return PerformFP_ROUNDCombine(N, DCI); + case ISD::TRUNCATE: + return PerformTRUNCATECombine(N, DCI); + case ISD::BITCAST: + return PerformBITCASTCombine(N, DCI); } return SDValue(); } @@ -5766,94 +6354,11 @@ static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads. static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SmallVectorImpl &Results) { - LoadSDNode *LD = cast(N); - const EVT ResVT = LD->getValueType(0); - const EVT MemVT = LD->getMemoryVT(); - - // If we're doing sign/zero extension as part of the load, avoid lowering to - // a LoadV node. TODO: consider relaxing this restriction. 
- if (ResVT != MemVT) - return; - - const auto NumEltsAndEltVT = getVectorLoweringShape(ResVT); - if (!NumEltsAndEltVT) - return; - const auto [NumElts, EltVT] = NumEltsAndEltVT.value(); - - Align Alignment = LD->getAlign(); - const auto &TD = DAG.getDataLayout(); - Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext())); - if (Alignment < PrefAlign) { - // This load is not sufficiently aligned, so bail out and let this vector - // load be scalarized. Note that we may still be able to emit smaller - // vector loads. For example, if we are loading a <4 x float> with an - // alignment of 8, this check will fail but the legalizer will try again - // with 2 x <2 x float>, which will succeed with an alignment of 8. - return; - } - - // Since LoadV2 is a target node, we cannot rely on DAG type legalization. - // Therefore, we must ensure the type is legal. For i1 and i8, we set the - // loaded type to i16 and propagate the "real" type as the memory type. - const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT; - - unsigned Opcode; - SDVTList LdResVTs; - switch (NumElts) { - default: - return; - case 2: - Opcode = NVPTXISD::LoadV2; - LdResVTs = DAG.getVTList(LoadEltVT, LoadEltVT, MVT::Other); - break; - case 4: { - Opcode = NVPTXISD::LoadV4; - LdResVTs = - DAG.getVTList({LoadEltVT, LoadEltVT, LoadEltVT, LoadEltVT, MVT::Other}); - break; - } + if (auto Outputs = convertVectorLoad(N, DAG, /*BuildVector=*/true)) { + auto [BuildVec, LoadChain] = *Outputs; + Results.push_back(BuildVec); + Results.push_back(LoadChain); } - SDLoc DL(LD); - - // Copy regular operands - SmallVector OtherOps(LD->ops()); - - // The select routine does not have access to the LoadSDNode instance, so - // pass along the extension information - OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL)); - - SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, - LD->getMemoryVT(), - LD->getMemOperand()); - - SmallVector ScalarRes; - if (EltVT.isVector()) { - assert(EVT(EltVT.getVectorElementType()) == ResVT.getVectorElementType()); - assert(NumElts * EltVT.getVectorNumElements() == - ResVT.getVectorNumElements()); - // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back - // into individual elements. 
- for (const unsigned I : llvm::seq(NumElts)) { - SDValue SubVector = NewLD.getValue(I); - DAG.ExtractVectorElements(SubVector, ScalarRes); - } - } else { - for (const unsigned I : llvm::seq(NumElts)) { - SDValue Res = NewLD.getValue(I); - if (LoadEltVT != EltVT) - Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res); - ScalarRes.push_back(Res); - } - } - - SDValue LoadChain = NewLD.getValue(NumElts); - - const MVT BuildVecVT = - MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size()); - SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes); - SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec); - - Results.append({LoadValue, LoadChain}); } // Lower vector return type of tcgen05.ld intrinsics diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 11d77599d4ac3..cbc63c18aad18 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -161,6 +161,7 @@ def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">; def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">; def hasDotInstructions : Predicate<"Subtarget->hasDotInstructions()">; def hasTcgen05Instructions : Predicate<"Subtarget->hasTcgen05Instructions()">; +def hasF32x2Instructions : Predicate<"Subtarget->hasF32x2Instructions()">; def True : Predicate<"true">; def False : Predicate<"false">; @@ -236,6 +237,7 @@ def F64RT : RegTyInfo; def F16RT : RegTyInfo; def BF16RT : RegTyInfo; +def F32X2RT : RegTyInfo; def F16X2RT : RegTyInfo; def BF16X2RT : RegTyInfo; @@ -414,7 +416,18 @@ multiclass F3 { (ins Float32Regs:$a, f32imm:$b), op_str # ".f32 \t$dst, $a, $b;", [(set f32:$dst, (op_pat f32:$a, fpimm:$b))]>; - + def f32x2rr_ftz : + NVPTXInst<(outs Int64Regs:$dst), + (ins Int64Regs:$a, Int64Regs:$b), + op_str # ".ftz.f32x2 \t$dst, $a, $b;", + [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>, + Requires<[doF32FTZ, hasF32x2Instructions]>; + def f32x2rr : + NVPTXInst<(outs Int64Regs:$dst), + (ins Int64Regs:$a, Int64Regs:$b), + op_str # ".f32x2 \t$dst, $a, $b;", + [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>, + Requires<[hasF32x2Instructions]>; def f16rr_ftz : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b), @@ -446,7 +459,6 @@ multiclass F3 { op_str # ".bf16 \t$dst, $a, $b;", [(set bf16:$dst, (op_pat bf16:$a, bf16:$b))]>, Requires<[hasBF16Math]>; - def bf16x2rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b), @@ -808,6 +820,9 @@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)), (SELP_b32rr $a, $b, $p)>; } +def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)), + (SELP_b64rr $a, $b, $p)>; + //----------------------------------- // Test Instructions //----------------------------------- @@ -1369,6 +1384,8 @@ defm BFMA16 : FMA<"fma.rn.bf16", BF16RT, [hasBF16Math]>; defm BFMA16x2 : FMA<"fma.rn.bf16x2", BF16X2RT, [hasBF16Math]>; defm FMA32_ftz : FMA<"fma.rn.ftz.f32", F32RT, [doF32FTZ]>; defm FMA32 : FMA<"fma.rn.f32", F32RT>; +defm FMA32x2_ftz : FMA<"fma.rn.ftz.f32x2", F32X2RT, [doF32FTZ]>; +defm FMA32x2 : FMA<"fma.rn.f32x2", F32X2RT>; defm FMA64 : FMA<"fma.rn.f64", F64RT>; // sin/cos @@ -2420,6 +2437,10 @@ foreach vt = [v2f16, v2bf16, v2i16, v4i8] in { def: Pat<(vt (ProxyReg vt:$src)), (ProxyRegI32 $src)>; } +def: Pat<(v2f32 (ProxyReg v2f32:$src)), (ProxyRegI64 $src)>; + +def: Pat<(v2f32 (bitconvert i64:$src)), (ProxyRegI64 $src)>; + // // Load / Store Handling // @@ -2529,6 +2550,8 @@ def: Pat<(vt (bitconvert (f32 Float32Regs:$a))), def: Pat<(f32 (bitconvert vt:$a)), (BITCONVERT_32_I2F $a)>; } +def: Pat<(v2f32 (bitconvert 
(f64 Float64Regs:$a))), + (BITCONVERT_64_F2I $a)>; foreach vt = [f16, bf16] in { def: Pat<(vt (bitconvert i16:$a)), (vt Int16Regs:$a)>; @@ -2545,6 +2568,9 @@ foreach ta = [v2f16, v2bf16, v2i16, v4i8, i32] in { } } +def: Pat<(i64 (bitconvert v2f32:$a)), + (i64 Int64Regs:$a)>; + // NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where // we cannot specify floating-point literals in isel patterns. Therefore, we // use an integer selp to select either 1 (or -1 in case of signed) or 0 @@ -2821,6 +2847,9 @@ let hasSideEffects = false in { def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$s1, Float32Regs:$s2), "mov.b64 \t$d, {{$s1, $s2}};", []>; + def V2F32toI64 : NVPTXInst<(outs Int64Regs:$d), + (ins Float32Regs:$s1, Float32Regs:$s2), + "mov.b64 \t$d, {{$s1, $s2}};", []>; // unpack a larger int register to a set of smaller int registers def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2, @@ -2833,6 +2862,9 @@ let hasSideEffects = false in { def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2), (ins Int64Regs:$s), "mov.b64 \t{{$d1, $d2}}, $s;", []>; + def I64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2), + (ins Int64Regs:$s), + "mov.b64 \t{{$d1, $d2}}, $s;", []>; def I128toV2I64: NVPTXInst<(outs Int64Regs:$d1, Int64Regs:$d2), (ins Int128Regs:$s), "mov.b128 \t{{$d1, $d2}}, $s;", []>; @@ -2856,6 +2888,24 @@ let hasSideEffects = false in { (ins Int64Regs:$s), "{{ .reg .b32 tmp; mov.b64 {$low, tmp}, $s; }}", []>; + def I64toF32H : NVPTXInst<(outs Float32Regs:$high), + (ins Int64Regs:$s), + "{{ .reg .b32 tmp; mov.b64 {tmp, $high}, $s; }}", + []>; + def I64toF32L : NVPTXInst<(outs Float32Regs:$low), + (ins Int64Regs:$s), + "{{ .reg .b32 tmp; mov.b64 {$low, tmp}, $s; }}", + []>; + def I64toF32HS : NVPTXInst<(outs Float32Regs:$high), + (ins Int64Regs:$s), + "mov.b64 {{_, $high}}, $s;", + []>, + Requires<[hasPTX<71>]>; + def I64toF32LS : NVPTXInst<(outs Float32Regs:$low), + (ins Int64Regs:$s), + "mov.b64 {{$low, _}}, $s;", + []>, + Requires<[hasPTX<71>]>; // PTX 7.1 lets you avoid a temp register and just use _ as a "sink" for the // unused high/low part. 
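To make the new mov.b64 helpers concrete (illustrative; the PTX named in the comments is the expected shape, not checked output, and the function name is made up): build_vector of two f32 values packs them into one 64-bit register, extractelt unpacks them again, and the "_" sink forms above cover single-lane reads on PTX >= 7.1 (see the extractelt patterns in the next hunk):

  define <2 x float> @swap_lanes(<2 x float> %v) {
    ; unpack both lanes: mov.b64 {%lo, %hi}, %rd
    %a = extractelement <2 x float> %v, i32 0
    %b = extractelement <2 x float> %v, i32 1
    ; repack in swapped order: mov.b64 %rd2, {%hi, %lo}
    %r0 = insertelement <2 x float> poison, float %b, i32 0
    %r1 = insertelement <2 x float> %r0, float %a, i32 1
    ret <2 x float> %r1
  }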
@@ -2898,12 +2948,25 @@ foreach vt = [v2f16, v2bf16, v2i16] in { def : Pat<(extractelt vt:$src, 0), (I32toI16L $src)>; def : Pat<(extractelt vt:$src, 1), (I32toI16H $src)>; } + +def : Pat<(extractelt v2f32:$src, 0), + (I64toF32LS $src)>, Requires<[hasPTX<71>]>; +def : Pat<(extractelt v2f32:$src, 1), + (I64toF32HS $src)>, Requires<[hasPTX<71>]>; + +def : Pat<(extractelt v2f32:$src, 0), + (I64toF32L $src)>; +def : Pat<(extractelt v2f32:$src, 1), + (I64toF32H $src)>; + def : Pat<(v2f16 (build_vector f16:$a, f16:$b)), (V2I16toI32 $a, $b)>; def : Pat<(v2bf16 (build_vector bf16:$a, bf16:$b)), (V2I16toI32 $a, $b)>; def : Pat<(v2i16 (build_vector i16:$a, i16:$b)), (V2I16toI32 $a, $b)>; +def : Pat<(v2f32 (build_vector f32:$a, f32:$b)), + (V2F32toI64 $a, $b)>; def: Pat<(v2i16 (scalar_to_vector i16:$a)), (CVT_u32_u16 $a, CvtNONE)>; diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index 3eedb43e4c81a..4a33ca2d68475 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -2305,7 +2305,9 @@ class LDU_G def INT_PTX_LDU_GLOBAL_i8 : LDU_G<"u8", Int16Regs>; def INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16", Int16Regs>; def INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32", Int32Regs>; +def INT_PTX_LDU_GLOBAL_b32 : LDU_G<"b32", Int32Regs>; def INT_PTX_LDU_GLOBAL_i64 : LDU_G<"u64", Int64Regs>; +def INT_PTX_LDU_GLOBAL_b64 : LDU_G<"b64", Int64Regs>; def INT_PTX_LDU_GLOBAL_f32 : LDU_G<"f32", Float32Regs>; def INT_PTX_LDU_GLOBAL_f64 : LDU_G<"f64", Float64Regs>; @@ -2355,7 +2357,9 @@ class LDG_G def INT_PTX_LDG_GLOBAL_i8 : LDG_G<"u8", Int16Regs>; def INT_PTX_LDG_GLOBAL_i16 : LDG_G<"u16", Int16Regs>; def INT_PTX_LDG_GLOBAL_i32 : LDG_G<"u32", Int32Regs>; +def INT_PTX_LDG_GLOBAL_b32 : LDG_G<"b32", Int32Regs>; def INT_PTX_LDG_GLOBAL_i64 : LDG_G<"u64", Int64Regs>; +def INT_PTX_LDG_GLOBAL_b64 : LDG_G<"b64", Int64Regs>; def INT_PTX_LDG_GLOBAL_f32 : LDG_G<"f32", Float32Regs>; def INT_PTX_LDG_GLOBAL_f64 : LDG_G<"f64", Float64Regs>; diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td index 2011f0f7e328f..7630eefe21182 100644 --- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td @@ -62,7 +62,9 @@ def Int16Regs : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4) def Int32Regs : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8], 32, (add (sequence "R%u", 0, 4), VRFrame32, VRFrameLocal32)>; -def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>; +def Int64Regs : NVPTXRegClass<[i64, v2f32], 64, + (add (sequence "RL%u", 0, 4), + VRFrame64, VRFrameLocal64)>; // 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only. def Int128Regs : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>; def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 4))>; diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h index 0a4fc8d1435be..dd617cbb6ab3c 100644 --- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h @@ -112,6 +112,10 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo { return HasTcgen05 && PTXVersion >= 86; } + bool hasF32x2Instructions() const { + return SmVersion >= 100 && PTXVersion >= 86; + } + // Prior to CUDA 12.3 ptxas did not recognize that the trap instruction // terminates a basic block. Instead, it would assume that control flow // continued to the next instruction. 
The next instruction could be in the diff --git a/llvm/test/CodeGen/NVPTX/aggregate-return.ll b/llvm/test/CodeGen/NVPTX/aggregate-return.ll index cda7d38ccb0b7..1101abcdc3278 100644 --- a/llvm/test/CodeGen/NVPTX/aggregate-return.ll +++ b/llvm/test/CodeGen/NVPTX/aggregate-return.ll @@ -10,9 +10,9 @@ define void @test_v2f32(<2 x float> %input, ptr %output) { ; CHECK-LABEL: @test_v2f32 %call = tail call <2 x float> @barv(<2 x float> %input) ; CHECK: .param .align 8 .b8 retval0[8]; -; CHECK: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0]; +; CHECK: ld.param.b64 [[E0_1:%rd[0-9]+]], [retval0]; store <2 x float> %call, ptr %output, align 8 -; CHECK: st.v2.f32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]} +; CHECK: st.b64 [{{%rd[0-9]+}}], [[E0_1]] ret void } @@ -27,9 +27,7 @@ define void @test_v3f32(<3 x float> %input, ptr %output) { ; CHECK-NOT: ld.param.f32 [[E3:%f[0-9]+]], [retval0+12]; store <3 x float> %call, ptr %output, align 8 ; CHECK-DAG: st.f32 [{{%rd[0-9]}}+8], -; -- This is suboptimal. We should do st.v2.f32 instead -; of combining 2xf32 info i64. -; CHECK-DAG: st.u64 [{{%rd[0-9]}}], +; CHECK-DAG: st.v2.f32 [{{%rd[0-9]}}], {[[E0]], [[E1]]} ; CHECK: ret; ret void } diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll index 6be13c3a6fdec..719c60fb2e3cb 100644 --- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll @@ -762,32 +762,32 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM70-NEXT: // %bb.0: ; SM70-NEXT: ld.param.u64 %rd1, [test_extload_bf16x8_param_0]; ; SM70-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; SM70-NEXT: mov.b32 {%rs1, %rs2}, %r1; -; SM70-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; SM70-NEXT: mov.b32 {%rs5, %rs6}, %r3; -; SM70-NEXT: mov.b32 {%rs7, %rs8}, %r4; -; SM70-NEXT: cvt.u32.u16 %r5, %rs8; +; SM70-NEXT: mov.b32 {%rs1, %rs2}, %r4; +; SM70-NEXT: cvt.u32.u16 %r5, %rs2; ; SM70-NEXT: shl.b32 %r6, %r5, 16; ; SM70-NEXT: mov.b32 %f1, %r6; -; SM70-NEXT: cvt.u32.u16 %r7, %rs7; +; SM70-NEXT: cvt.u32.u16 %r7, %rs1; ; SM70-NEXT: shl.b32 %r8, %r7, 16; ; SM70-NEXT: mov.b32 %f2, %r8; -; SM70-NEXT: cvt.u32.u16 %r9, %rs6; +; SM70-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; SM70-NEXT: cvt.u32.u16 %r9, %rs4; ; SM70-NEXT: shl.b32 %r10, %r9, 16; ; SM70-NEXT: mov.b32 %f3, %r10; -; SM70-NEXT: cvt.u32.u16 %r11, %rs5; +; SM70-NEXT: cvt.u32.u16 %r11, %rs3; ; SM70-NEXT: shl.b32 %r12, %r11, 16; ; SM70-NEXT: mov.b32 %f4, %r12; -; SM70-NEXT: cvt.u32.u16 %r13, %rs4; +; SM70-NEXT: mov.b32 {%rs5, %rs6}, %r2; +; SM70-NEXT: cvt.u32.u16 %r13, %rs6; ; SM70-NEXT: shl.b32 %r14, %r13, 16; ; SM70-NEXT: mov.b32 %f5, %r14; -; SM70-NEXT: cvt.u32.u16 %r15, %rs3; +; SM70-NEXT: cvt.u32.u16 %r15, %rs5; ; SM70-NEXT: shl.b32 %r16, %r15, 16; ; SM70-NEXT: mov.b32 %f6, %r16; -; SM70-NEXT: cvt.u32.u16 %r17, %rs2; +; SM70-NEXT: mov.b32 {%rs7, %rs8}, %r1; +; SM70-NEXT: cvt.u32.u16 %r17, %rs8; ; SM70-NEXT: shl.b32 %r18, %r17, 16; ; SM70-NEXT: mov.b32 %f7, %r18; -; SM70-NEXT: cvt.u32.u16 %r19, %rs1; +; SM70-NEXT: cvt.u32.u16 %r19, %rs7; ; SM70-NEXT: shl.b32 %r20, %r19, 16; ; SM70-NEXT: mov.b32 %f8, %r20; ; SM70-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; @@ -804,18 +804,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.u64 %rd1, [test_extload_bf16x8_param_0]; ; SM80-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; 
SM80-NEXT: mov.b32 {%rs5, %rs6}, %r3; -; SM80-NEXT: mov.b32 {%rs7, %rs8}, %r4; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs8; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs7; -; SM80-NEXT: cvt.f32.bf16 %f3, %rs6; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs5; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: cvt.f32.bf16 %f6, %rs3; -; SM80-NEXT: cvt.f32.bf16 %f7, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f8, %rs1; +; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; +; SM80-NEXT: cvt.f32.bf16 %f1, %rs2; +; SM80-NEXT: cvt.f32.bf16 %f2, %rs1; +; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; SM80-NEXT: cvt.f32.bf16 %f3, %rs4; +; SM80-NEXT: cvt.f32.bf16 %f4, %rs3; +; SM80-NEXT: mov.b32 {%rs5, %rs6}, %r2; +; SM80-NEXT: cvt.f32.bf16 %f5, %rs6; +; SM80-NEXT: cvt.f32.bf16 %f6, %rs5; +; SM80-NEXT: mov.b32 {%rs7, %rs8}, %r1; +; SM80-NEXT: cvt.f32.bf16 %f7, %rs8; +; SM80-NEXT: cvt.f32.bf16 %f8, %rs7; ; SM80-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM80-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM80-NEXT: ret; @@ -830,18 +830,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM80-FTZ-NEXT: // %bb.0: ; SM80-FTZ-NEXT: ld.param.u64 %rd1, [test_extload_bf16x8_param_0]; ; SM80-FTZ-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; SM80-FTZ-NEXT: mov.b32 {%rs1, %rs2}, %r1; -; SM80-FTZ-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; SM80-FTZ-NEXT: mov.b32 {%rs5, %rs6}, %r3; -; SM80-FTZ-NEXT: mov.b32 {%rs7, %rs8}, %r4; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs8; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f2, %rs7; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f3, %rs6; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f4, %rs5; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f5, %rs4; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f6, %rs3; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f7, %rs2; -; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f8, %rs1; +; SM80-FTZ-NEXT: mov.b32 {%rs1, %rs2}, %r4; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs2; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f2, %rs1; +; SM80-FTZ-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f3, %rs4; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f4, %rs3; +; SM80-FTZ-NEXT: mov.b32 {%rs5, %rs6}, %r2; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f5, %rs6; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f6, %rs5; +; SM80-FTZ-NEXT: mov.b32 {%rs7, %rs8}, %r1; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f7, %rs8; +; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f8, %rs7; ; SM80-FTZ-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM80-FTZ-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM80-FTZ-NEXT: ret; @@ -856,18 +856,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u64 %rd1, [test_extload_bf16x8_param_0]; ; SM90-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; SM90-NEXT: mov.b32 {%rs1, %rs2}, %r1; -; SM90-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; SM90-NEXT: mov.b32 {%rs5, %rs6}, %r3; -; SM90-NEXT: mov.b32 {%rs7, %rs8}, %r4; -; SM90-NEXT: cvt.f32.bf16 %f1, %rs8; -; SM90-NEXT: cvt.f32.bf16 %f2, %rs7; -; SM90-NEXT: cvt.f32.bf16 %f3, %rs6; -; SM90-NEXT: cvt.f32.bf16 %f4, %rs5; -; SM90-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM90-NEXT: cvt.f32.bf16 %f6, %rs3; -; SM90-NEXT: cvt.f32.bf16 %f7, %rs2; -; SM90-NEXT: cvt.f32.bf16 %f8, %rs1; +; SM90-NEXT: mov.b32 {%rs1, %rs2}, %r4; +; SM90-NEXT: cvt.f32.bf16 %f1, %rs2; +; SM90-NEXT: cvt.f32.bf16 %f2, %rs1; +; SM90-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; SM90-NEXT: cvt.f32.bf16 %f3, %rs4; +; SM90-NEXT: cvt.f32.bf16 %f4, %rs3; +; SM90-NEXT: mov.b32 {%rs5, %rs6}, %r2; +; SM90-NEXT: cvt.f32.bf16 %f5, %rs6; +; 
SM90-NEXT: cvt.f32.bf16 %f6, %rs5; +; SM90-NEXT: mov.b32 {%rs7, %rs8}, %r1; +; SM90-NEXT: cvt.f32.bf16 %f7, %rs8; +; SM90-NEXT: cvt.f32.bf16 %f8, %rs7; ; SM90-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM90-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM90-NEXT: ret; diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll index e854e5a6e5aaa..c3a04cd962789 100644 --- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll @@ -624,16 +624,20 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b, ; CHECK-F16-NEXT: .reg .pred %p<3>; ; CHECK-F16-NEXT: .reg .b32 %r<3>; ; CHECK-F16-NEXT: .reg .b32 %f<7>; +; CHECK-F16-NEXT: .reg .b64 %rd<4>; ; CHECK-F16-EMPTY: ; CHECK-F16-NEXT: // %bb.0: -; CHECK-F16-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f32_f16_param_1]; -; CHECK-F16-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_select_cc_f32_f16_param_0]; ; CHECK-F16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3]; ; CHECK-F16-NEXT: ld.param.b32 %r1, [test_select_cc_f32_f16_param_2]; +; CHECK-F16-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f16_param_1]; +; CHECK-F16-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f16_param_0]; ; CHECK-F16-NEXT: setp.neu.f16x2 %p1|%p2, %r1, %r2; -; CHECK-F16-NEXT: selp.f32 %f5, %f2, %f4, %p2; -; CHECK-F16-NEXT: selp.f32 %f6, %f1, %f3, %p1; -; CHECK-F16-NEXT: st.param.v2.f32 [func_retval0], {%f6, %f5}; +; CHECK-F16-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-F16-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-F16-NEXT: selp.f32 %f5, %f4, %f2, %p2; +; CHECK-F16-NEXT: selp.f32 %f6, %f3, %f1, %p1; +; CHECK-F16-NEXT: mov.b64 %rd3, {%f6, %f5}; +; CHECK-F16-NEXT: st.param.b64 [func_retval0], %rd3; ; CHECK-F16-NEXT: ret; ; ; CHECK-NOF16-LABEL: test_select_cc_f32_f16( @@ -642,23 +646,27 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b, ; CHECK-NOF16-NEXT: .reg .b16 %rs<5>; ; CHECK-NOF16-NEXT: .reg .b32 %r<3>; ; CHECK-NOF16-NEXT: .reg .b32 %f<11>; +; CHECK-NOF16-NEXT: .reg .b64 %rd<4>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: -; CHECK-NOF16-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f32_f16_param_1]; -; CHECK-NOF16-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_select_cc_f32_f16_param_0]; ; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_select_cc_f32_f16_param_3]; ; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_select_cc_f32_f16_param_2]; +; CHECK-NOF16-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f16_param_1]; +; CHECK-NOF16-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f16_param_0]; ; CHECK-NOF16-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %f5, %rs1; +; CHECK-NOF16-NEXT: cvt.f32.f16 %f1, %rs1; ; CHECK-NOF16-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; CHECK-NOF16-NEXT: cvt.f32.f16 %f6, %rs3; -; CHECK-NOF16-NEXT: setp.neu.f32 %p1, %f6, %f5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %f7, %rs2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %f8, %rs4; -; CHECK-NOF16-NEXT: setp.neu.f32 %p2, %f8, %f7; -; CHECK-NOF16-NEXT: selp.f32 %f9, %f2, %f4, %p2; -; CHECK-NOF16-NEXT: selp.f32 %f10, %f1, %f3, %p1; -; CHECK-NOF16-NEXT: st.param.v2.f32 [func_retval0], {%f10, %f9}; +; CHECK-NOF16-NEXT: cvt.f32.f16 %f2, %rs3; +; CHECK-NOF16-NEXT: setp.neu.f32 %p1, %f2, %f1; +; CHECK-NOF16-NEXT: cvt.f32.f16 %f3, %rs2; +; CHECK-NOF16-NEXT: cvt.f32.f16 %f4, %rs4; +; CHECK-NOF16-NEXT: setp.neu.f32 %p2, %f4, %f3; +; CHECK-NOF16-NEXT: mov.b64 {%f5, %f6}, %rd2; +; CHECK-NOF16-NEXT: mov.b64 {%f7, %f8}, %rd1; +; CHECK-NOF16-NEXT: selp.f32 %f9, 
%f8, %f6, %p2; +; CHECK-NOF16-NEXT: selp.f32 %f10, %f7, %f5, %p1; +; CHECK-NOF16-NEXT: mov.b64 %rd3, {%f10, %f9}; +; CHECK-NOF16-NEXT: st.param.b64 [func_retval0], %rd3; ; CHECK-NOF16-NEXT: ret; <2 x half> %c, <2 x half> %d) #0 { %cc = fcmp une <2 x half> %c, %d @@ -673,14 +681,17 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b, ; CHECK-NEXT: .reg .b16 %rs<7>; ; CHECK-NEXT: .reg .b32 %r<4>; ; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f16_f32_param_3]; -; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_select_cc_f16_f32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f16_f32_param_3]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f16_f32_param_2]; ; CHECK-NEXT: ld.param.b32 %r2, [test_select_cc_f16_f32_param_1]; ; CHECK-NEXT: ld.param.b32 %r1, [test_select_cc_f16_f32_param_0]; -; CHECK-NEXT: setp.neu.f32 %p1, %f1, %f3; -; CHECK-NEXT: setp.neu.f32 %p2, %f2, %f4; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1; +; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; ; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r1; ; CHECK-NEXT: selp.b16 %rs5, %rs4, %rs2, %p2; @@ -1546,9 +1557,11 @@ define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 { ; CHECK-NEXT: .reg .b16 %rs<3>; ; CHECK-NEXT: .reg .b32 %r<2>; ; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_fptrunc_2xfloat_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fptrunc_2xfloat_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; ; CHECK-NEXT: cvt.rn.f16.f32 %rs1, %f2; ; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %f1; ; CHECK-NEXT: mov.b32 %r1, {%rs2, %rs1}; @@ -1582,13 +1595,15 @@ define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 { ; CHECK-NEXT: .reg .b16 %rs<3>; ; CHECK-NEXT: .reg .b32 %r<2>; ; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [test_fpext_2xfloat_param_0]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; ; CHECK-NEXT: cvt.f32.f16 %f1, %rs2; ; CHECK-NEXT: cvt.f32.f16 %f2, %rs1; -; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1}; +; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; ; CHECK-NEXT: ret; %r = fpext <2 x half> %a to <2 x float> ret <2 x float> %r @@ -1985,10 +2000,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 { ; CHECK-F16-NEXT: .reg .b16 %rs<3>; ; CHECK-F16-NEXT: .reg .b32 %r<6>; ; CHECK-F16-NEXT: .reg .b32 %f<3>; +; CHECK-F16-NEXT: .reg .b64 %rd<2>; ; CHECK-F16-EMPTY: ; CHECK-F16-NEXT: // %bb.0: -; CHECK-F16-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_copysign_f32_param_1]; +; CHECK-F16-NEXT: ld.param.b64 %rd1, [test_copysign_f32_param_1]; ; CHECK-F16-NEXT: ld.param.b32 %r1, [test_copysign_f32_param_0]; +; CHECK-F16-NEXT: mov.b64 {%f1, %f2}, %rd1; ; CHECK-F16-NEXT: cvt.rn.f16.f32 %rs1, %f2; ; CHECK-F16-NEXT: cvt.rn.f16.f32 %rs2, %f1; ; CHECK-F16-NEXT: mov.b32 %r2, {%rs2, %rs1}; @@ -2003,10 +2020,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 { ; CHECK-NOF16-NEXT: .reg .b16 %rs<9>; ; CHECK-NOF16-NEXT: .reg .b32 %r<7>; ; CHECK-NOF16-NEXT: .reg .b32 %f<3>; +; CHECK-NOF16-NEXT: .reg .b64 %rd<2>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: -; CHECK-NOF16-NEXT: ld.param.v2.f32 {%f1, %f2}, 
[test_copysign_f32_param_1]; +; CHECK-NOF16-NEXT: ld.param.b64 %rd1, [test_copysign_f32_param_1]; ; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_copysign_f32_param_0]; +; CHECK-NOF16-NEXT: mov.b64 {%f1, %f2}, %rd1; ; CHECK-NOF16-NEXT: mov.b32 {%rs1, %rs2}, %r1; ; CHECK-NOF16-NEXT: and.b16 %rs3, %rs2, 32767; ; CHECK-NOF16-NEXT: mov.b32 %r2, %f2; @@ -2082,6 +2101,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 { ; CHECK-F16-NEXT: .reg .b16 %rs<3>; ; CHECK-F16-NEXT: .reg .b32 %r<6>; ; CHECK-F16-NEXT: .reg .b32 %f<3>; +; CHECK-F16-NEXT: .reg .b64 %rd<2>; ; CHECK-F16-EMPTY: ; CHECK-F16-NEXT: // %bb.0: ; CHECK-F16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1]; @@ -2092,7 +2112,8 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 { ; CHECK-F16-NEXT: mov.b32 {%rs1, %rs2}, %r5; ; CHECK-F16-NEXT: cvt.f32.f16 %f1, %rs2; ; CHECK-F16-NEXT: cvt.f32.f16 %f2, %rs1; -; CHECK-F16-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1}; +; CHECK-F16-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-F16-NEXT: st.param.b64 [func_retval0], %rd1; ; CHECK-F16-NEXT: ret; ; ; CHECK-NOF16-LABEL: test_copysign_extended( @@ -2100,6 +2121,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 { ; CHECK-NOF16-NEXT: .reg .b16 %rs<11>; ; CHECK-NOF16-NEXT: .reg .b32 %r<3>; ; CHECK-NOF16-NEXT: .reg .b32 %f<3>; +; CHECK-NOF16-NEXT: .reg .b64 %rd<2>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: ; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1]; @@ -2114,7 +2136,8 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 { ; CHECK-NOF16-NEXT: or.b16 %rs10, %rs9, %rs8; ; CHECK-NOF16-NEXT: cvt.f32.f16 %f1, %rs10; ; CHECK-NOF16-NEXT: cvt.f32.f16 %f2, %rs7; -; CHECK-NOF16-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1}; +; CHECK-NOF16-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NOF16-NEXT: st.param.b64 [func_retval0], %rd1; ; CHECK-NOF16-NEXT: ret; %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b) %xr = fpext <2 x half> %r to <2 x float> diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll new file mode 100644 index 0000000000000..6cb83a1598512 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll @@ -0,0 +1,1375 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; ## Full FP32x2 support enabled by default. 
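The new f32x2-instructions.ll tests that follow check that a <2 x float> value is handled as a single packed 64-bit quantity on sm_100: parameters and return values travel through .b64 registers, and vector arithmetic selects the packed f32x2 instructions. A minimal sketch of the pattern the autogenerated checks encode (@example_fadd is an illustrative name, not a function from the test file; the exact register numbering follows the checks of @test_fadd further below):

define <2 x float> @example_fadd(<2 x float> %a, <2 x float> %b) {
  ; On sm_100 the checks below expect this to lower to packed PTX along the
  ; lines of:
  ;   ld.param.b64  %rd1, [example_fadd_param_0];
  ;   ld.param.b64  %rd2, [example_fadd_param_1];
  ;   add.rn.f32x2  %rd3, %rd1, %rd2;
  ;   st.param.b64  [func_retval0], %rd3;
  %r = fadd <2 x float> %a, %b
  ret <2 x float> %r
}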
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \ +; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \ +; RUN: | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas %{ \ +; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \ +; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \ +; RUN: | %ptxas-verify -arch=sm_100 \ +; RUN: %} + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" +target triple = "nvptx64-nvidia-cuda" + +define <2 x float> @test_ret_const() #0 { +; CHECK-LABEL: test_ret_const( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b32 %f1, 0f40000000; +; CHECK-NEXT: mov.b32 %f2, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + ret <2 x float> +} + +define float @test_extract_0(<2 x float> %a) #0 { +; CHECK-LABEL: test_extract_0( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<2>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0]; +; CHECK-NEXT: mov.b64 {%f1, _}, %rd1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f1; +; CHECK-NEXT: ret; + %e = extractelement <2 x float> %a, i32 0 + ret float %e +} + +define float @test_extract_1(<2 x float> %a) #0 { +; CHECK-LABEL: test_extract_1( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<2>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0]; +; CHECK-NEXT: mov.b64 {_, %f1}, %rd1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f1; +; CHECK-NEXT: ret; + %e = extractelement <2 x float> %a, i32 1 + ret float %e +} + +; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on +; test_extract_i_param_0 where the symbol's address is not taken first (that +; is, moved to a temporary) +; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 { +; %e = extractelement <2 x float> %a, i64 %idx +; ret float %e +; } + +define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fadd( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fadd_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_param_0]; +; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 { +; CHECK-LABEL: test_fadd_imm_0( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40000000; +; CHECK-NEXT: mov.b32 %f2, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1}; +; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> , %a + ret <2 x float> %r +} + +define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 { +; CHECK-LABEL: test_fadd_imm_1( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40000000; +; CHECK-NEXT: mov.b32 %f2, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1}; +; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: 
st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> %a, + ret <2 x float> %r +} + +define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 { +; CHECK-LABEL: test_fadd_v4( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_v4_param_1]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_v4_param_0]; +; CHECK-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4; +; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd5}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 { +; CHECK-LABEL: test_fadd_imm_0_v4( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_0_v4_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40800000; +; CHECK-NEXT: mov.b32 %f2, 0f40400000; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-NEXT: mov.b32 %f3, 0f40000000; +; CHECK-NEXT: mov.b32 %f4, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd5, {%f4, %f3}; +; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd5; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> , %a + ret <4 x float> %r +} + +define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 { +; CHECK-LABEL: test_fadd_imm_1_v4( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_1_v4_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40800000; +; CHECK-NEXT: mov.b32 %f2, 0f40400000; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-NEXT: mov.b32 %f3, 0f40000000; +; CHECK-NEXT: mov.b32 %f4, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd5, {%f4, %f3}; +; CHECK-NEXT: add.rn.f32x2 %rd6, %rd1, %rd5; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> %a, + ret <4 x float> %r +} + +define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fsub( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fsub_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fsub_param_0]; +; CHECK-NEXT: sub.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fsub <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fneg(<2 x float> %a) #0 { +; CHECK-LABEL: test_fneg( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<2>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f00000000; +; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1}; +; CHECK-NEXT: sub.rn.f32x2 %rd3, %rd2, %rd1; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fsub <2 x float> , %a + ret <2 x float> %r +} + +define <2 x float> @test_fmul(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fmul( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fmul_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fmul_param_0]; +; CHECK-NEXT: mul.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: 
ret; + %r = fmul <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 { +; CHECK-LABEL: test_fma( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd3, [test_fma_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_fma_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fma_param_0]; +; CHECK-NEXT: fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; +; CHECK-NEXT: ret; + %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) + ret <2 x float> %r +} + +define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fdiv( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<7>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2; +; CHECK-NEXT: div.rn.f32 %f6, %f3, %f1; +; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fdiv <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_frem( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b32 %f<15>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: div.rn.f32 %f5, %f4, %f2; +; CHECK-NEXT: cvt.rzi.f32.f32 %f6, %f5; +; CHECK-NEXT: neg.f32 %f7, %f6; +; CHECK-NEXT: fma.rn.f32 %f8, %f7, %f2, %f4; +; CHECK-NEXT: testp.infinite.f32 %p1, %f2; +; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1; +; CHECK-NEXT: div.rn.f32 %f10, %f3, %f1; +; CHECK-NEXT: cvt.rzi.f32.f32 %f11, %f10; +; CHECK-NEXT: neg.f32 %f12, %f11; +; CHECK-NEXT: fma.rn.f32 %f13, %f12, %f1, %f3; +; CHECK-NEXT: testp.infinite.f32 %p2, %f1; +; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2; +; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = frem <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 { +; CHECK-LABEL: test_fadd_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fadd_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_ftz_param_0]; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 { +; CHECK-LABEL: test_fadd_imm_0_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_0_ftz_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40000000; +; CHECK-NEXT: mov.b32 %f2, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> , %a + ret <2 x float> %r +} + +define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 { +; 
CHECK-LABEL: test_fadd_imm_1_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fadd_imm_1_ftz_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40000000; +; CHECK-NEXT: mov.b32 %f2, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fadd <2 x float> %a, + ret <2 x float> %r +} + +define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 { +; CHECK-LABEL: test_fadd_v4_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [test_fadd_v4_ftz_param_1]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_v4_ftz_param_0]; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd5, %rd2, %rd4; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd3; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd5}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> %a, %b + ret <4 x float> %r +} + +define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 { +; CHECK-LABEL: test_fadd_imm_0_v4_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_0_v4_ftz_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40800000; +; CHECK-NEXT: mov.b32 %f2, 0f40400000; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd4, %rd2, %rd3; +; CHECK-NEXT: mov.b32 %f3, 0f40000000; +; CHECK-NEXT: mov.b32 %f4, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd5, {%f4, %f3}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd5; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> , %a + ret <4 x float> %r +} + +define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 { +; CHECK-LABEL: test_fadd_imm_1_v4_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_fadd_imm_1_v4_ftz_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f40800000; +; CHECK-NEXT: mov.b32 %f2, 0f40400000; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd4, %rd2, %rd3; +; CHECK-NEXT: mov.b32 %f3, 0f40000000; +; CHECK-NEXT: mov.b32 %f4, 0f3F800000; +; CHECK-NEXT: mov.b64 %rd5, {%f4, %f3}; +; CHECK-NEXT: add.rn.ftz.f32x2 %rd6, %rd1, %rd5; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd6, %rd4}; +; CHECK-NEXT: ret; + %r = fadd <4 x float> %a, + ret <4 x float> %r +} + +define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 { +; CHECK-LABEL: test_fsub_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fsub_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fsub_ftz_param_0]; +; CHECK-NEXT: sub.rn.ftz.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fsub <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 { +; CHECK-LABEL: test_fneg_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<2>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fneg_ftz_param_0]; +; CHECK-NEXT: mov.b32 %f1, 0f00000000; +; CHECK-NEXT: mov.b64 %rd2, {%f1, %f1}; +; CHECK-NEXT: sub.rn.ftz.f32x2 %rd3, %rd2, %rd1; +; CHECK-NEXT: 
st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fsub <2 x float> , %a + ret <2 x float> %r +} + +define <2 x float> @test_fmul_ftz(<2 x float> %a, <2 x float> %b) #2 { +; CHECK-LABEL: test_fmul_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fmul_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fmul_ftz_param_0]; +; CHECK-NEXT: mul.rn.ftz.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fmul <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c) #2 { +; CHECK-LABEL: test_fma_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd3, [test_fma_ftz_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_fma_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fma_ftz_param_0]; +; CHECK-NEXT: fma.rn.ftz.f32x2 %rd4, %rd1, %rd2, %rd3; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; +; CHECK-NEXT: ret; + %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) + ret <2 x float> %r +} + +define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 { +; CHECK-LABEL: test_fdiv_ftz( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<7>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fdiv_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fdiv_ftz_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2; +; CHECK-NEXT: div.rn.ftz.f32 %f6, %f3, %f1; +; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = fdiv <2 x float> %a, %b + ret <2 x float> %r +} + +define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 { +; CHECK-LABEL: test_frem_ftz( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b32 %f<15>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_frem_ftz_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_frem_ftz_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: div.rn.ftz.f32 %f5, %f4, %f2; +; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f6, %f5; +; CHECK-NEXT: neg.ftz.f32 %f7, %f6; +; CHECK-NEXT: fma.rn.ftz.f32 %f8, %f7, %f2, %f4; +; CHECK-NEXT: testp.infinite.f32 %p1, %f2; +; CHECK-NEXT: selp.f32 %f9, %f4, %f8, %p1; +; CHECK-NEXT: div.rn.ftz.f32 %f10, %f3, %f1; +; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f11, %f10; +; CHECK-NEXT: neg.ftz.f32 %f12, %f11; +; CHECK-NEXT: fma.rn.ftz.f32 %f13, %f12, %f1, %f3; +; CHECK-NEXT: testp.infinite.f32 %p2, %f1; +; CHECK-NEXT: selp.f32 %f14, %f3, %f13, %p2; +; CHECK-NEXT: mov.b64 %rd3, {%f14, %f9}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = frem <2 x float> %a, %b + ret <2 x float> %r +} + +define void @test_ldst_v2f32(ptr %a, ptr %b) #0 { +; CHECK-LABEL: test_ldst_v2f32( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v2f32_param_1]; +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v2f32_param_0]; +; CHECK-NEXT: ld.b64 %rd3, [%rd1]; +; CHECK-NEXT: st.b64 [%rd2], %rd3; +; CHECK-NEXT: ret; + %t1 = load <2 x float>, ptr %a + store <2 x float> %t1, ptr %b, align 32 + ret void +} + +define void 
@test_ldst_v3f32(ptr %a, ptr %b) #0 { +; CHECK-LABEL: test_ldst_v3f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<2>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v3f32_param_1]; +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v3f32_param_0]; +; CHECK-NEXT: ld.u64 %rd3, [%rd1]; +; CHECK-NEXT: ld.f32 %f1, [%rd1+8]; +; CHECK-NEXT: st.f32 [%rd2+8], %f1; +; CHECK-NEXT: st.u64 [%rd2], %rd3; +; CHECK-NEXT: ret; + %t1 = load <3 x float>, ptr %a + store <3 x float> %t1, ptr %b, align 32 + ret void +} + +define void @test_ldst_v4f32(ptr %a, ptr %b) #0 { +; CHECK-LABEL: test_ldst_v4f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v4f32_param_1]; +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v4f32_param_0]; +; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1]; +; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4}; +; CHECK-NEXT: ret; + %t1 = load <4 x float>, ptr %a + store <4 x float> %t1, ptr %b, align 32 + ret void +} + +define void @test_ldst_v8f32(ptr %a, ptr %b) #0 { +; CHECK-LABEL: test_ldst_v8f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<9>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_ldst_v8f32_param_1]; +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldst_v8f32_param_0]; +; CHECK-NEXT: ld.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1]; +; CHECK-NEXT: ld.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1+16]; +; CHECK-NEXT: st.v4.f32 [%rd2+16], {%f5, %f6, %f7, %f8}; +; CHECK-NEXT: st.v4.f32 [%rd2], {%f1, %f2, %f3, %f4}; +; CHECK-NEXT: ret; + %t1 = load <8 x float>, ptr %a + store <8 x float> %t1, ptr %b, align 32 + ret void +} + +declare <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) #0 + +define <2 x float> @test_call(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_call( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_call_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_call_param_0]; +; CHECK-NEXT: { // callseq 0, 0 +; CHECK-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NEXT: st.param.b64 [param0], %rd1; +; CHECK-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NEXT: st.param.b64 [param1], %rd2; +; CHECK-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NEXT: call.uni (retval0), +; CHECK-NEXT: test_callee, +; CHECK-NEXT: ( +; CHECK-NEXT: param0, +; CHECK-NEXT: param1 +; CHECK-NEXT: ); +; CHECK-NEXT: ld.param.b64 %rd3, [retval0]; +; CHECK-NEXT: } // callseq 0 +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = call <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) + ret <2 x float> %r +} + +define <2 x float> @test_call_flipped(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_call_flipped( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_call_flipped_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_call_flipped_param_0]; +; CHECK-NEXT: { // callseq 1, 0 +; CHECK-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NEXT: st.param.b64 [param0], %rd2; +; CHECK-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NEXT: st.param.b64 [param1], %rd1; +; CHECK-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NEXT: call.uni (retval0), +; CHECK-NEXT: test_callee, +; CHECK-NEXT: ( +; CHECK-NEXT: param0, +; CHECK-NEXT: param1 +; CHECK-NEXT: ); +; CHECK-NEXT: ld.param.b64 %rd3, [retval0]; +; 
CHECK-NEXT: } // callseq 1 +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a) + ret <2 x float> %r +} + +define <2 x float> @test_tailcall_flipped(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_tailcall_flipped( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_tailcall_flipped_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_tailcall_flipped_param_0]; +; CHECK-NEXT: { // callseq 2, 0 +; CHECK-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NEXT: st.param.b64 [param0], %rd2; +; CHECK-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NEXT: st.param.b64 [param1], %rd1; +; CHECK-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NEXT: call.uni (retval0), +; CHECK-NEXT: test_callee, +; CHECK-NEXT: ( +; CHECK-NEXT: param0, +; CHECK-NEXT: param1 +; CHECK-NEXT: ); +; CHECK-NEXT: ld.param.b64 %rd3, [retval0]; +; CHECK-NEXT: } // callseq 2 +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = tail call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a) + ret <2 x float> %r +} + +define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #0 { +; CHECK-LABEL: test_select( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u8 %rs1, [test_select_param_2]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b64 %rd2, [test_select_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_select_param_0]; +; CHECK-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = select i1 %c, <2 x float> %a, <2 x float> %b + ret <2 x float> %r +} + +define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) #0 { +; CHECK-LABEL: test_select_cc( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b32 %f<11>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3]; +; CHECK-NEXT: ld.param.b64 %rd3, [test_select_cc_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd3; +; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1; +; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd2; +; CHECK-NEXT: mov.b64 {%f7, %f8}, %rd1; +; CHECK-NEXT: selp.f32 %f9, %f8, %f6, %p2; +; CHECK-NEXT: selp.f32 %f10, %f7, %f5, %p1; +; CHECK-NEXT: mov.b64 %rd5, {%f10, %f9}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd5; +; CHECK-NEXT: ret; + %cc = fcmp une <2 x float> %c, %d + %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b + ret <2 x float> %r +} + +define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2 x float> %c, <2 x float> %d) #0 { +; CHECK-LABEL: test_select_cc_f64_f32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-NEXT: .reg .b64 %fd<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f64_f32_param_1]; +; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f64_f32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, 
[test_select_cc_f64_f32_param_3]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f64_f32_param_2]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.neu.f32 %p1, %f3, %f1; +; CHECK-NEXT: setp.neu.f32 %p2, %f4, %f2; +; CHECK-NEXT: selp.f64 %fd5, %fd2, %fd4, %p2; +; CHECK-NEXT: selp.f64 %fd6, %fd1, %fd3, %p1; +; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd6, %fd5}; +; CHECK-NEXT: ret; + %cc = fcmp une <2 x float> %c, %d + %r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b + ret <2 x double> %r +} + +define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x double> %c, <2 x double> %d) #0 { +; CHECK-LABEL: test_select_cc_f32_f64( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b32 %f<7>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-NEXT: .reg .b64 %fd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.f64 {%fd3, %fd4}, [test_select_cc_f32_f64_param_3]; +; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_select_cc_f32_f64_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_select_cc_f32_f64_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_select_cc_f32_f64_param_0]; +; CHECK-NEXT: setp.neu.f64 %p1, %fd1, %fd3; +; CHECK-NEXT: setp.neu.f64 %p2, %fd2, %fd4; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: selp.f32 %f5, %f4, %f2, %p2; +; CHECK-NEXT: selp.f32 %f6, %f3, %f1, %p1; +; CHECK-NEXT: mov.b64 %rd3, {%f6, %f5}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %cc = fcmp une <2 x double> %c, %d + %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b + ret <2 x float> %r +} + +define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_une( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_une_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_une_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.neu.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.neu.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp une <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ueq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ueq_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ueq_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.equ.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.equ.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ueq <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ugt( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: 
.reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ugt_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ugt_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.gtu.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.gtu.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ugt <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_uge( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uge_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uge_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.geu.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.geu.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp uge <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ult( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ult_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ult_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.ltu.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.ltu.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ult <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ule( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ule_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ule_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.leu.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.leu.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ule <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_uno( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_uno_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_uno_param_0]; +; CHECK-NEXT: 
mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.nan.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.nan.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp uno <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_one( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_one_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_one_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.ne.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.ne.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp one <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_oeq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oeq_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oeq_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.eq.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.eq.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp oeq <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ogt( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ogt_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ogt_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.gt.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.gt.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ogt <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_oge( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_oge_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_oge_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.ge.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.ge.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 
[func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp oge <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_olt( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_olt_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_olt_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.lt.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.lt.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp olt <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ole( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ole_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ole_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.le.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.le.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ole <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_fcmp_ord( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_fcmp_ord_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_fcmp_ord_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd2; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd1; +; CHECK-NEXT: setp.num.f32 %p1, %f4, %f2; +; CHECK-NEXT: setp.num.f32 %p2, %f3, %f1; +; CHECK-NEXT: selp.b16 %rs1, -1, 0, %p2; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; +; CHECK-NEXT: selp.b16 %rs2, -1, 0, %p1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs2; +; CHECK-NEXT: ret; + %r = fcmp ord <2 x float> %a, %b + ret <2 x i1> %r +} + +define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 { +; CHECK-LABEL: test_fptosi_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i32_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: cvt.rzi.s32.f32 %r1, %f2; +; CHECK-NEXT: cvt.rzi.s32.f32 %r2, %f1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-NEXT: ret; + %r = fptosi <2 x float> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 { +; CHECK-LABEL: test_fptosi_i64( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fptosi_i64_param_0]; +; CHECK-NEXT: mov.b64 {%f1, 
%f2}, %rd1; +; CHECK-NEXT: cvt.rzi.s64.f32 %rd2, %f2; +; CHECK-NEXT: cvt.rzi.s64.f32 %rd3, %f1; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2}; +; CHECK-NEXT: ret; + %r = fptosi <2 x float> %a to <2 x i64> + ret <2 x i64> %r +} + +define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 { +; CHECK-LABEL: test_fptoui_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi32_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: cvt.rzi.u32.f32 %r1, %f2; +; CHECK-NEXT: cvt.rzi.u32.f32 %r2, %f1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-NEXT: ret; + %r = fptoui <2 x float> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 { +; CHECK-LABEL: test_fptoui_2xi64( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fptoui_2xi64_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: cvt.rzi.u64.f32 %rd2, %f2; +; CHECK-NEXT: cvt.rzi.u64.f32 %rd3, %f1; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2}; +; CHECK-NEXT: ret; + %r = fptoui <2 x float> %a to <2 x i64> + ret <2 x i64> %r +} + +define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 { +; CHECK-LABEL: test_uitofp_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_param_0]; +; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2; +; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1; +; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = uitofp <2 x i32> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 { +; CHECK-LABEL: test_uitofp_2xi64( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0]; +; CHECK-NEXT: cvt.rn.f32.u64 %f1, %rd2; +; CHECK-NEXT: cvt.rn.f32.u64 %f2, %rd1; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = uitofp <2 x i64> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 { +; CHECK-LABEL: test_sitofp_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_param_0]; +; CHECK-NEXT: cvt.rn.f32.s32 %f1, %r2; +; CHECK-NEXT: cvt.rn.f32.s32 %f2, %r1; +; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = sitofp <2 x i32> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 { +; CHECK-LABEL: test_sitofp_2xi64( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0]; +; CHECK-NEXT: cvt.rn.f32.s64 %f1, %rd2; +; CHECK-NEXT: cvt.rn.f32.s64 %f2, %rd1; +; CHECK-NEXT: mov.b64 %rd3, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %r = sitofp <2 x 
i64> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 { +; CHECK-LABEL: test_uitofp_2xi32_fadd( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1]; +; CHECK-NEXT: cvt.rn.f32.u32 %f1, %r2; +; CHECK-NEXT: cvt.rn.f32.u32 %f2, %r1; +; CHECK-NEXT: mov.b64 %rd2, {%f2, %f1}; +; CHECK-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; + %c = uitofp <2 x i32> %a to <2 x float> + %r = fadd <2 x float> %b, %c + ret <2 x float> %r +} + +define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 { +; CHECK-LABEL: test_fptrunc_2xdouble( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-NEXT: .reg .b64 %fd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0]; +; CHECK-NEXT: cvt.rn.f32.f64 %f1, %fd2; +; CHECK-NEXT: cvt.rn.f32.f64 %f2, %fd1; +; CHECK-NEXT: mov.b64 %rd1, {%f2, %f1}; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = fptrunc <2 x double> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 { +; CHECK-LABEL: test_fpext_2xdouble( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<3>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-NEXT: .reg .b64 %fd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_fpext_2xdouble_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: cvt.f64.f32 %fd1, %f2; +; CHECK-NEXT: cvt.f64.f32 %fd2, %f1; +; CHECK-NEXT: st.param.v2.f64 [func_retval0], {%fd2, %fd1}; +; CHECK-NEXT: ret; + %r = fpext <2 x float> %a to <2 x double> + ret <2 x double> %r +} + +define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 { +; CHECK-LABEL: test_bitcast_2xfloat_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_2xi32_param_0]; +; CHECK-NEXT: mov.b64 {_, %r1}, %rd2; +; CHECK-NEXT: cvt.u32.u64 %r2, %rd2; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-NEXT: ret; + %r = bitcast <2 x float> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 { +; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0]; +; CHECK-NEXT: cvt.u64.u32 %rd1, %r1; +; CHECK-NEXT: cvt.u64.u32 %rd2, %r2; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 32; +; CHECK-NEXT: or.b64 %rd4, %rd1, %rd3; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; +; CHECK-NEXT: ret; + %r = bitcast <2 x i32> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 { +; CHECK-LABEL: test_bitcast_double_to_2xfloat( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-NEXT: .reg .b64 %fd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f64 %fd1, [test_bitcast_double_to_2xfloat_param_0]; +; CHECK-NEXT: mov.b64 %rd1, %fd1; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = 
bitcast double %a to <2 x float> + ret <2 x float> %r +} + +define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 { +; CHECK-LABEL: test_bitcast_2xfloat_to_double( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-NEXT: .reg .b64 %fd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd2, [test_bitcast_2xfloat_to_double_param_0]; +; CHECK-NEXT: mov.b64 %fd1, %rd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd1; +; CHECK-NEXT: ret; + %r = bitcast <2 x float> %a to double + ret double %r +} + +attributes #0 = { nounwind } +attributes #1 = { "unsafe-fp-math" = "true" } +attributes #2 = { "denormal-fp-math"="preserve-sign" } diff --git a/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll new file mode 100644 index 0000000000000..3ffbae53934a8 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll @@ -0,0 +1,112 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,FAST +; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 | FileCheck %s --check-prefixes=CHECK,DEFAULT +; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | %ptxas-verify -arch sm_100 %} +; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 | %ptxas-verify -arch sm_100 %} + +target triple = "nvptx64-unknown-cuda" + +;; FAST-LABEL: @t0 +;; DEFAULT-LABEL: @t0 +define <2 x float> @t0(<2 x float> %a, <2 x float> %b, <2 x float> %c) { +; FAST-LABEL: t0( +; FAST: { +; FAST-NEXT: .reg .b64 %rd<5>; +; FAST-EMPTY: +; FAST-NEXT: // %bb.0: +; FAST-NEXT: ld.param.b64 %rd1, [t0_param_2]; +; FAST-NEXT: ld.param.b64 %rd2, [t0_param_1]; +; FAST-NEXT: ld.param.b64 %rd3, [t0_param_0]; +; FAST-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1; +; FAST-NEXT: st.param.b64 [func_retval0], %rd4; +; FAST-NEXT: ret; +; +; DEFAULT-LABEL: t0( +; DEFAULT: { +; DEFAULT-NEXT: .reg .b64 %rd<6>; +; DEFAULT-EMPTY: +; DEFAULT-NEXT: // %bb.0: +; DEFAULT-NEXT: ld.param.b64 %rd1, [t0_param_2]; +; DEFAULT-NEXT: ld.param.b64 %rd2, [t0_param_1]; +; DEFAULT-NEXT: ld.param.b64 %rd3, [t0_param_0]; +; DEFAULT-NEXT: mul.rn.f32x2 %rd4, %rd3, %rd2; +; DEFAULT-NEXT: add.rn.f32x2 %rd5, %rd4, %rd1; +; DEFAULT-NEXT: st.param.b64 [func_retval0], %rd5; +; DEFAULT-NEXT: ret; + %v0 = fmul <2 x float> %a, %b + %v1 = fadd <2 x float> %v0, %c + ret <2 x float> %v1 +} + +;; We cannot form an fma here, but make sure we explicitly emit add.rn.f32x2 +;; to prevent ptxas from fusing this with anything else. 
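;; (Illustrative aside, not part of this patch.) The ".rn" suffix pins IEEE
;; round-to-nearest-even rounding on the packed op; ptxas will not contract a
;; rounded add/mul into an fma, whereas the plain add.f32x2/mul.f32x2 forms
;; leave it free to do so. A rough sketch of the kind of IR this matters for
;; (hypothetical function name, mirroring what @t1 below checks at the PTX
;; level):
;;   define <2 x float> @no_fuse(<2 x float> %a, <2 x float> %b) {
;;     %s = fadd <2 x float> %a, %b        ; no 'contract' flag
;;     %d = fsub <2 x float> %a, %b
;;     %p = fmul <2 x float> %s, %d        ; must stay a separately rounded mul
;;     ret <2 x float> %p
;;   }
;; With default fp-contract, all three operations should lower to the ".rn"
;; packed forms; only when 'contract' (or -fp-contract=fast) is present may
;; they relax to the unrounded forms or fold into fma.rn.f32x2, as the tests
;; below verify.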
+define <2 x float> @t1(<2 x float> %a, <2 x float> %b) { +; FAST-LABEL: t1( +; FAST: { +; FAST-NEXT: .reg .b64 %rd<6>; +; FAST-EMPTY: +; FAST-NEXT: // %bb.0: +; FAST-NEXT: ld.param.b64 %rd1, [t1_param_1]; +; FAST-NEXT: ld.param.b64 %rd2, [t1_param_0]; +; FAST-NEXT: add.f32x2 %rd3, %rd2, %rd1; +; FAST-NEXT: sub.f32x2 %rd4, %rd2, %rd1; +; FAST-NEXT: mul.f32x2 %rd5, %rd3, %rd4; +; FAST-NEXT: st.param.b64 [func_retval0], %rd5; +; FAST-NEXT: ret; +; +; DEFAULT-LABEL: t1( +; DEFAULT: { +; DEFAULT-NEXT: .reg .b64 %rd<6>; +; DEFAULT-EMPTY: +; DEFAULT-NEXT: // %bb.0: +; DEFAULT-NEXT: ld.param.b64 %rd1, [t1_param_1]; +; DEFAULT-NEXT: ld.param.b64 %rd2, [t1_param_0]; +; DEFAULT-NEXT: add.rn.f32x2 %rd3, %rd2, %rd1; +; DEFAULT-NEXT: sub.rn.f32x2 %rd4, %rd2, %rd1; +; DEFAULT-NEXT: mul.rn.f32x2 %rd5, %rd3, %rd4; +; DEFAULT-NEXT: st.param.b64 [func_retval0], %rd5; +; DEFAULT-NEXT: ret; + %v1 = fadd <2 x float> %a, %b + %v2 = fsub <2 x float> %a, %b + %v3 = fmul <2 x float> %v1, %v2 + ret <2 x float> %v3 +} + +;; Make sure we generate the non ".rn" version when the "contract" flag is +;; present on the instructions +define <2 x float> @t2(<2 x float> %a, <2 x float> %b) { +; CHECK-LABEL: t2( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [t2_param_1]; +; CHECK-NEXT: ld.param.b64 %rd2, [t2_param_0]; +; CHECK-NEXT: add.f32x2 %rd3, %rd2, %rd1; +; CHECK-NEXT: sub.f32x2 %rd4, %rd2, %rd1; +; CHECK-NEXT: mul.f32x2 %rd5, %rd3, %rd4; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd5; +; CHECK-NEXT: ret; + %v1 = fadd contract <2 x float> %a, %b + %v2 = fsub contract <2 x float> %a, %b + %v3 = fmul contract <2 x float> %v1, %v2 + ret <2 x float> %v3 +} + +;; Make sure we always fold to fma when the "contract" flag is present +define <2 x float> @t3(<2 x float> %a, <2 x float> %b, <2 x float> %c) { +; CHECK-LABEL: t3( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [t3_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [t3_param_1]; +; CHECK-NEXT: ld.param.b64 %rd3, [t3_param_0]; +; CHECK-NEXT: fma.rn.f32x2 %rd4, %rd3, %rd2, %rd1; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; +; CHECK-NEXT: ret; + %v0 = fmul contract <2 x float> %a, %b + %v1 = fadd contract <2 x float> %v0, %c + ret <2 x float> %v1 +} diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll index 2fe2d28320f06..be16ef1374c86 100644 --- a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll +++ b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll @@ -32,7 +32,7 @@ define half @ld_global_v2f16(ptr addrspace(1) %ptr) { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v2f16_param_0]; -; CHECK-NEXT: ld.global.nc.u32 %r1, [%rd1]; +; CHECK-NEXT: ld.global.nc.b32 %r1, [%rd1]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; ; CHECK-NEXT: cvt.f32.f16 %f1, %rs2; ; CHECK-NEXT: cvt.f32.f16 %f2, %rs1; @@ -127,6 +127,76 @@ define half @ld_global_v8f16(ptr addrspace(1) %ptr) { ret half %sum } +define float @ld_global_v2f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: ld_global_v2f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<4>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v2f32_param_0]; +; CHECK-NEXT: ld.global.nc.v2.f32 {%f1, %f2}, [%rd1]; +; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; +; CHECK-NEXT: ret; + %a = load <2 x float>, ptr addrspace(1) %ptr, !invariant.load !0 
+ %v1 = extractelement <2 x float> %a, i32 0 + %v2 = extractelement <2 x float> %a, i32 1 + %sum = fadd float %v1, %v2 + ret float %sum +} + +define float @ld_global_v4f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: ld_global_v4f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<8>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v4f32_param_0]; +; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1]; +; CHECK-NEXT: add.rn.f32 %f5, %f1, %f2; +; CHECK-NEXT: add.rn.f32 %f6, %f3, %f4; +; CHECK-NEXT: add.rn.f32 %f7, %f5, %f6; +; CHECK-NEXT: st.param.f32 [func_retval0], %f7; +; CHECK-NEXT: ret; + %a = load <4 x float>, ptr addrspace(1) %ptr, !invariant.load !0 + %v1 = extractelement <4 x float> %a, i32 0 + %v2 = extractelement <4 x float> %a, i32 1 + %v3 = extractelement <4 x float> %a, i32 2 + %v4 = extractelement <4 x float> %a, i32 3 + %sum1 = fadd float %v1, %v2 + %sum2 = fadd float %v3, %v4 + %sum = fadd float %sum1, %sum2 + ret float %sum +} + +define float @ld_global_v8f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: ld_global_v8f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %f<12>; +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd1, [ld_global_v8f32_param_0]; +; CHECK-NEXT: ld.global.nc.v4.f32 {%f1, %f2, %f3, %f4}, [%rd1+16]; +; CHECK-NEXT: ld.global.nc.v4.f32 {%f5, %f6, %f7, %f8}, [%rd1]; +; CHECK-NEXT: add.rn.f32 %f9, %f5, %f7; +; CHECK-NEXT: add.rn.f32 %f10, %f1, %f3; +; CHECK-NEXT: add.rn.f32 %f11, %f9, %f10; +; CHECK-NEXT: st.param.f32 [func_retval0], %f11; +; CHECK-NEXT: ret; + %a = load <8 x float>, ptr addrspace(1) %ptr, !invariant.load !0 + %v1 = extractelement <8 x float> %a, i32 0 + %v2 = extractelement <8 x float> %a, i32 2 + %v3 = extractelement <8 x float> %a, i32 4 + %v4 = extractelement <8 x float> %a, i32 6 + %sum1 = fadd float %v1, %v2 + %sum2 = fadd float %v3, %v4 + %sum = fadd float %sum1, %sum2 + ret float %sum +} + define i8 @ld_global_v8i8(ptr addrspace(1) %ptr) { ; CHECK-LABEL: ld_global_v8i8( ; CHECK: { diff --git a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll index 2c1550aa082f0..35310a4b638bb 100644 --- a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll +++ b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll @@ -12,6 +12,7 @@ declare float @llvm.nvvm.ldu.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align) declare double @llvm.nvvm.ldu.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align) declare half @llvm.nvvm.ldu.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align) declare <2 x half> @llvm.nvvm.ldu.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 %align) +declare <2 x float> @llvm.nvvm.ldu.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 %align) declare i8 @llvm.nvvm.ldg.global.i.i8.p1(ptr addrspace(1) %ptr, i32 %align) declare i16 @llvm.nvvm.ldg.global.i.i16.p1(ptr addrspace(1) %ptr, i32 %align) @@ -22,6 +23,7 @@ declare float @llvm.nvvm.ldg.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align) declare double @llvm.nvvm.ldg.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align) declare half @llvm.nvvm.ldg.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align) declare <2 x half> @llvm.nvvm.ldg.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 %align) +declare <2 x float> @llvm.nvvm.ldg.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 %align) define i8 @test_ldu_i8(ptr addrspace(1) %ptr) { ; CHECK-LABEL: test_ldu_i8( @@ -154,13 +156,27 @@ define <2 x half> @test_ldu_v2f16(ptr addrspace(1) %ptr) { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, 
[test_ldu_v2f16_param_0]; -; CHECK-NEXT: ldu.global.u32 %r1, [%rd1]; +; CHECK-NEXT: ldu.global.b32 %r1, [%rd1]; ; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %val = tail call <2 x half> @llvm.nvvm.ldu.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 4) ret <2 x half> %val } +define <2 x float> @test_ldu_v2f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: test_ldu_v2f32( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldu_v2f32_param_0]; +; CHECK-NEXT: ldu.global.b64 %rd2, [%rd1]; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd2; +; CHECK-NEXT: ret; + %val = tail call <2 x float> @llvm.nvvm.ldu.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 8) + ret <2 x float> %val +} + define i8 @test_ldg_i8(ptr addrspace(1) %ptr) { ; CHECK-LABEL: test_ldg_i8( ; CHECK: { @@ -291,13 +307,27 @@ define <2 x half> @test_ldg_v2f16(ptr addrspace(1) %ptr) { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [test_ldg_v2f16_param_0]; -; CHECK-NEXT: ld.global.nc.u32 %r1, [%rd1]; +; CHECK-NEXT: ld.global.nc.b32 %r1, [%rd1]; ; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %val = tail call <2 x half> @llvm.nvvm.ldg.global.f.v2f16.p1(ptr addrspace(1) %ptr, i32 4) ret <2 x half> %val } +define <2 x float> @test_ldg_v2f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: test_ldg_v2f32( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.u64 %rd1, [test_ldg_v2f32_param_0]; +; CHECK-NEXT: ld.global.nc.b64 %rd2, [%rd1]; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd2; +; CHECK-NEXT: ret; + %val = tail call <2 x float> @llvm.nvvm.ldg.global.f.v2f32.p1(ptr addrspace(1) %ptr, i32 8) + ret <2 x float> %val +} + @g = addrspace(1) global i32 0 define i32 @test_ldg_asi() { diff --git a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll index 6a34135a31783..34a81867f281f 100644 --- a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll +++ b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll @@ -80,7 +80,7 @@ define ptx_kernel void @foo7(ptr noalias readonly %from, ptr %to) { ; SM20-LABEL: .visible .entry foo8( ; SM20: ld.global.u32 ; SM35-LABEL: .visible .entry foo8( -; SM35: ld.global.nc.u32 +; SM35: ld.global.nc.b32 define ptx_kernel void @foo8(ptr noalias readonly %from, ptr %to) { %1 = load <2 x i16>, ptr %from store <2 x i16> %1, ptr %to @@ -108,9 +108,9 @@ define ptx_kernel void @foo10(ptr noalias readonly %from, ptr %to) { } ; SM20-LABEL: .visible .entry foo11( -; SM20: ld.global.v2.f32 +; SM20: ld.global.b64 ; SM35-LABEL: .visible .entry foo11( -; SM35: ld.global.nc.v2.f32 +; SM35: ld.global.nc.b64 define ptx_kernel void @foo11(ptr noalias readonly %from, ptr %to) { %1 = load <2 x float>, ptr %from store <2 x float> %1, ptr %to @@ -130,7 +130,7 @@ define ptx_kernel void @foo12(ptr noalias readonly %from, ptr %to) { ; SM20-LABEL: .visible .entry foo13( ; SM20: ld.global.u32 ; SM35-LABEL: .visible .entry foo13( -; SM35: ld.global.nc.u32 +; SM35: ld.global.nc.b32 define ptx_kernel void @foo13(ptr noalias readonly %from, ptr %to) { %1 = load <4 x i8>, ptr %from store <4 x i8> %1, ptr %to diff --git a/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll b/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll index b3abcc1a21d2c..b200d8b23fe62 100644 --- a/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll +++ b/llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll @@ -18,7 +18,7 @@ define <4 x 
float> @t1(ptr %p1) { define <4 x float> @t2(ptr %p1) { ; CHECK-NOT: ld.v4 ; CHECK-NOT: ld.v2 -; CHECK: ld.f32 +; CHECK: ld.u32 %r = load <4 x float>, ptr %p1, align 4 ret <4 x float> %r } @@ -26,7 +26,7 @@ define <4 x float> @t2(ptr %p1) { ; CHECK-LABEL: t3 define <4 x float> @t3(ptr %p1) { ; CHECK-NOT: ld.v4 -; CHECK: ld.v2 +; CHECK: ld.b64 %r = load <4 x float>, ptr %p1, align 8 ret <4 x float> %r } @@ -111,7 +111,7 @@ define void @s1(ptr %p1, <4 x float> %v) { define void @s2(ptr %p1, <4 x float> %v) { ; CHECK-NOT: st.v4 ; CHECK-NOT: st.v2 -; CHECK: st.f32 +; CHECK: st.u32 store <4 x float> %v, ptr %p1, align 4 ret void } diff --git a/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll b/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll index 1d14be9070b07..fd74fc9c76387 100644 --- a/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll +++ b/llvm/test/CodeGen/NVPTX/read-global-variable-constant.ll @@ -17,7 +17,7 @@ define float @test_gv_float() { ; CHECK-LABEL: test_gv_float2() define <2 x float> @test_gv_float2() { -; CHECK: ld.global.nc.v2.f32 +; CHECK: ld.global.nc.b64 %v = load <2 x float>, ptr @gv_float2 ret <2 x float> %v } diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index 020a61a1675aa..83ae31ac9647e 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -116,18 +116,23 @@ define float @reduce_fadd_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fadd_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<17>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fadd_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fadd_float_param_0]; -; CHECK-NEXT: add.rn.f32 %f9, %f1, 0f00000000; -; CHECK-NEXT: add.rn.f32 %f10, %f9, %f2; -; CHECK-NEXT: add.rn.f32 %f11, %f10, %f3; -; CHECK-NEXT: add.rn.f32 %f12, %f11, %f4; -; CHECK-NEXT: add.rn.f32 %f13, %f12, %f5; -; CHECK-NEXT: add.rn.f32 %f14, %f13, %f6; -; CHECK-NEXT: add.rn.f32 %f15, %f14, %f7; -; CHECK-NEXT: add.rn.f32 %f16, %f15, %f8; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: add.rn.f32 %f3, %f1, 0f00000000; +; CHECK-NEXT: add.rn.f32 %f4, %f3, %f2; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd2; +; CHECK-NEXT: add.rn.f32 %f7, %f4, %f5; +; CHECK-NEXT: add.rn.f32 %f8, %f7, %f6; +; CHECK-NEXT: mov.b64 {%f9, %f10}, %rd3; +; CHECK-NEXT: add.rn.f32 %f11, %f8, %f9; +; CHECK-NEXT: add.rn.f32 %f12, %f11, %f10; +; CHECK-NEXT: mov.b64 {%f13, %f14}, %rd4; +; CHECK-NEXT: add.rn.f32 %f15, %f12, %f13; +; CHECK-NEXT: add.rn.f32 %f16, %f15, %f14; ; CHECK-NEXT: st.param.f32 [func_retval0], %f16; ; CHECK-NEXT: ret; %res = call float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in) @@ -135,45 +140,95 @@ define float @reduce_fadd_float(<8 x float> %in) { } define float @reduce_fadd_float_reassoc(<8 x float> %in) { -; CHECK-LABEL: reduce_fadd_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %f<17>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fadd_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fadd_float_reassoc_param_0]; -; CHECK-NEXT: add.rn.f32 %f9, %f3, %f7; -; CHECK-NEXT: add.rn.f32 %f10, %f1, %f5; -; CHECK-NEXT: add.rn.f32 %f11, %f4, %f8; -; CHECK-NEXT: 
add.rn.f32 %f12, %f2, %f6; -; CHECK-NEXT: add.rn.f32 %f13, %f12, %f11; -; CHECK-NEXT: add.rn.f32 %f14, %f10, %f9; -; CHECK-NEXT: add.rn.f32 %f15, %f14, %f13; -; CHECK-NEXT: add.rn.f32 %f16, %f15, 0f00000000; -; CHECK-NEXT: st.param.f32 [func_retval0], %f16; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_fadd_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %f<17>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-SM80-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-SM80-NEXT: add.rn.f32 %f5, %f3, %f1; +; CHECK-SM80-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-SM80-NEXT: add.rn.f32 %f10, %f8, %f6; +; CHECK-SM80-NEXT: add.rn.f32 %f11, %f4, %f2; +; CHECK-SM80-NEXT: add.rn.f32 %f12, %f9, %f7; +; CHECK-SM80-NEXT: add.rn.f32 %f13, %f12, %f11; +; CHECK-SM80-NEXT: add.rn.f32 %f14, %f10, %f5; +; CHECK-SM80-NEXT: add.rn.f32 %f15, %f14, %f13; +; CHECK-SM80-NEXT: add.rn.f32 %f16, %f15, 0f00000000; +; CHECK-SM80-NEXT: st.param.f32 [func_retval0], %f16; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fadd_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %f<5>; +; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0]; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5; +; CHECK-SM100-NEXT: mov.b64 {_, %f1}, %rd7; +; CHECK-SM100-NEXT: // implicit-def: %f2 +; CHECK-SM100-NEXT: mov.b64 %rd8, {%f1, %f2}; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8; +; CHECK-SM100-NEXT: mov.b64 {%f3, _}, %rd9; +; CHECK-SM100-NEXT: add.rn.f32 %f4, %f3, 0f00000000; +; CHECK-SM100-NEXT: st.param.f32 [func_retval0], %f4; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in) ret float %res } define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) { -; CHECK-LABEL: reduce_fadd_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %f<15>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.f32 %f7, [reduce_fadd_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fadd_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fadd_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: add.rn.f32 %f8, %f3, %f7; -; CHECK-NEXT: add.rn.f32 %f9, %f1, %f5; -; CHECK-NEXT: add.rn.f32 %f10, %f9, %f8; -; CHECK-NEXT: add.rn.f32 %f11, %f2, %f6; -; CHECK-NEXT: add.rn.f32 %f12, %f11, %f4; -; CHECK-NEXT: add.rn.f32 %f13, %f10, %f12; -; CHECK-NEXT: add.rn.f32 %f14, %f13, 0f00000000; -; CHECK-NEXT: st.param.f32 [func_retval0], %f14; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_fadd_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %f<15>; +; CHECK-SM80-NEXT: .reg .b64 %rd<2>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b64 %rd1, [reduce_fadd_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%f5, %f6}, %rd1; +; CHECK-SM80-NEXT: ld.param.f32 %f7, 
[reduce_fadd_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fadd_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: add.rn.f32 %f8, %f3, %f7; +; CHECK-SM80-NEXT: add.rn.f32 %f9, %f1, %f5; +; CHECK-SM80-NEXT: add.rn.f32 %f10, %f9, %f8; +; CHECK-SM80-NEXT: add.rn.f32 %f11, %f2, %f6; +; CHECK-SM80-NEXT: add.rn.f32 %f12, %f11, %f4; +; CHECK-SM80-NEXT: add.rn.f32 %f13, %f10, %f12; +; CHECK-SM80-NEXT: add.rn.f32 %f14, %f13, 0f00000000; +; CHECK-SM80-NEXT: st.param.f32 [func_retval0], %f14; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fadd_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %f<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [reduce_fadd_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%f5, %f6}, %rd1; +; CHECK-SM100-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fadd_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%f1, %f2}; +; CHECK-SM100-NEXT: mov.b64 %rd3, {%f3, %f4}; +; CHECK-SM100-NEXT: ld.param.f32 %f7, [reduce_fadd_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: mov.b32 %f8, 0f80000000; +; CHECK-SM100-NEXT: mov.b64 %rd4, {%f7, %f8}; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd3, %rd4; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd2, %rd1; +; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5; +; CHECK-SM100-NEXT: mov.b64 {%f9, %f10}, %rd7; +; CHECK-SM100-NEXT: add.rn.f32 %f11, %f9, %f10; +; CHECK-SM100-NEXT: add.rn.f32 %f12, %f11, 0f00000000; +; CHECK-SM100-NEXT: st.param.f32 [func_retval0], %f12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <7 x float> %in) ret float %res } @@ -275,17 +330,22 @@ define float @reduce_fmul_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmul_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmul_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmul_float_param_0]; -; CHECK-NEXT: mul.rn.f32 %f9, %f1, %f2; -; CHECK-NEXT: mul.rn.f32 %f10, %f9, %f3; -; CHECK-NEXT: mul.rn.f32 %f11, %f10, %f4; -; CHECK-NEXT: mul.rn.f32 %f12, %f11, %f5; -; CHECK-NEXT: mul.rn.f32 %f13, %f12, %f6; -; CHECK-NEXT: mul.rn.f32 %f14, %f13, %f7; -; CHECK-NEXT: mul.rn.f32 %f15, %f14, %f8; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd1; +; CHECK-NEXT: mul.rn.f32 %f3, %f1, %f2; +; CHECK-NEXT: mov.b64 {%f4, %f5}, %rd2; +; CHECK-NEXT: mul.rn.f32 %f6, %f3, %f4; +; CHECK-NEXT: mul.rn.f32 %f7, %f6, %f5; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd3; +; CHECK-NEXT: mul.rn.f32 %f10, %f7, %f8; +; CHECK-NEXT: mul.rn.f32 %f11, %f10, %f9; +; CHECK-NEXT: mov.b64 {%f12, %f13}, %rd4; +; CHECK-NEXT: mul.rn.f32 %f14, %f11, %f12; +; CHECK-NEXT: mul.rn.f32 %f15, %f14, %f13; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; ; CHECK-NEXT: ret; %res = call float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in) @@ -293,43 +353,91 @@ define float @reduce_fmul_float(<8 x float> %in) { } define float @reduce_fmul_float_reassoc(<8 x float> %in) { -; CHECK-LABEL: reduce_fmul_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %f<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, 
[reduce_fmul_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmul_float_reassoc_param_0]; -; CHECK-NEXT: mul.rn.f32 %f9, %f3, %f7; -; CHECK-NEXT: mul.rn.f32 %f10, %f1, %f5; -; CHECK-NEXT: mul.rn.f32 %f11, %f4, %f8; -; CHECK-NEXT: mul.rn.f32 %f12, %f2, %f6; -; CHECK-NEXT: mul.rn.f32 %f13, %f12, %f11; -; CHECK-NEXT: mul.rn.f32 %f14, %f10, %f9; -; CHECK-NEXT: mul.rn.f32 %f15, %f14, %f13; -; CHECK-NEXT: st.param.f32 [func_retval0], %f15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_fmul_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %f<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-SM80-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-SM80-NEXT: mul.rn.f32 %f5, %f3, %f1; +; CHECK-SM80-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-SM80-NEXT: mul.rn.f32 %f10, %f8, %f6; +; CHECK-SM80-NEXT: mul.rn.f32 %f11, %f4, %f2; +; CHECK-SM80-NEXT: mul.rn.f32 %f12, %f9, %f7; +; CHECK-SM80-NEXT: mul.rn.f32 %f13, %f12, %f11; +; CHECK-SM80-NEXT: mul.rn.f32 %f14, %f10, %f5; +; CHECK-SM80-NEXT: mul.rn.f32 %f15, %f14, %f13; +; CHECK-SM80-NEXT: st.param.f32 [func_retval0], %f15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmul_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %f<4>; +; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5; +; CHECK-SM100-NEXT: mov.b64 {_, %f1}, %rd7; +; CHECK-SM100-NEXT: // implicit-def: %f2 +; CHECK-SM100-NEXT: mov.b64 %rd8, {%f1, %f2}; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8; +; CHECK-SM100-NEXT: mov.b64 {%f3, _}, %rd9; +; CHECK-SM100-NEXT: st.param.f32 [func_retval0], %f3; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in) ret float %res } define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) { -; CHECK-LABEL: reduce_fmul_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %f<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmul_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fmul_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmul_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: mul.rn.f32 %f8, %f3, %f7; -; CHECK-NEXT: mul.rn.f32 %f9, %f1, %f5; -; CHECK-NEXT: mul.rn.f32 %f10, %f9, %f8; -; CHECK-NEXT: mul.rn.f32 %f11, %f2, %f6; -; CHECK-NEXT: mul.rn.f32 %f12, %f11, %f4; -; CHECK-NEXT: mul.rn.f32 %f13, %f10, %f12; -; CHECK-NEXT: st.param.f32 [func_retval0], %f13; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_fmul_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %f<14>; +; CHECK-SM80-NEXT: .reg .b64 %rd<2>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b64 %rd1, [reduce_fmul_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%f5, %f6}, %rd1; +; 
CHECK-SM80-NEXT: ld.param.f32 %f7, [reduce_fmul_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmul_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: mul.rn.f32 %f8, %f3, %f7; +; CHECK-SM80-NEXT: mul.rn.f32 %f9, %f1, %f5; +; CHECK-SM80-NEXT: mul.rn.f32 %f10, %f9, %f8; +; CHECK-SM80-NEXT: mul.rn.f32 %f11, %f2, %f6; +; CHECK-SM80-NEXT: mul.rn.f32 %f12, %f11, %f4; +; CHECK-SM80-NEXT: mul.rn.f32 %f13, %f10, %f12; +; CHECK-SM80-NEXT: st.param.f32 [func_retval0], %f13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmul_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %f<12>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [reduce_fmul_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%f5, %f6}, %rd1; +; CHECK-SM100-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmul_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%f1, %f2}; +; CHECK-SM100-NEXT: mov.b64 %rd3, {%f3, %f4}; +; CHECK-SM100-NEXT: ld.param.f32 %f7, [reduce_fmul_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: mov.b32 %f8, 0f3F800000; +; CHECK-SM100-NEXT: mov.b64 %rd4, {%f7, %f8}; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd3, %rd4; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd2, %rd1; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5; +; CHECK-SM100-NEXT: mov.b64 {%f9, %f10}, %rd7; +; CHECK-SM100-NEXT: mul.rn.f32 %f11, %f9, %f10; +; CHECK-SM100-NEXT: st.param.f32 [func_retval0], %f11; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <7 x float> %in) ret float %res } @@ -405,15 +513,20 @@ define float @reduce_fmax_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmax_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmax_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmax_float_param_0]; -; CHECK-NEXT: max.f32 %f9, %f4, %f8; -; CHECK-NEXT: max.f32 %f10, %f2, %f6; -; CHECK-NEXT: max.f32 %f11, %f10, %f9; -; CHECK-NEXT: max.f32 %f12, %f3, %f7; -; CHECK-NEXT: max.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: max.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: max.f32 %f10, %f9, %f7; +; CHECK-NEXT: max.f32 %f11, %f10, %f5; +; CHECK-NEXT: max.f32 %f12, %f3, %f1; +; CHECK-NEXT: max.f32 %f13, %f8, %f6; ; CHECK-NEXT: max.f32 %f14, %f13, %f12; ; CHECK-NEXT: max.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -427,15 +540,20 @@ define float @reduce_fmax_float_reassoc(<8 x float> %in) { ; CHECK-LABEL: reduce_fmax_float_reassoc( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmax_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmax_float_reassoc_param_0]; -; CHECK-NEXT: max.f32 %f9, %f4, %f8; -; CHECK-NEXT: max.f32 %f10, %f2, %f6; -; CHECK-NEXT: max.f32 %f11, %f10, %f9; -; CHECK-NEXT: max.f32 %f12, %f3, %f7; -; CHECK-NEXT: max.f32 %f13, %f1, %f5; +; 
CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: max.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: max.f32 %f10, %f9, %f7; +; CHECK-NEXT: max.f32 %f11, %f10, %f5; +; CHECK-NEXT: max.f32 %f12, %f3, %f1; +; CHECK-NEXT: max.f32 %f13, %f8, %f6; ; CHECK-NEXT: max.f32 %f14, %f13, %f12; ; CHECK-NEXT: max.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -449,10 +567,12 @@ define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) { ; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<14>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd1; ; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; ; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; ; CHECK-NEXT: max.f32 %f8, %f3, %f7; ; CHECK-NEXT: max.f32 %f9, %f1, %f5; @@ -537,15 +657,20 @@ define float @reduce_fmin_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmin_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmin_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmin_float_param_0]; -; CHECK-NEXT: min.f32 %f9, %f4, %f8; -; CHECK-NEXT: min.f32 %f10, %f2, %f6; -; CHECK-NEXT: min.f32 %f11, %f10, %f9; -; CHECK-NEXT: min.f32 %f12, %f3, %f7; -; CHECK-NEXT: min.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: min.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: min.f32 %f10, %f9, %f7; +; CHECK-NEXT: min.f32 %f11, %f10, %f5; +; CHECK-NEXT: min.f32 %f12, %f3, %f1; +; CHECK-NEXT: min.f32 %f13, %f8, %f6; ; CHECK-NEXT: min.f32 %f14, %f13, %f12; ; CHECK-NEXT: min.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -559,15 +684,20 @@ define float @reduce_fmin_float_reassoc(<8 x float> %in) { ; CHECK-LABEL: reduce_fmin_float_reassoc( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmin_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmin_float_reassoc_param_0]; -; CHECK-NEXT: min.f32 %f9, %f4, %f8; -; CHECK-NEXT: min.f32 %f10, %f2, %f6; -; CHECK-NEXT: min.f32 %f11, %f10, %f9; -; CHECK-NEXT: min.f32 %f12, %f3, %f7; -; CHECK-NEXT: min.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: min.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: 
mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: min.f32 %f10, %f9, %f7; +; CHECK-NEXT: min.f32 %f11, %f10, %f5; +; CHECK-NEXT: min.f32 %f12, %f3, %f1; +; CHECK-NEXT: min.f32 %f13, %f8, %f6; ; CHECK-NEXT: min.f32 %f14, %f13, %f12; ; CHECK-NEXT: min.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -581,10 +711,12 @@ define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) { ; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<14>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd1; ; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; ; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; ; CHECK-NEXT: min.f32 %f8, %f3, %f7; ; CHECK-NEXT: min.f32 %f9, %f1, %f5; @@ -669,15 +801,20 @@ define float @reduce_fmaximum_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmaximum_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmaximum_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmaximum_float_param_0]; -; CHECK-NEXT: max.NaN.f32 %f9, %f4, %f8; -; CHECK-NEXT: max.NaN.f32 %f10, %f2, %f6; -; CHECK-NEXT: max.NaN.f32 %f11, %f10, %f9; -; CHECK-NEXT: max.NaN.f32 %f12, %f3, %f7; -; CHECK-NEXT: max.NaN.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: max.NaN.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: max.NaN.f32 %f10, %f9, %f7; +; CHECK-NEXT: max.NaN.f32 %f11, %f10, %f5; +; CHECK-NEXT: max.NaN.f32 %f12, %f3, %f1; +; CHECK-NEXT: max.NaN.f32 %f13, %f8, %f6; ; CHECK-NEXT: max.NaN.f32 %f14, %f13, %f12; ; CHECK-NEXT: max.NaN.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -691,15 +828,20 @@ define float @reduce_fmaximum_float_reassoc(<8 x float> %in) { ; CHECK-LABEL: reduce_fmaximum_float_reassoc( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmaximum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmaximum_float_reassoc_param_0]; -; CHECK-NEXT: max.NaN.f32 %f9, %f4, %f8; -; CHECK-NEXT: max.NaN.f32 %f10, %f2, %f6; -; CHECK-NEXT: max.NaN.f32 %f11, %f10, %f9; -; CHECK-NEXT: max.NaN.f32 %f12, %f3, %f7; -; CHECK-NEXT: max.NaN.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: max.NaN.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: max.NaN.f32 %f10, %f9, %f7; +; CHECK-NEXT: max.NaN.f32 %f11, %f10, %f5; +; CHECK-NEXT: max.NaN.f32 %f12, %f3, %f1; +; CHECK-NEXT: max.NaN.f32 %f13, %f8, %f6; ; CHECK-NEXT: 
max.NaN.f32 %f14, %f13, %f12; ; CHECK-NEXT: max.NaN.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -713,10 +855,12 @@ define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) { ; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<14>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd1; ; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; ; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; ; CHECK-NEXT: max.NaN.f32 %f8, %f3, %f7; ; CHECK-NEXT: max.NaN.f32 %f9, %f1, %f5; @@ -801,15 +945,20 @@ define float @reduce_fminimum_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fminimum_float( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fminimum_float_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fminimum_float_param_0]; -; CHECK-NEXT: min.NaN.f32 %f9, %f4, %f8; -; CHECK-NEXT: min.NaN.f32 %f10, %f2, %f6; -; CHECK-NEXT: min.NaN.f32 %f11, %f10, %f9; -; CHECK-NEXT: min.NaN.f32 %f12, %f3, %f7; -; CHECK-NEXT: min.NaN.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: min.NaN.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: min.NaN.f32 %f10, %f9, %f7; +; CHECK-NEXT: min.NaN.f32 %f11, %f10, %f5; +; CHECK-NEXT: min.NaN.f32 %f12, %f3, %f1; +; CHECK-NEXT: min.NaN.f32 %f13, %f8, %f6; ; CHECK-NEXT: min.NaN.f32 %f14, %f13, %f12; ; CHECK-NEXT: min.NaN.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -823,15 +972,20 @@ define float @reduce_fminimum_float_reassoc(<8 x float> %in) { ; CHECK-LABEL: reduce_fminimum_float_reassoc( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<16>; +; CHECK-NEXT: .reg .b64 %rd<5>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fminimum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fminimum_float_reassoc_param_0]; -; CHECK-NEXT: min.NaN.f32 %f9, %f4, %f8; -; CHECK-NEXT: min.NaN.f32 %f10, %f2, %f6; -; CHECK-NEXT: min.NaN.f32 %f11, %f10, %f9; -; CHECK-NEXT: min.NaN.f32 %f12, %f3, %f7; -; CHECK-NEXT: min.NaN.f32 %f13, %f1, %f5; +; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-NEXT: mov.b64 {%f1, %f2}, %rd4; +; CHECK-NEXT: mov.b64 {%f3, %f4}, %rd2; +; CHECK-NEXT: min.NaN.f32 %f5, %f4, %f2; +; CHECK-NEXT: mov.b64 {%f6, %f7}, %rd3; +; CHECK-NEXT: mov.b64 {%f8, %f9}, %rd1; +; CHECK-NEXT: min.NaN.f32 %f10, %f9, %f7; +; CHECK-NEXT: min.NaN.f32 %f11, %f10, %f5; +; CHECK-NEXT: min.NaN.f32 %f12, %f3, %f1; +; CHECK-NEXT: min.NaN.f32 %f13, %f8, %f6; ; CHECK-NEXT: min.NaN.f32 %f14, %f13, %f12; ; CHECK-NEXT: min.NaN.f32 %f15, %f14, %f11; ; CHECK-NEXT: st.param.f32 [func_retval0], %f15; @@ -845,10 +999,12 @@ define float 
@reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) { ; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2( ; CHECK: { ; CHECK-NEXT: .reg .b32 %f<14>; +; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-NEXT: mov.b64 {%f5, %f6}, %rd1; ; CHECK-NEXT: ld.param.f32 %f7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.f32 {%f5, %f6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; ; CHECK-NEXT: ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; ; CHECK-NEXT: min.NaN.f32 %f8, %f3, %f7; ; CHECK-NEXT: min.NaN.f32 %f9, %f1, %f5; diff --git a/llvm/test/CodeGen/NVPTX/vec-param-load.ll b/llvm/test/CodeGen/NVPTX/vec-param-load.ll index 5dea424c7dcc9..d50d0828faf65 100644 --- a/llvm/test/CodeGen/NVPTX/vec-param-load.ll +++ b/llvm/test/CodeGen/NVPTX/vec-param-load.ll @@ -5,40 +5,40 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 define <16 x float> @test_v16f32(<16 x float> %a) { ; CHECK-LABEL: test_v16f32( -; CHECK-DAG: ld.param.v4.f32 {[[V_12_15:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+48]; -; CHECK-DAG: ld.param.v4.f32 {[[V_8_11:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+32]; -; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+16]; -; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} -; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]} -; CHECK-DAG: st.param.v4.f32 [func_retval0+32], {[[V_8_11]]} -; CHECK-DAG: st.param.v4.f32 [func_retval0+48], {[[V_12_15]]} +; CHECK-DAG: ld.param.v2.b64 {[[V_12_15:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+48]; +; CHECK-DAG: ld.param.v2.b64 {[[V_8_11:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+32]; +; CHECK-DAG: ld.param.v2.b64 {[[V_4_7:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0+16]; +; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v16f32_param_0]; +; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]} +; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_4_7]]} +; CHECK-DAG: st.param.v2.b64 [func_retval0+32], {[[V_8_11]]} +; CHECK-DAG: st.param.v2.b64 [func_retval0+48], {[[V_12_15]]} ; CHECK: ret; ret <16 x float> %a } define <8 x float> @test_v8f32(<8 x float> %a) { ; CHECK-LABEL: test_v8f32( -; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0+16]; -; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} -; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]} +; CHECK-DAG: ld.param.v2.b64 {[[V_4_7:(%rd[0-9]+[, ]*){2}]]}, [test_v8f32_param_0+16]; +; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v8f32_param_0]; +; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]} +; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_4_7]]} ; CHECK: ret; ret <8 x float> %a } define <4 x float> @test_v4f32(<4 x float> %a) { ; CHECK-LABEL: test_v4f32( -; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v4f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} +; CHECK-DAG: ld.param.v2.b64 {[[V_0_3:(%rd[0-9]+[, ]*){2}]]}, [test_v4f32_param_0]; +; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_3]]} ; CHECK: ret; ret <4 x float> %a } define <2 x float> @test_v2f32(<2 x float> %a) { ; CHECK-LABEL: test_v2f32( -; CHECK-DAG: 
ld.param.v2.f32 {[[V_0_3:(%f[0-9]+[, ]*){2}]]}, [test_v2f32_param_0]; -; CHECK-DAG: st.param.v2.f32 [func_retval0], {[[V_0_3]]} +; CHECK-DAG: ld.param.b64 [[V_0_3:%rd[0-9]+]], [test_v2f32_param_0]; +; CHECK-DAG: st.param.b64 [func_retval0], [[V_0_3]] ; CHECK: ret; ret <2 x float> %a } diff --git a/llvm/test/CodeGen/NVPTX/vector-loads.ll b/llvm/test/CodeGen/NVPTX/vector-loads.ll index d731985ae9710..4c65cdbe25ad9 100644 --- a/llvm/test/CodeGen/NVPTX/vector-loads.ll +++ b/llvm/test/CodeGen/NVPTX/vector-loads.ll @@ -101,18 +101,18 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177 define void @extv8f16_global_a16(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 { ; CHECK: ld.global.v4.b32 {%r %v = load <8 x half>, ptr addrspace(1) %src, align 16 -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs +; CHECK-DAG: mov.b32 {%[[RS0:rs[0-9]+]], %[[RS1:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS2:rs[0-9]+]], %[[RS3:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS4:rs[0-9]+]], %[[RS5:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS6:rs[0-9]+]], %[[RS7:rs[0-9]+]]} +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS0]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS1]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS2]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS3]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS4]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS5]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS6]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS7]] %ext = fpext <8 x half> %v to <8 x float> ; CHECK: st.global.v4.f32 ; CHECK: st.global.v4.f32 @@ -151,18 +151,18 @@ define void @extv8f16_global_a4(ptr addrspace(1) noalias readonly align 16 %dst, define void @extv8f16_generic_a16(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 { ; CHECK: ld.v4.b32 {%r %v = load <8 x half>, ptr %src, align 16 -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: mov.b32 {%rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs -; CHECK: cvt.f32.f16 %f{{.*}}, %rs +; CHECK-DAG: mov.b32 {%[[RS0:rs[0-9]+]], %[[RS1:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS2:rs[0-9]+]], %[[RS3:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS4:rs[0-9]+]], %[[RS5:rs[0-9]+]]} +; CHECK-DAG: mov.b32 {%[[RS6:rs[0-9]+]], %[[RS7:rs[0-9]+]]} +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS0]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS1]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS2]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS3]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS4]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS5]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS6]] +; CHECK-DAG: cvt.f32.f16 %f{{.*}}, %[[RS7]] %ext = fpext <8 x half> %v to <8 x float> ; CHECK: st.v4.f32 ; CHECK: st.v4.f32 diff --git a/llvm/test/CodeGen/NVPTX/vector-stores.ll b/llvm/test/CodeGen/NVPTX/vector-stores.ll index cbcaf5fc3822e..b1cb23ea8e672 100644 --- a/llvm/test/CodeGen/NVPTX/vector-stores.ll +++ b/llvm/test/CodeGen/NVPTX/vector-stores.ll @@ -2,7 +2,7 @@ ; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 
-mcpu=sm_20 | %ptxas-verify %} ; CHECK-LABEL: .visible .func foo1 -; CHECK: st.v2.f32 +; CHECK: st.u64 define void @foo1(<2 x float> %val, ptr %ptr) { store <2 x float> %val, ptr %ptr ret void