32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
// Maps an integer immediate (presumably a selection-control hint attached to
// a conditional branch) onto a SPIR-V SelectionControl mask.
// NOTE(review): the `if (Imm == ...)` guard lines (orig. 49/51/53) are missing
// from this extraction, so the three returns below appear unconditional here;
// confirm the guards against the full source.
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
    int Imm) {
 50 return SPIRV::SelectionControl::Flatten;
 52 return SPIRV::SelectionControl::DontFlatten;
 54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
215 template <
bool Signed>
218 template <
bool Signed>
236 bool IsSigned,
unsigned Opcode)
const;
238 bool IsSigned)
const;
244 bool IsSigned)
const;
283 GL::GLSLExtInst GLInst)
const;
288 GL::GLSLExtInst GLInst)
const;
320 std::pair<Register, bool>
322 const SPIRVType *ResType =
nullptr)
const;
334 SPIRV::StorageClass::StorageClass SC)
const;
341 SPIRV::StorageClass::StorageClass SC,
353 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
356 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
// Returns true when the handle's target-extension type is "spirv.SignedImage"
// and its sampled type (type parameter 0) is an integer type; a plain
// "spirv.Image" handle is not treated as signed.
// NOTE(review): the declaration of `TET` (presumably a cast of HandleType to
// TargetExtType, orig. line 364) is missing from this extraction -- confirm
// in the full source. The early-return body of the "spirv.Image" branch is
// also not visible here.
363bool sampledTypeIsSignedInteger(
    const llvm::Type *HandleType) {
365  if (
TET->getTargetExtName() ==
"spirv.Image") {
// Anything that is not "spirv.Image" is asserted to be "spirv.SignedImage".
368    assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
369  return TET->getTypeParameter(0)->isIntegerTy();
373#define GET_GLOBALISEL_IMPL
374#include "SPIRVGenGlobalISel.inc"
375#undef GET_GLOBALISEL_IMPL
381 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
384#include
"SPIRVGenGlobalISel.inc"
387#include
"SPIRVGenGlobalISel.inc"
399 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
403void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
404 if (HasVRegsReset == &MF)
409 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
411 LLT RegType =
MRI.getType(
Reg);
419 for (
const auto &
MBB : MF) {
420 for (
const auto &
MI :
MBB) {
423 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
427 LLT DstType =
MRI.getType(DstReg);
429 LLT SrcType =
MRI.getType(SrcReg);
430 if (DstType != SrcType)
431 MRI.setType(DstReg,
MRI.getType(SrcReg));
433 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
434 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
435 if (DstRC != SrcRC && SrcRC)
436 MRI.setRegClass(DstReg, SrcRC);
452 case TargetOpcode::G_CONSTANT:
453 case TargetOpcode::G_FCONSTANT:
455 case TargetOpcode::G_INTRINSIC:
456 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
457 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
459 Intrinsic::spv_const_composite;
460 case TargetOpcode::G_BUILD_VECTOR:
461 case TargetOpcode::G_SPLAT_VECTOR: {
472 case SPIRV::OpConstantTrue:
473 case SPIRV::OpConstantFalse:
474 case SPIRV::OpConstantI:
475 case SPIRV::OpConstantF:
476 case SPIRV::OpConstantComposite:
477 case SPIRV::OpConstantCompositeContinuedINTEL:
478 case SPIRV::OpConstantSampler:
479 case SPIRV::OpConstantNull:
481 case SPIRV::OpConstantFunctionPointerINTEL:
497 for (
const auto &MO :
MI.all_defs()) {
499 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
502 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
503 MI.isLifetimeMarker())
507 if (
MI.mayStore() ||
MI.isCall() ||
508 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
509 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
514bool SPIRVInstructionSelector::select(MachineInstr &
I) {
515 resetVRegsType(*
I.getParent()->getParent());
517 assert(
I.getParent() &&
"Instruction should be in a basic block!");
518 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
523 if (Opcode == SPIRV::ASSIGN_TYPE) {
524 Register DstReg =
I.getOperand(0).getReg();
525 Register SrcReg =
I.getOperand(1).getReg();
526 auto *
Def =
MRI->getVRegDef(SrcReg);
528 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
529 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
531 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
532 Register SelectDstReg =
Def->getOperand(0).getReg();
536 Def->removeFromParent();
537 MRI->replaceRegWith(DstReg, SelectDstReg);
539 I.removeFromParent();
541 Res = selectImpl(
I, *CoverageInfo);
543 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
544 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
548 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
555 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
556 MRI->replaceRegWith(SrcReg, DstReg);
558 I.removeFromParent();
560 }
else if (
I.getNumDefs() == 1) {
567 if (DeadMIs.contains(&
I)) {
577 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
578 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
584 bool HasDefs =
I.getNumDefs() > 0;
587 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
588 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
589 if (spvSelect(ResVReg, ResType,
I)) {
591 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
594 I.removeFromParent();
602 case TargetOpcode::G_CONSTANT:
603 case TargetOpcode::G_FCONSTANT:
605 case TargetOpcode::G_SADDO:
606 case TargetOpcode::G_SSUBO:
613 MachineInstr &
I)
const {
614 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
615 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
616 if (DstRC != SrcRC && SrcRC)
617 MRI->setRegClass(DestReg, SrcRC);
618 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
619 TII.get(TargetOpcode::COPY))
625bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
627 MachineInstr &
I)
const {
628 const unsigned Opcode =
I.getOpcode();
630 return selectImpl(
I, *CoverageInfo);
632 case TargetOpcode::G_CONSTANT:
633 case TargetOpcode::G_FCONSTANT:
634 return selectConst(ResVReg, ResType,
I);
635 case TargetOpcode::G_GLOBAL_VALUE:
636 return selectGlobalValue(ResVReg,
I);
637 case TargetOpcode::G_IMPLICIT_DEF:
638 return selectOpUndef(ResVReg, ResType,
I);
639 case TargetOpcode::G_FREEZE:
640 return selectFreeze(ResVReg, ResType,
I);
642 case TargetOpcode::G_INTRINSIC:
643 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
644 case TargetOpcode::G_INTRINSIC_CONVERGENT:
645 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
646 return selectIntrinsic(ResVReg, ResType,
I);
647 case TargetOpcode::G_BITREVERSE:
648 return selectBitreverse(ResVReg, ResType,
I);
650 case TargetOpcode::G_BUILD_VECTOR:
651 return selectBuildVector(ResVReg, ResType,
I);
652 case TargetOpcode::G_SPLAT_VECTOR:
653 return selectSplatVector(ResVReg, ResType,
I);
655 case TargetOpcode::G_SHUFFLE_VECTOR: {
656 MachineBasicBlock &BB = *
I.getParent();
657 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
660 .
addUse(
I.getOperand(1).getReg())
661 .
addUse(
I.getOperand(2).getReg());
662 for (
auto V :
I.getOperand(3).getShuffleMask())
666 case TargetOpcode::G_MEMMOVE:
667 case TargetOpcode::G_MEMCPY:
668 case TargetOpcode::G_MEMSET:
669 return selectMemOperation(ResVReg,
I);
671 case TargetOpcode::G_ICMP:
672 return selectICmp(ResVReg, ResType,
I);
673 case TargetOpcode::G_FCMP:
674 return selectFCmp(ResVReg, ResType,
I);
676 case TargetOpcode::G_FRAME_INDEX:
677 return selectFrameIndex(ResVReg, ResType,
I);
679 case TargetOpcode::G_LOAD:
680 return selectLoad(ResVReg, ResType,
I);
681 case TargetOpcode::G_STORE:
682 return selectStore(
I);
684 case TargetOpcode::G_BR:
685 return selectBranch(
I);
686 case TargetOpcode::G_BRCOND:
687 return selectBranchCond(
I);
689 case TargetOpcode::G_PHI:
690 return selectPhi(ResVReg, ResType,
I);
692 case TargetOpcode::G_FPTOSI:
693 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
694 case TargetOpcode::G_FPTOUI:
695 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
697 case TargetOpcode::G_FPTOSI_SAT:
698 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
699 case TargetOpcode::G_FPTOUI_SAT:
700 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
702 case TargetOpcode::G_SITOFP:
703 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
704 case TargetOpcode::G_UITOFP:
705 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
707 case TargetOpcode::G_CTPOP:
708 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
709 case TargetOpcode::G_SMIN:
710 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
711 case TargetOpcode::G_UMIN:
712 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
714 case TargetOpcode::G_SMAX:
715 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
716 case TargetOpcode::G_UMAX:
717 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
719 case TargetOpcode::G_SCMP:
720 return selectSUCmp(ResVReg, ResType,
I,
true);
721 case TargetOpcode::G_UCMP:
722 return selectSUCmp(ResVReg, ResType,
I,
false);
723 case TargetOpcode::G_LROUND:
724 case TargetOpcode::G_LLROUND: {
726 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
727 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
729 regForLround, *(
I.getParent()->getParent()));
731 I, CL::round, GL::Round);
733 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
739 case TargetOpcode::G_STRICT_FMA:
740 case TargetOpcode::G_FMA:
741 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
743 case TargetOpcode::G_STRICT_FLDEXP:
744 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
746 case TargetOpcode::G_FPOW:
747 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
748 case TargetOpcode::G_FPOWI:
749 return selectExtInst(ResVReg, ResType,
I, CL::pown);
751 case TargetOpcode::G_FEXP:
752 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
753 case TargetOpcode::G_FEXP2:
754 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
756 case TargetOpcode::G_FLOG:
757 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
758 case TargetOpcode::G_FLOG2:
759 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
760 case TargetOpcode::G_FLOG10:
761 return selectLog10(ResVReg, ResType,
I);
763 case TargetOpcode::G_FABS:
764 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
765 case TargetOpcode::G_ABS:
766 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
768 case TargetOpcode::G_FMINNUM:
769 case TargetOpcode::G_FMINIMUM:
770 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
771 case TargetOpcode::G_FMAXNUM:
772 case TargetOpcode::G_FMAXIMUM:
773 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
775 case TargetOpcode::G_FCOPYSIGN:
776 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
778 case TargetOpcode::G_FCEIL:
779 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
780 case TargetOpcode::G_FFLOOR:
781 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
783 case TargetOpcode::G_FCOS:
784 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
785 case TargetOpcode::G_FSIN:
786 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
787 case TargetOpcode::G_FTAN:
788 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
789 case TargetOpcode::G_FACOS:
790 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
791 case TargetOpcode::G_FASIN:
792 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
793 case TargetOpcode::G_FATAN:
794 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
795 case TargetOpcode::G_FATAN2:
796 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
797 case TargetOpcode::G_FCOSH:
798 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
799 case TargetOpcode::G_FSINH:
800 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
801 case TargetOpcode::G_FTANH:
802 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
804 case TargetOpcode::G_STRICT_FSQRT:
805 case TargetOpcode::G_FSQRT:
806 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
808 case TargetOpcode::G_CTTZ:
809 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
810 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
811 case TargetOpcode::G_CTLZ:
812 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
813 return selectExtInst(ResVReg, ResType,
I, CL::clz);
815 case TargetOpcode::G_INTRINSIC_ROUND:
816 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
817 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
818 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
819 case TargetOpcode::G_INTRINSIC_TRUNC:
820 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
821 case TargetOpcode::G_FRINT:
822 case TargetOpcode::G_FNEARBYINT:
823 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
825 case TargetOpcode::G_SMULH:
826 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
827 case TargetOpcode::G_UMULH:
828 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
830 case TargetOpcode::G_SADDSAT:
831 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
832 case TargetOpcode::G_UADDSAT:
833 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
834 case TargetOpcode::G_SSUBSAT:
835 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
836 case TargetOpcode::G_USUBSAT:
837 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
839 case TargetOpcode::G_FFREXP:
840 return selectFrexp(ResVReg, ResType,
I);
842 case TargetOpcode::G_UADDO:
843 return selectOverflowArith(ResVReg, ResType,
I,
844 ResType->
getOpcode() == SPIRV::OpTypeVector
845 ? SPIRV::OpIAddCarryV
846 : SPIRV::OpIAddCarryS);
847 case TargetOpcode::G_USUBO:
848 return selectOverflowArith(ResVReg, ResType,
I,
849 ResType->
getOpcode() == SPIRV::OpTypeVector
850 ? SPIRV::OpISubBorrowV
851 : SPIRV::OpISubBorrowS);
852 case TargetOpcode::G_UMULO:
853 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
854 case TargetOpcode::G_SMULO:
855 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
857 case TargetOpcode::G_SEXT:
858 return selectExt(ResVReg, ResType,
I,
true);
859 case TargetOpcode::G_ANYEXT:
860 case TargetOpcode::G_ZEXT:
861 return selectExt(ResVReg, ResType,
I,
false);
862 case TargetOpcode::G_TRUNC:
863 return selectTrunc(ResVReg, ResType,
I);
864 case TargetOpcode::G_FPTRUNC:
865 case TargetOpcode::G_FPEXT:
866 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
868 case TargetOpcode::G_PTRTOINT:
869 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
870 case TargetOpcode::G_INTTOPTR:
871 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
872 case TargetOpcode::G_BITCAST:
873 return selectBitcast(ResVReg, ResType,
I);
874 case TargetOpcode::G_ADDRSPACE_CAST:
875 return selectAddrSpaceCast(ResVReg, ResType,
I);
876 case TargetOpcode::G_PTR_ADD: {
878 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
882 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
883 (*II).getOpcode() == TargetOpcode::COPY ||
884 (*II).getOpcode() == SPIRV::OpVariable) &&
887 bool IsGVInit =
false;
889 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
890 UseEnd =
MRI->use_instr_end();
891 UseIt != UseEnd; UseIt = std::next(UseIt)) {
892 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
893 (*UseIt).getOpcode() == SPIRV::OpVariable) {
903 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
906 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
907 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
916 "incompatible result and operand types in a bitcast");
918 MachineInstrBuilder MIB =
919 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
926 ? SPIRV::OpInBoundsAccessChain
927 : SPIRV::OpInBoundsPtrAccessChain))
931 .
addUse(
I.getOperand(2).getReg())
934 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
938 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
940 .
addUse(
I.getOperand(2).getReg())
948 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
951 .
addImm(
static_cast<uint32_t
>(
952 SPIRV::Opcode::InBoundsPtrAccessChain))
955 .
addUse(
I.getOperand(2).getReg());
959 case TargetOpcode::G_ATOMICRMW_OR:
960 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
961 case TargetOpcode::G_ATOMICRMW_ADD:
962 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
963 case TargetOpcode::G_ATOMICRMW_AND:
964 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
965 case TargetOpcode::G_ATOMICRMW_MAX:
966 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
967 case TargetOpcode::G_ATOMICRMW_MIN:
968 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
969 case TargetOpcode::G_ATOMICRMW_SUB:
970 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
971 case TargetOpcode::G_ATOMICRMW_XOR:
972 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
973 case TargetOpcode::G_ATOMICRMW_UMAX:
974 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
975 case TargetOpcode::G_ATOMICRMW_UMIN:
976 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
977 case TargetOpcode::G_ATOMICRMW_XCHG:
978 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
979 case TargetOpcode::G_ATOMIC_CMPXCHG:
980 return selectAtomicCmpXchg(ResVReg, ResType,
I);
982 case TargetOpcode::G_ATOMICRMW_FADD:
983 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
984 case TargetOpcode::G_ATOMICRMW_FSUB:
986 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
988 case TargetOpcode::G_ATOMICRMW_FMIN:
989 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
990 case TargetOpcode::G_ATOMICRMW_FMAX:
991 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
993 case TargetOpcode::G_FENCE:
994 return selectFence(
I);
996 case TargetOpcode::G_STACKSAVE:
997 return selectStackSave(ResVReg, ResType,
I);
998 case TargetOpcode::G_STACKRESTORE:
999 return selectStackRestore(
I);
1001 case TargetOpcode::G_UNMERGE_VALUES:
1007 case TargetOpcode::G_TRAP:
1008 case TargetOpcode::G_UBSANTRAP:
1009 case TargetOpcode::DBG_LABEL:
1011 case TargetOpcode::G_DEBUGTRAP:
1012 return selectDebugTrap(ResVReg, ResType,
I);
1019bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1021 MachineInstr &
I)
const {
1022 unsigned Opcode = SPIRV::OpNop;
1024 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1028bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1031 GL::GLSLExtInst GLInst)
const {
1033 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1034 std::string DiagMsg;
1035 raw_string_ostream OS(DiagMsg);
1036 I.print(OS,
true,
false,
false,
false);
1037 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1040 return selectExtInst(ResVReg, ResType,
I,
1041 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
// Convenience overload: select an OpExtInst available only in the OpenCL
// extended instruction set, delegating to the list-based overload with a
// single {OpenCL_std, CLInst} entry.
// NOTE(review): the parameter lines declaring ResType and I (orig. 1045-1046)
// and the closing brace are missing from this extraction.
1044bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1047 CL::OpenCLExtInst CLInst)
const {
1048 return selectExtInst(ResVReg, ResType,
I,
1049 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Convenience overload: select an OpExtInst that exists in both the OpenCL
// and GLSL extended instruction sets. Builds a two-entry candidate list and
// delegates to the list-based overload, which picks the set the target can
// actually use.
// NOTE(review): the parameter lines declaring ResType and I (orig. 1053-1054)
// and the closing brace are missing from this extraction.
1052bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1055 CL::OpenCLExtInst CLInst,
1056 GL::GLSLExtInst GLInst)
const {
1057 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1058 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1059 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1062bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1067 for (
const auto &Ex : Insts) {
1068 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1069 uint32_t Opcode = Ex.second;
1072 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1075 .
addImm(
static_cast<uint32_t
>(Set))
1078 const unsigned NumOps =
I.getNumOperands();
1081 I.getOperand(Index).getType() ==
1082 MachineOperand::MachineOperandType::MO_IntrinsicID)
1085 MIB.
add(
I.getOperand(Index));
// lround/llround variant of the two-set convenience overload above: builds an
// {OpenCL_std, GLSL_std_450} candidate list and delegates to the list-based
// selectExtInstForLRound.
// NOTE(review): the first parameter line (orig. 1092, presumably ResVReg /
// ResType / I) and the closing brace are missing from this extraction.
1091bool SPIRVInstructionSelector::selectExtInstForLRound(
1093 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
const {
1094 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1095 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1096 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1099bool SPIRVInstructionSelector::selectExtInstForLRound(
1102 for (
const auto &Ex : Insts) {
1103 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1104 uint32_t Opcode = Ex.second;
1107 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1110 .
addImm(
static_cast<uint32_t
>(Set))
1112 const unsigned NumOps =
I.getNumOperands();
1115 I.getOperand(Index).getType() ==
1116 MachineOperand::MachineOperandType::MO_IntrinsicID)
1119 MIB.
add(
I.getOperand(Index));
1127bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1129 MachineInstr &
I)
const {
1130 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1131 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1132 for (
const auto &Ex : ExtInsts) {
1133 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1134 uint32_t Opcode = Ex.second;
1138 MachineIRBuilder MIRBuilder(
I);
1141 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1146 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1147 TII.get(SPIRV::OpVariable))
1150 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1154 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1157 .
addImm(
static_cast<uint32_t
>(Ex.first))
1159 .
add(
I.getOperand(2))
1164 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1165 .
addDef(
I.getOperand(1).getReg())
1174bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1177 std::vector<Register> Srcs,
1178 unsigned Opcode)
const {
1179 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1188bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1191 unsigned Opcode)
const {
1193 Register SrcReg =
I.getOperand(1).getReg();
1196 MRI->def_instr_begin(SrcReg);
1197 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1198 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1199 (*DefIt).getOpcode() == SPIRV::OpVariable) {
1205 uint32_t SpecOpcode = 0;
1207 case SPIRV::OpConvertPtrToU:
1208 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1210 case SPIRV::OpConvertUToPtr:
1211 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1215 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1216 TII.get(SPIRV::OpSpecConstantOp))
1224 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1228bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1230 MachineInstr &
I)
const {
1231 Register OpReg =
I.getOperand(1).getReg();
1235 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1243 if (
MemOp->isVolatile())
1244 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1245 if (
MemOp->isNonTemporal())
1246 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1247 if (
MemOp->getAlign().value())
1248 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1254 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1255 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1259 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1261 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1265 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1269 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1271 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1283 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1285 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1287 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1291bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1293 MachineInstr &
I)
const {
1300 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1301 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1303 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1305 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1307 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1311 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1312 return generateImageRead(ResVReg, ResType, NewHandleReg, IdxReg,
1313 I.getDebugLoc(),
I);
1317 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1321 if (!
I.getNumMemOperands()) {
1322 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1324 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1327 MachineIRBuilder MIRBuilder(
I);
1333bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1335 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1341 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1342 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1344 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1347 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1351 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1352 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1353 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1354 TII.get(SPIRV::OpImageWrite))
1360 if (sampledTypeIsSignedInteger(LLVMHandleType))
1363 return BMI.constrainAllUses(
TII,
TRI, RBI);
1368 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1371 if (!
I.getNumMemOperands()) {
1372 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1374 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1377 MachineIRBuilder MIRBuilder(
I);
// Lowers llvm.stacksave to OpSaveMemoryINTEL. Requires the
// SPV_INTEL_variable_length_array extension; otherwise reports a fatal error
// (the report_fatal_error call line, orig. 1387, is missing from this
// extraction, as are the result-operand lines and closing brace).
1383bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1385 MachineInstr &
I)
const {
1386 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1388 "llvm.stacksave intrinsic: this instruction requires the following "
1389 "SPIR-V extension: SPV_INTEL_variable_length_array",
1392 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
// Lowers llvm.stackrestore to OpRestoreMemoryINTEL, taking the saved pointer
// from operand 0. Requires SPV_INTEL_variable_length_array; otherwise reports
// a fatal error (the report_fatal_error call line, orig. 1400, is missing
// from this extraction, as is the body of the !isReg() guard and the
// constrainAllUses tail / closing brace).
1398bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1399 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1401 "llvm.stackrestore intrinsic: this instruction requires the following "
1402 "SPIR-V extension: SPV_INTEL_variable_length_array",
// Defensive check: the saved-stack operand must be a register.
1404 if (!
I.getOperand(0).isReg())
1407 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1408 .
addUse(
I.getOperand(0).getReg())
1412bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1413 MachineInstr &
I)
const {
1415 Register SrcReg =
I.getOperand(1).getReg();
1417 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1418 MachineIRBuilder MIRBuilder(
I);
1419 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1422 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1423 Type *ArrTy = ArrayType::get(ValTy, Num);
1425 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1428 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1435 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1440 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1443 .
addImm(SPIRV::StorageClass::UniformConstant)
1452 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1454 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1456 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1457 .
addUse(
I.getOperand(0).getReg())
1459 .
addUse(
I.getOperand(2).getReg());
1460 if (
I.getNumMemOperands()) {
1461 MachineIRBuilder MIRBuilder(
I);
1470bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1474 unsigned NegateOpcode)
const {
1477 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1480 auto ScopeConstant = buildI32Constant(Scope,
I);
1481 Register ScopeReg = ScopeConstant.first;
1482 Result &= ScopeConstant.second;
1490 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1491 Register MemSemReg = MemSemConstant.first;
1492 Result &= MemSemConstant.second;
1494 Register ValueReg =
I.getOperand(2).getReg();
1495 if (NegateOpcode != 0) {
1498 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1503 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1513bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1514 unsigned ArgI =
I.getNumOperands() - 1;
1516 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1519 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1521 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1527 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1528 Register ResVReg =
I.getOperand(i).getReg();
1532 ResType = ScalarType;
1538 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1542 .
addImm(
static_cast<int64_t
>(i));
1548bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1551 auto MemSemConstant = buildI32Constant(MemSem,
I);
1552 Register MemSemReg = MemSemConstant.first;
1553 bool Result = MemSemConstant.second;
1555 uint32_t
Scope =
static_cast<uint32_t
>(
1557 auto ScopeConstant = buildI32Constant(Scope,
I);
1558 Register ScopeReg = ScopeConstant.first;
1559 Result &= ScopeConstant.second;
1562 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1568bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1571 unsigned Opcode)
const {
1572 Type *ResTy =
nullptr;
1576 "Not enough info to select the arithmetic with overflow instruction");
1579 "with overflow instruction");
1585 MachineIRBuilder MIRBuilder(
I);
1587 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1588 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1594 Register ZeroReg = buildZerosVal(ResType,
I);
1597 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1599 if (ResName.
size() > 0)
1604 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1607 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1608 MIB.
addUse(
I.getOperand(i).getReg());
1613 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1614 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1616 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1617 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1625 .
addDef(
I.getOperand(1).getReg())
1632bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1634 MachineInstr &
I)
const {
1642 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1645 auto ScopeConstant = buildI32Constant(Scope,
I);
1646 ScopeReg = ScopeConstant.first;
1647 Result &= ScopeConstant.second;
1649 unsigned ScSem =
static_cast<uint32_t
>(
1652 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1653 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1654 MemSemEqReg = MemSemEqConstant.first;
1655 Result &= MemSemEqConstant.second;
1657 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1658 if (MemSemEq == MemSemNeq)
1659 MemSemNeqReg = MemSemEqReg;
1661 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1662 MemSemNeqReg = MemSemNeqConstant.first;
1663 Result &= MemSemNeqConstant.second;
1666 ScopeReg =
I.getOperand(5).getReg();
1667 MemSemEqReg =
I.getOperand(6).getReg();
1668 MemSemNeqReg =
I.getOperand(7).getReg();
1672 Register Val =
I.getOperand(4).getReg();
1677 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1704 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1715 case SPIRV::StorageClass::DeviceOnlyINTEL:
1716 case SPIRV::StorageClass::HostOnlyINTEL:
1725 bool IsGRef =
false;
1726 bool IsAllowedRefs =
1727 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1728 unsigned Opcode = It.getOpcode();
1729 if (Opcode == SPIRV::OpConstantComposite ||
1730 Opcode == SPIRV::OpVariable ||
1731 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1732 return IsGRef = true;
1733 return Opcode == SPIRV::OpName;
1735 return IsAllowedRefs && IsGRef;
1738Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1739 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1741 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
1745SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
1747 uint32_t Opcode)
const {
1748 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1749 TII.get(SPIRV::OpSpecConstantOp))
1757SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
1761 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1763 SPIRV::StorageClass::Generic),
1765 MachineFunction *MF =
I.getParent()->getParent();
1767 MachineInstrBuilder MIB = buildSpecConstantOp(
1769 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
1779bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1781 MachineInstr &
I)
const {
1785 Register SrcPtr =
I.getOperand(1).getReg();
1789 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1790 ResType->
getOpcode() != SPIRV::OpTypePointer)
1791 return BuildCOPY(ResVReg, SrcPtr,
I);
1801 unsigned SpecOpcode =
1803 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1806 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1813 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1814 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1815 .constrainAllUses(
TII,
TRI, RBI);
1817 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1819 buildSpecConstantOp(
1821 getUcharPtrTypeReg(
I, DstSC),
1822 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1823 .constrainAllUses(
TII,
TRI, RBI);
1829 return BuildCOPY(ResVReg, SrcPtr,
I);
1831 if ((SrcSC == SPIRV::StorageClass::Function &&
1832 DstSC == SPIRV::StorageClass::Private) ||
1833 (DstSC == SPIRV::StorageClass::Function &&
1834 SrcSC == SPIRV::StorageClass::Private))
1835 return BuildCOPY(ResVReg, SrcPtr,
I);
1839 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1842 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1863 return selectUnOp(ResVReg, ResType,
I,
1864 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1866 return selectUnOp(ResVReg, ResType,
I,
1867 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1869 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1871 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1881 return SPIRV::OpFOrdEqual;
1883 return SPIRV::OpFOrdGreaterThanEqual;
1885 return SPIRV::OpFOrdGreaterThan;
1887 return SPIRV::OpFOrdLessThanEqual;
1889 return SPIRV::OpFOrdLessThan;
1891 return SPIRV::OpFOrdNotEqual;
1893 return SPIRV::OpOrdered;
1895 return SPIRV::OpFUnordEqual;
1897 return SPIRV::OpFUnordGreaterThanEqual;
1899 return SPIRV::OpFUnordGreaterThan;
1901 return SPIRV::OpFUnordLessThanEqual;
1903 return SPIRV::OpFUnordLessThan;
1905 return SPIRV::OpFUnordNotEqual;
1907 return SPIRV::OpUnordered;
1917 return SPIRV::OpIEqual;
1919 return SPIRV::OpINotEqual;
1921 return SPIRV::OpSGreaterThanEqual;
1923 return SPIRV::OpSGreaterThan;
1925 return SPIRV::OpSLessThanEqual;
1927 return SPIRV::OpSLessThan;
1929 return SPIRV::OpUGreaterThanEqual;
1931 return SPIRV::OpUGreaterThan;
1933 return SPIRV::OpULessThanEqual;
1935 return SPIRV::OpULessThan;
1944 return SPIRV::OpPtrEqual;
1946 return SPIRV::OpPtrNotEqual;
1957 return SPIRV::OpLogicalEqual;
1959 return SPIRV::OpLogicalNotEqual;
1993bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
1996 unsigned OpAnyOrAll)
const {
1997 assert(
I.getNumOperands() == 3);
1998 assert(
I.getOperand(2).isReg());
2000 Register InputRegister =
I.getOperand(2).getReg();
2007 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2008 if (IsBoolTy && !IsVectorTy) {
2009 assert(ResVReg ==
I.getOperand(0).getReg());
2010 return BuildCOPY(ResVReg, InputRegister,
I);
2014 unsigned SpirvNotEqualId =
2015 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2022 IsBoolTy ? InputRegister
2031 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2051bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2053 MachineInstr &
I)
const {
2054 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2057bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2059 MachineInstr &
I)
const {
2060 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2064bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2066 MachineInstr &
I)
const {
2067 assert(
I.getNumOperands() == 4);
2068 assert(
I.getOperand(2).isReg());
2069 assert(
I.getOperand(3).isReg());
2076 "dot product requires a vector of at least 2 components");
2084 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2087 .
addUse(
I.getOperand(2).getReg())
2088 .
addUse(
I.getOperand(3).getReg())
2092bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2096 assert(
I.getNumOperands() == 4);
2097 assert(
I.getOperand(2).isReg());
2098 assert(
I.getOperand(3).isReg());
2101 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2105 .
addUse(
I.getOperand(2).getReg())
2106 .
addUse(
I.getOperand(3).getReg())
2112bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2114 assert(
I.getNumOperands() == 4);
2115 assert(
I.getOperand(2).isReg());
2116 assert(
I.getOperand(3).isReg());
2120 Register Vec0 =
I.getOperand(2).getReg();
2121 Register Vec1 =
I.getOperand(3).getReg();
2134 "dot product requires a vector of at least 2 components");
2148 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2171bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2173 MachineInstr &
I)
const {
2175 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2178 .
addUse(
I.getOperand(2).getReg())
2182bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2184 MachineInstr &
I)
const {
2186 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2189 .
addUse(
I.getOperand(2).getReg())
2193template <
bool Signed>
2194bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2196 MachineInstr &
I)
const {
2197 assert(
I.getNumOperands() == 5);
2198 assert(
I.getOperand(2).isReg());
2199 assert(
I.getOperand(3).isReg());
2200 assert(
I.getOperand(4).isReg());
2203 Register Acc =
I.getOperand(2).getReg();
2207 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2227template <
bool Signed>
2228bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2230 assert(
I.getNumOperands() == 5);
2231 assert(
I.getOperand(2).isReg());
2232 assert(
I.getOperand(3).isReg());
2233 assert(
I.getOperand(4).isReg());
2238 Register Acc =
I.getOperand(2).getReg();
2244 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2248 for (
unsigned i = 0; i < 4; i++) {
2250 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2261 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2281 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2293 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2309bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2311 MachineInstr &
I)
const {
2312 assert(
I.getNumOperands() == 3);
2313 assert(
I.getOperand(2).isReg());
2315 Register VZero = buildZerosValF(ResType,
I);
2316 Register VOne = buildOnesValF(ResType,
I);
2318 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2321 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2323 .
addUse(
I.getOperand(2).getReg())
2329bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2331 MachineInstr &
I)
const {
2332 assert(
I.getNumOperands() == 3);
2333 assert(
I.getOperand(2).isReg());
2335 Register InputRegister =
I.getOperand(2).getReg();
2337 auto &
DL =
I.getDebugLoc();
2347 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2349 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2351 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2358 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2363 if (NeedsConversion) {
2364 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2375bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2378 unsigned Opcode)
const {
2382 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2388 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2389 BMI.addUse(
I.getOperand(J).getReg());
2395bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2401 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2402 SPIRV::OpGroupNonUniformBallot);
2406 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2411 .
addImm(SPIRV::GroupOperation::Reduce)
2418bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2421 bool IsUnsigned)
const {
2422 assert(
I.getNumOperands() == 3);
2423 assert(
I.getOperand(2).isReg());
2425 Register InputRegister =
I.getOperand(2).getReg();
2434 auto IntegerOpcodeType =
2435 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2436 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2437 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2442 .
addImm(SPIRV::GroupOperation::Reduce)
2443 .
addUse(
I.getOperand(2).getReg())
2447bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2449 MachineInstr &
I)
const {
2450 assert(
I.getNumOperands() == 3);
2451 assert(
I.getOperand(2).isReg());
2453 Register InputRegister =
I.getOperand(2).getReg();
2463 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2464 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2469 .
addImm(SPIRV::GroupOperation::Reduce)
2470 .
addUse(
I.getOperand(2).getReg());
2473bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2475 MachineInstr &
I)
const {
2477 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2480 .
addUse(
I.getOperand(1).getReg())
2484bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2486 MachineInstr &
I)
const {
2492 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2494 Register OpReg =
I.getOperand(1).getReg();
2495 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2496 if (
Def->getOpcode() == TargetOpcode::COPY)
2497 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2499 switch (
Def->getOpcode()) {
2500 case SPIRV::ASSIGN_TYPE:
2501 if (MachineInstr *AssignToDef =
2502 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2503 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2504 Reg =
Def->getOperand(2).getReg();
2507 case SPIRV::OpUndef:
2508 Reg =
Def->getOperand(1).getReg();
2511 unsigned DestOpCode;
2513 DestOpCode = SPIRV::OpConstantNull;
2515 DestOpCode = TargetOpcode::COPY;
2518 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2519 .
addDef(
I.getOperand(0).getReg())
2526bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2528 MachineInstr &
I)
const {
2530 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2532 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2536 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2541 for (
unsigned i =
I.getNumExplicitDefs();
2542 i <
I.getNumExplicitOperands() && IsConst; ++i)
2546 if (!IsConst &&
N < 2)
2548 "There must be at least two constituent operands in a vector");
2551 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2552 TII.get(IsConst ? SPIRV::OpConstantComposite
2553 : SPIRV::OpCompositeConstruct))
2556 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2557 MIB.
addUse(
I.getOperand(i).getReg());
2561bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2563 MachineInstr &
I)
const {
2565 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2567 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2573 if (!
I.getOperand(
OpIdx).isReg())
2580 if (!IsConst &&
N < 2)
2582 "There must be at least two constituent operands in a vector");
2585 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2586 TII.get(IsConst ? SPIRV::OpConstantComposite
2587 : SPIRV::OpCompositeConstruct))
2590 for (
unsigned i = 0; i <
N; ++i)
2595bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2597 MachineInstr &
I)
const {
2602 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2604 Opcode = SPIRV::OpDemoteToHelperInvocation;
2606 Opcode = SPIRV::OpKill;
2608 if (MachineInstr *NextI =
I.getNextNode()) {
2610 NextI->removeFromParent();
2615 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2619bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2622 MachineInstr &
I)
const {
2623 Register Cmp0 =
I.getOperand(2).getReg();
2624 Register Cmp1 =
I.getOperand(3).getReg();
2627 "CMP operands should have the same type");
2628 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2637bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2639 MachineInstr &
I)
const {
2640 auto Pred =
I.getOperand(1).getPredicate();
2643 Register CmpOperand =
I.getOperand(2).getReg();
2650 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2653std::pair<Register, bool>
2654SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2660 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2668 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2671 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2674 .
addImm(APInt(32, Val).getZExtValue());
2676 GR.
add(ConstInt,
MI);
2681bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2683 MachineInstr &
I)
const {
2685 return selectCmp(ResVReg, ResType, CmpOp,
I);
2689 MachineInstr &
I)
const {
2692 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2698 MachineInstr &
I)
const {
2702 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2708 MachineInstr &
I)
const {
2712 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2719 MachineInstr &
I)
const {
2723 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2728bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2730 MachineInstr &
I)
const {
2731 Register SelectFirstArg =
I.getOperand(2).getReg();
2732 Register SelectSecondArg =
I.getOperand(3).getReg();
2741 SPIRV::OpTypeVector;
2748 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2749 }
else if (IsPtrTy) {
2750 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2752 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2756 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2757 }
else if (IsPtrTy) {
2758 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2760 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2763 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2766 .
addUse(
I.getOperand(1).getReg())
2772bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2775 bool IsSigned)
const {
2777 Register ZeroReg = buildZerosVal(ResType,
I);
2778 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2782 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2783 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2786 .
addUse(
I.getOperand(1).getReg())
2792bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2794 MachineInstr &
I,
bool IsSigned,
2795 unsigned Opcode)
const {
2796 Register SrcReg =
I.getOperand(1).getReg();
2802 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2807 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2809 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2812bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2814 MachineInstr &
I,
bool IsSigned)
const {
2815 Register SrcReg =
I.getOperand(1).getReg();
2817 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2820 if (SrcType == ResType)
2821 return BuildCOPY(ResVReg, SrcReg,
I);
2823 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2824 return selectUnOp(ResVReg, ResType,
I, Opcode);
2827bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2830 bool IsSigned)
const {
2831 MachineIRBuilder MIRBuilder(
I);
2832 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2847 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2848 : SPIRV::OpULessThanEqual))
2851 .
addUse(
I.getOperand(1).getReg())
2852 .
addUse(
I.getOperand(2).getReg())
2858 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2861 .
addUse(
I.getOperand(1).getReg())
2862 .
addUse(
I.getOperand(2).getReg())
2870 unsigned SelectOpcode =
2871 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2876 .
addUse(buildOnesVal(
true, ResType,
I))
2877 .
addUse(buildZerosVal(ResType,
I))
2884 .
addUse(buildOnesVal(
false, ResType,
I))
2888bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2895 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2896 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2898 Register One = buildOnesVal(
false, IntTy,
I);
2914bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2916 MachineInstr &
I)
const {
2917 Register IntReg =
I.getOperand(1).getReg();
2920 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2921 if (ArgType == ResType)
2922 return BuildCOPY(ResVReg, IntReg,
I);
2924 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2925 return selectUnOp(ResVReg, ResType,
I, Opcode);
2928bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2930 MachineInstr &
I)
const {
2931 unsigned Opcode =
I.getOpcode();
2932 unsigned TpOpcode = ResType->
getOpcode();
2934 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2935 assert(Opcode == TargetOpcode::G_CONSTANT &&
2936 I.getOperand(1).getCImm()->isZero());
2937 MachineBasicBlock &DepMBB =
I.getMF()->front();
2940 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2947 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
2950bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2952 MachineInstr &
I)
const {
2953 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2959bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2961 MachineInstr &
I)
const {
2963 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2967 .
addUse(
I.getOperand(3).getReg())
2969 .
addUse(
I.getOperand(2).getReg());
2970 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
2975bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2977 MachineInstr &
I)
const {
2979 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2982 .
addUse(
I.getOperand(2).getReg());
2983 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
2988bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
2990 MachineInstr &
I)
const {
2992 return selectInsertVal(ResVReg, ResType,
I);
2994 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
2997 .
addUse(
I.getOperand(2).getReg())
2998 .
addUse(
I.getOperand(3).getReg())
2999 .
addUse(
I.getOperand(4).getReg())
3003bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3005 MachineInstr &
I)
const {
3007 return selectExtractVal(ResVReg, ResType,
I);
3009 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3012 .
addUse(
I.getOperand(2).getReg())
3013 .
addUse(
I.getOperand(3).getReg())
3017bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3019 MachineInstr &
I)
const {
3020 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3026 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3027 : SPIRV::OpAccessChain)
3028 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3029 :
SPIRV::OpPtrAccessChain);
3031 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3035 .
addUse(
I.getOperand(3).getReg());
3037 const unsigned StartingIndex =
3038 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3041 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3042 Res.addUse(
I.getOperand(i).getReg());
3043 return Res.constrainAllUses(
TII,
TRI, RBI);
3047bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3050 unsigned Lim =
I.getNumExplicitOperands();
3051 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3052 Register OpReg =
I.getOperand(i).getReg();
3053 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3055 SmallPtrSet<SPIRVType *, 4> Visited;
3056 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3057 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3064 MachineFunction *MF =
I.getMF();
3076 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3077 TII.get(SPIRV::OpSpecConstantOp))
3080 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3082 GR.
add(OpDefine, MIB);
3090bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3092 MachineInstr &
I)
const {
3096 case Intrinsic::spv_load:
3097 return selectLoad(ResVReg, ResType,
I);
3098 case Intrinsic::spv_store:
3099 return selectStore(
I);
3100 case Intrinsic::spv_extractv:
3101 return selectExtractVal(ResVReg, ResType,
I);
3102 case Intrinsic::spv_insertv:
3103 return selectInsertVal(ResVReg, ResType,
I);
3104 case Intrinsic::spv_extractelt:
3105 return selectExtractElt(ResVReg, ResType,
I);
3106 case Intrinsic::spv_insertelt:
3107 return selectInsertElt(ResVReg, ResType,
I);
3108 case Intrinsic::spv_gep:
3109 return selectGEP(ResVReg, ResType,
I);
3110 case Intrinsic::spv_unref_global:
3111 case Intrinsic::spv_init_global: {
3112 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3113 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3114 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3117 Register GVarVReg =
MI->getOperand(0).getReg();
3118 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3122 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3124 MI->removeFromParent();
3128 case Intrinsic::spv_undef: {
3129 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3134 case Intrinsic::spv_const_composite: {
3136 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3142 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3144 MachineIRBuilder MIR(
I);
3146 MIR, SPIRV::OpConstantComposite, 3,
3147 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3149 for (
auto *Instr : Instructions) {
3150 Instr->setDebugLoc(
I.getDebugLoc());
3156 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3162 case Intrinsic::spv_assign_name: {
3163 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3164 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3165 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3166 i <
I.getNumExplicitOperands(); ++i) {
3167 MIB.
addImm(
I.getOperand(i).getImm());
3171 case Intrinsic::spv_switch: {
3172 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3173 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3174 if (
I.getOperand(i).isReg())
3175 MIB.
addReg(
I.getOperand(i).getReg());
3176 else if (
I.getOperand(i).isCImm())
3177 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3178 else if (
I.getOperand(i).isMBB())
3179 MIB.
addMBB(
I.getOperand(i).getMBB());
3185 case Intrinsic::spv_loop_merge: {
3186 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3187 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3188 if (
I.getOperand(i).isMBB())
3189 MIB.
addMBB(
I.getOperand(i).getMBB());
3195 case Intrinsic::spv_selection_merge: {
3197 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3198 assert(
I.getOperand(1).isMBB() &&
3199 "operand 1 to spv_selection_merge must be a basic block");
3200 MIB.
addMBB(
I.getOperand(1).getMBB());
3201 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3204 case Intrinsic::spv_cmpxchg:
3205 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3206 case Intrinsic::spv_unreachable:
3207 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3209 case Intrinsic::spv_alloca:
3210 return selectFrameIndex(ResVReg, ResType,
I);
3211 case Intrinsic::spv_alloca_array:
3212 return selectAllocaArray(ResVReg, ResType,
I);
3213 case Intrinsic::spv_assume:
3215 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3216 .
addUse(
I.getOperand(1).getReg())
3219 case Intrinsic::spv_expect:
3221 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3224 .
addUse(
I.getOperand(2).getReg())
3225 .
addUse(
I.getOperand(3).getReg())
3228 case Intrinsic::arithmetic_fence:
3231 TII.get(SPIRV::OpArithmeticFenceEXT))
3234 .
addUse(
I.getOperand(2).getReg())
3237 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3239 case Intrinsic::spv_thread_id:
3245 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3247 case Intrinsic::spv_thread_id_in_group:
3253 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3255 case Intrinsic::spv_group_id:
3261 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3263 case Intrinsic::spv_flattened_thread_id_in_group:
3270 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3272 case Intrinsic::spv_workgroup_size:
3273 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3275 case Intrinsic::spv_global_size:
3276 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3278 case Intrinsic::spv_global_offset:
3279 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3281 case Intrinsic::spv_num_workgroups:
3282 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3284 case Intrinsic::spv_subgroup_size:
3285 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3287 case Intrinsic::spv_num_subgroups:
3288 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3290 case Intrinsic::spv_subgroup_id:
3291 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3292 case Intrinsic::spv_subgroup_local_invocation_id:
3293 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3294 ResVReg, ResType,
I);
3295 case Intrinsic::spv_subgroup_max_size:
3296 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3298 case Intrinsic::spv_fdot:
3299 return selectFloatDot(ResVReg, ResType,
I);
3300 case Intrinsic::spv_udot:
3301 case Intrinsic::spv_sdot:
3302 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3304 return selectIntegerDot(ResVReg, ResType,
I,
3305 IID == Intrinsic::spv_sdot);
3306 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3307 case Intrinsic::spv_dot4add_i8packed:
3308 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3310 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3311 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3312 case Intrinsic::spv_dot4add_u8packed:
3313 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3315 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3316 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3317 case Intrinsic::spv_all:
3318 return selectAll(ResVReg, ResType,
I);
3319 case Intrinsic::spv_any:
3320 return selectAny(ResVReg, ResType,
I);
3321 case Intrinsic::spv_cross:
3322 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3323 case Intrinsic::spv_distance:
3324 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3325 case Intrinsic::spv_lerp:
3326 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3327 case Intrinsic::spv_length:
3328 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3329 case Intrinsic::spv_degrees:
3330 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3331 case Intrinsic::spv_faceforward:
3332 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3333 case Intrinsic::spv_frac:
3334 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3335 case Intrinsic::spv_isinf:
3336 return selectOpIsInf(ResVReg, ResType,
I);
3337 case Intrinsic::spv_isnan:
3338 return selectOpIsNan(ResVReg, ResType,
I);
3339 case Intrinsic::spv_normalize:
3340 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3341 case Intrinsic::spv_refract:
3342 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3343 case Intrinsic::spv_reflect:
3344 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3345 case Intrinsic::spv_rsqrt:
3346 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3347 case Intrinsic::spv_sign:
3348 return selectSign(ResVReg, ResType,
I);
3349 case Intrinsic::spv_smoothstep:
3350 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3351 case Intrinsic::spv_firstbituhigh:
3352 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3353 case Intrinsic::spv_firstbitshigh:
3354 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3355 case Intrinsic::spv_firstbitlow:
3356 return selectFirstBitLow(ResVReg, ResType,
I);
3357 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3359 auto MemSemConstant =
3360 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3361 Register MemSemReg = MemSemConstant.first;
3362 Result &= MemSemConstant.second;
3363 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3364 Register ScopeReg = ScopeConstant.first;
3365 Result &= ScopeConstant.second;
3368 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3374 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3375 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3376 SPIRV::StorageClass::StorageClass ResSC =
3380 "Generic storage class");
3382 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3389 case Intrinsic::spv_lifetime_start:
3390 case Intrinsic::spv_lifetime_end: {
3391 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3392 : SPIRV::OpLifetimeStop;
3393 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3394 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3402 case Intrinsic::spv_saturate:
3403 return selectSaturate(ResVReg, ResType,
I);
3404 case Intrinsic::spv_nclamp:
3405 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3406 case Intrinsic::spv_uclamp:
3407 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3408 case Intrinsic::spv_sclamp:
3409 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3410 case Intrinsic::spv_wave_active_countbits:
3411 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3412 case Intrinsic::spv_wave_all:
3413 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3414 case Intrinsic::spv_wave_any:
3415 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3416 case Intrinsic::spv_wave_is_first_lane:
3417 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3418 case Intrinsic::spv_wave_reduce_umax:
3419 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3420 case Intrinsic::spv_wave_reduce_max:
3421 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3422 case Intrinsic::spv_wave_reduce_sum:
3423 return selectWaveReduceSum(ResVReg, ResType,
I);
3424 case Intrinsic::spv_wave_readlane:
3425 return selectWaveOpInst(ResVReg, ResType,
I,
3426 SPIRV::OpGroupNonUniformShuffle);
3427 case Intrinsic::spv_step:
3428 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3429 case Intrinsic::spv_radians:
3430 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3434 case Intrinsic::instrprof_increment:
3435 case Intrinsic::instrprof_increment_step:
3436 case Intrinsic::instrprof_value_profile:
3439 case Intrinsic::spv_value_md:
3441 case Intrinsic::spv_resource_handlefrombinding: {
3442 return selectHandleFromBinding(ResVReg, ResType,
I);
3444 case Intrinsic::spv_resource_store_typedbuffer: {
3445 return selectImageWriteIntrinsic(
I);
3447 case Intrinsic::spv_resource_load_typedbuffer: {
3448 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3450 case Intrinsic::spv_resource_getpointer: {
3451 return selectResourceGetPointer(ResVReg, ResType,
I);
3453 case Intrinsic::spv_discard: {
3454 return selectDiscard(ResVReg, ResType,
I);
3456 case Intrinsic::modf: {
3457 return selectModf(ResVReg, ResType,
I);
3460 std::string DiagMsg;
3461 raw_string_ostream OS(DiagMsg);
3463 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3470bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3472 MachineInstr &
I)
const {
3475 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3482bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3491 Register ImageReg =
I.getOperand(2).getReg();
3493 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3499 Register IdxReg =
I.getOperand(3).getReg();
3501 MachineInstr &Pos =
I;
3503 return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
3506bool SPIRVInstructionSelector::generateImageRead(
Register &ResVReg,
3510 MachineInstr &Pos)
const {
3513 "ImageReg is not an image type.");
3514 bool IsSignedInteger =
3518 if (ResultSize == 4) {
3525 if (IsSignedInteger)
3530 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3537 if (IsSignedInteger)
3543 if (ResultSize == 1) {
3545 TII.get(SPIRV::OpCompositeExtract))
3552 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3555bool SPIRVInstructionSelector::selectResourceGetPointer(
3557 Register ResourcePtr =
I.getOperand(2).getReg();
3559 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3568 MachineIRBuilder MIRBuilder(
I);
3570 Register IndexReg =
I.getOperand(3).getReg();
3573 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3574 TII.get(SPIRV::OpAccessChain))
3583bool SPIRVInstructionSelector::extractSubvector(
3585 MachineInstr &InsertionPoint)
const {
3587 [[maybe_unused]] uint64_t InputSize =
3590 assert(InputSize > 1 &&
"The input must be a vector.");
3591 assert(ResultSize > 1 &&
"The result must be a vector.");
3592 assert(ResultSize < InputSize &&
3593 "Cannot extract more element than there are in the input.");
3596 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3597 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3598 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3601 TII.get(SPIRV::OpCompositeExtract))
3612 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3614 TII.get(SPIRV::OpCompositeConstruct))
3618 for (
Register ComponentReg : ComponentRegisters)
3619 MIB.
addUse(ComponentReg);
3623bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3624 MachineInstr &
I)
const {
3631 Register ImageReg =
I.getOperand(1).getReg();
3633 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3639 Register CoordinateReg =
I.getOperand(2).getReg();
3640 Register DataReg =
I.getOperand(3).getReg();
3643 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3644 TII.get(SPIRV::OpImageWrite))
3651Register SPIRVInstructionSelector::buildPointerToResource(
3652 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3653 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3654 bool IsNonUniform, StringRef Name, MachineIRBuilder MIRBuilder)
const {
3656 if (ArraySize == 1) {
3660 "SpirvResType did not have an explicit layout.");
3665 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3669 VarPointerType, Set,
Binding, Name, MIRBuilder);
3678 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3679 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3691bool SPIRVInstructionSelector::selectFirstBitSet16(
3693 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3695 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3699 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3702bool SPIRVInstructionSelector::selectFirstBitSet32(
3704 Register SrcReg,
unsigned BitSetOpcode)
const {
3705 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3708 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3714bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3716 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3723 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3725 MachineIRBuilder MIRBuilder(
I);
3733 std::vector<Register> PartialRegs;
3736 unsigned CurrentComponent = 0;
3737 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3743 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3744 TII.get(SPIRV::OpVectorShuffle))
3749 .
addImm(CurrentComponent)
3750 .
addImm(CurrentComponent + 1);
3758 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3759 BitSetOpcode, SwapPrimarySide))
3762 PartialRegs.push_back(SubVecBitSetReg);
3766 if (CurrentComponent != ComponentCount) {
3772 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
3773 SPIRV::OpVectorExtractDynamic))
3779 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
3780 BitSetOpcode, SwapPrimarySide))
3783 PartialRegs.push_back(FinalElemBitSetReg);
3788 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
3789 SPIRV::OpCompositeConstruct);
3792bool SPIRVInstructionSelector::selectFirstBitSet64(
3794 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3807 if (ComponentCount > 2) {
3808 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
3809 BitSetOpcode, SwapPrimarySide);
3813 MachineIRBuilder MIRBuilder(
I);
3815 BaseType, 2 * ComponentCount, MIRBuilder,
false);
3819 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
3825 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
3832 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
3835 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
3836 SPIRV::OpVectorExtractDynamic))
3838 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
3839 SPIRV::OpVectorExtractDynamic))
3843 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3844 TII.get(SPIRV::OpVectorShuffle))
3852 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
3859 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3860 TII.get(SPIRV::OpVectorShuffle))
3868 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
3889 SelectOp = SPIRV::OpSelectSISCond;
3890 AddOp = SPIRV::OpIAddS;
3898 SelectOp = SPIRV::OpSelectVIVCond;
3899 AddOp = SPIRV::OpIAddV;
3909 if (SwapPrimarySide) {
3910 PrimaryReg = LowReg;
3911 SecondaryReg = HighReg;
3912 PrimaryShiftReg = Reg0;
3913 SecondaryShiftReg = Reg32;
3918 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
3924 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
3930 if (!selectOpWithSrcs(ValReg, ResType,
I,
3931 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
3934 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
3937bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
3940 bool IsSigned)
const {
3942 Register OpReg =
I.getOperand(2).getReg();
3945 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3946 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
3950 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3952 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3954 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3958 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
3962bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
3964 MachineInstr &
I)
const {
3966 Register OpReg =
I.getOperand(2).getReg();
3971 unsigned ExtendOpcode = SPIRV::OpUConvert;
3972 unsigned BitSetOpcode = GL::FindILsb;
3976 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3978 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3980 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3987bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
3989 MachineInstr &
I)
const {
3993 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
3994 TII.get(SPIRV::OpVariableLengthArrayINTEL))
3997 .
addUse(
I.getOperand(2).getReg())
4000 unsigned Alignment =
I.getOperand(3).getImm();
4006bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4008 MachineInstr &
I)
const {
4012 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4013 TII.get(SPIRV::OpVariable))
4016 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4019 unsigned Alignment =
I.getOperand(2).getImm();
4026bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4031 const MachineInstr *PrevI =
I.getPrevNode();
4033 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4034 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4037 .
addMBB(
I.getOperand(0).getMBB())
4041 .
addMBB(
I.getOperand(0).getMBB())
4045bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4056 const MachineInstr *NextI =
I.getNextNode();
4058 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4064 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4065 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4066 .
addUse(
I.getOperand(0).getReg())
4067 .
addMBB(
I.getOperand(1).getMBB())
4072bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4074 MachineInstr &
I)
const {
4075 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4078 const unsigned NumOps =
I.getNumOperands();
4079 for (
unsigned i = 1; i <
NumOps; i += 2) {
4080 MIB.
addUse(
I.getOperand(i + 0).getReg());
4081 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4089bool SPIRVInstructionSelector::selectGlobalValue(
4090 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4092 MachineIRBuilder MIRBuilder(
I);
4093 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4096 std::string GlobalIdent;
4098 unsigned &
ID = UnnamedGlobalIDs[GV];
4100 ID = UnnamedGlobalIDs.size();
4101 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4128 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4135 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4138 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4139 MachineInstrBuilder MIB1 =
4140 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4143 MachineInstrBuilder MIB2 =
4145 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4149 GR.
add(ConstVal, MIB2);
4155 MachineInstrBuilder MIB3 =
4156 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4159 GR.
add(ConstVal, MIB3);
4162 assert(NewReg != ResVReg);
4163 return BuildCOPY(ResVReg, NewReg,
I);
4175 SPIRV::LinkageType::LinkageType LnkType =
4177 ? SPIRV::LinkageType::Import
4180 ? SPIRV::LinkageType::LinkOnceODR
4181 : SPIRV::LinkageType::Export);
4189 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
4193bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4195 MachineInstr &
I)
const {
4197 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4205 MachineIRBuilder MIRBuilder(
I);
4211 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4214 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4216 .
add(
I.getOperand(1))
4221 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4224 ResType->
getOpcode() == SPIRV::OpTypeVector
4231 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4232 ? SPIRV::OpVectorTimesScalar
4242bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4244 MachineInstr &
I)
const {
4260 MachineIRBuilder MIRBuilder(
I);
4263 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4274 MachineBasicBlock &EntryBB =
I.getMF()->front();
4278 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4281 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4285 assert(
I.getNumOperands() == 4 &&
4286 "Expected 4 operands for modf instruction");
4290 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4293 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4296 .
add(
I.getOperand(3))
4300 Register IntegralPartReg =
I.getOperand(1).getReg();
4301 if (IntegralPartReg.
isValid()) {
4303 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4312 assert(
false &&
"GLSL::Modf is deprecated.");
4323bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4324 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4325 const SPIRVType *ResType, MachineInstr &
I)
const {
4326 MachineIRBuilder MIRBuilder(
I);
4330 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4342 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4343 SPIRV::LinkageType::Import, MIRBuilder,
false);
4346 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4347 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4353 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4360 assert(
I.getOperand(2).isReg());
4361 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4365 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4375bool SPIRVInstructionSelector::loadBuiltinInputID(
4376 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4377 const SPIRVType *ResType, MachineInstr &
I)
const {
4378 MachineIRBuilder MIRBuilder(
I);
4380 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4395 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4396 SPIRV::LinkageType::Import, MIRBuilder,
false);
4399 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4408 MachineInstr &
I)
const {
4409 MachineIRBuilder MIRBuilder(
I);
4410 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4414 if (VectorSize == 4)
4422bool SPIRVInstructionSelector::loadHandleBeforePosition(
4424 MachineInstr &Pos)
const {
4427 Intrinsic::spv_resource_handlefrombinding);
4434 bool IsNonUniform =
false;
4438 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4439 MachineIRBuilder MIRBuilder(HandleDef);
4441 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4443 if (IsStructuredBuffer) {
4449 buildPointerToResource(VarType, SC, Set,
Binding, ArraySize, IndexReg,
4450 IsNonUniform, Name, MIRBuilder);
4458 uint32_t LoadOpcode =
4459 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4462 TII.get(LoadOpcode))
4470InstructionSelector *
4474 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
bool hasInternalLinkage() const
bool hasLinkOnceODRLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE