#define DEBUG_TYPE "gisel-known-bits"
49 "Analysis for ComputingKnownBits",
false,
true)
GISelValueTracking::GISelValueTracking(MachineFunction &MF, unsigned MaxDepth)
    : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
      DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}
Align GISelValueTracking::computeKnownAlignment(Register R, unsigned Depth) {
  const MachineInstr *MI = MRI.getVRegDef(R);
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY:
    return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
  case TargetOpcode::G_ASSERT_ALIGN: {
    // TODO: Min with source.
    return Align(MI->getOperand(2).getImm());
  }
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI->getOperand(1).getIndex();
    return MF.getFrameInfo().getObjectAlign(FrameIdx);
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
  default:
    return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);
  }
}
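// Illustrative usage sketch (not part of the original file): a combine or
// legalizer helper could use computeKnownAlignment to prove a pointer is
// sufficiently aligned before widening a memory access. `VT` and `PtrReg`
// are assumed to be provided by the surrounding pass.
static bool isAtLeast4ByteAligned(llvm::GISelValueTracking &VT,
                                  llvm::Register PtrReg) {
  // Walks COPY / G_ASSERT_ALIGN / G_FRAME_INDEX exactly as shown above.
  return VT.computeKnownAlignment(PtrReg) >= llvm::Align(4);
}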
KnownBits GISelValueTracking::getKnownBits(MachineInstr &MI) {
  assert(MI.getNumExplicitDefs() == 1 &&
         "expected single return generic instruction");
  return getKnownBits(MI.getOperand(0).getReg());
}

KnownBits GISelValueTracking::getKnownBits(Register R) {
  const LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return getKnownBits(R, DemandedElts);
}

KnownBits GISelValueTracking::getKnownBits(Register R,
                                           const APInt &DemandedElts,
                                           unsigned Depth) {
  // For now, we only maintain the cache during one request.
  assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared");

  KnownBits Known;
  computeKnownBitsImpl(R, Known, DemandedElts, Depth);
  ComputeKnownBitsCache.clear();
  return Known;
}
bool GISelValueTracking::signBitIsZero(Register R) {
  LLT Ty = MRI.getType(R);
  unsigned BitWidth = Ty.getScalarSizeInBits();
  return maskedValueIsZero(R, APInt::getSignMask(BitWidth));
}
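// Illustrative sketch (not part of the original file, uses only headers this
// file already pulls in): maskedValueIsZero reduces to a KnownBits query, so
// its behavior is easy to reproduce with the standalone KnownBits type.
static void knownBitsMaskDemo() {
  // An 8-bit value produced by `x & 0x0F`: the top four bits are known zero.
  llvm::KnownBits Known(8);
  Known.Zero = llvm::APInt(8, 0xF0);
  // maskedValueIsZero(V, Mask) is Mask.isSubsetOf(Known.Zero); signBitIsZero
  // above uses Mask == getSignMask(BitWidth).
  llvm::APInt SignMask = llvm::APInt::getSignMask(8);
  assert(SignMask.isSubsetOf(Known.Zero) && "sign bit is known zero");
}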
static LLVM_ATTRIBUTE_UNUSED void
dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
  dbgs() << "[" << Depth << "] Computed for: " << MI << "[" << Depth
         << "] Known: 0x" << toString(Known.Zero | Known.One, 16, false)
         << "\n"
         << "[" << Depth << "] Zero: 0x" << toString(Known.Zero, 16, false)
         << "\n"
         << "[" << Depth << "] One:  0x" << toString(Known.One, 16, false)
         << "\n";
}
void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             unsigned Depth) {
  // Test src1 first, since we canonicalize simpler expressions to the RHS.
  computeKnownBitsImpl(Src1, Known, DemandedElts, Depth);

  // If we don't know any bits, early out.
  if (Known.isUnknown())
    return;

  KnownBits Known2;
  computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);

  // Only known if known in both the LHS and RHS.
  Known = Known.intersectWith(Known2);
}
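// Illustrative sketch (not part of the original file): computeKnownBitsMin
// relies on KnownBits::intersectWith, which keeps only the bits both inputs
// agree on. For a G_SELECT either operand may be chosen, so this is the best
// that can be said without knowing the condition.
static void knownBitsSelectDemo() {
  llvm::KnownBits A = llvm::KnownBits::makeConstant(llvm::APInt(8, 0x0C));
  llvm::KnownBits B = llvm::KnownBits::makeConstant(llvm::APInt(8, 0x0D));
  llvm::KnownBits Common = A.intersectWith(B);
  // 0x0C and 0x0D differ only in bit 0, so bits 7..1 remain known.
  assert(Common.Zero == llvm::APInt(8, 0xF2) &&
         Common.One == llvm::APInt(8, 0x0C));
}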
void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              unsigned Depth) {
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();
  LLT DstTy = MRI.getType(R);

  if (DstTy.isFixedVector()) {
    assert(DstTy.getNumElements() == DemandedElts.getBitWidth() &&
           "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts.getBitWidth() == 1 && DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars or scalable vectors");
  }

  unsigned BitWidth = DstTy.getScalarSizeInBits();
  Known = KnownBits(BitWidth); // Don't know anything.
  // ... (cache lookup and depth cut-off elided in this listing)

  switch (Opcode) {
  default:
    TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
                                      Depth);
    break;
  case TargetOpcode::G_BUILD_VECTOR: {
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      if (!DemandedElts[I])
        continue;
      KnownBits Known2;
      computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), Known2, APInt(1, 1),
                           Depth + 1);
      Known = Known.intersectWith(Known2);
      if (Known.isUnknown())
        break; // If we don't know any bits, early out.
    }
    break;
  }
  case TargetOpcode::G_SPLAT_VECTOR: {
    // ... (all lanes share the known bits of the scalar operand)
    break;
  }
  case TargetOpcode::COPY:
  case TargetOpcode::G_PHI:
  case TargetOpcode::PHI: {
    Known.One = APInt::getAllOnes(BitWidth);
    Known.Zero = APInt::getAllOnes(BitWidth);
    // Destination registers should not have subregisters at this point of the
    // pipeline, otherwise the main live-range would be defined more than once,
    // which is against SSA.
    assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
    // PHI operands are a mix of registers and basic blocks, interleaved.
    for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
      const MachineOperand &Src = MI.getOperand(Idx);
      Register SrcReg = Src.getReg();
      if (SrcReg.isVirtual() && Src.getSubReg() == 0 &&
          MRI.getType(SrcReg).isValid()) {
        KnownBits Known2;
        // For COPYs we don't do anything, don't increase the depth.
        computeKnownBitsImpl(SrcReg, Known2, DemandedElts,
                             Depth + (Opcode != TargetOpcode::COPY));
        Known = Known.intersectWith(Known2);
        if (Known.isUnknown())
          break;
      } else {
        // We know nothing.
        Known = KnownBits(BitWidth);
        break;
      }
    }
    break;
  }
  case TargetOpcode::G_CONSTANT: {
    Known = KnownBits::makeConstant(MI.getOperand(1).getCImm()->getValue());
    break;
  }
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI.getOperand(1).getIndex();
    TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
    break;
  }
  case TargetOpcode::G_SUB: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known = KnownBits::sub(Known, Known2);
    break;
  }
  case TargetOpcode::G_XOR: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known ^= Known2;
    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    if (DstTy.isVector())
      break;
    // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets?
    LLT Ty = MRI.getType(MI.getOperand(1).getReg());
    if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
      break;
    [[fallthrough]];
  }
  case TargetOpcode::G_ADD: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known = KnownBits::add(Known, Known2);
    break;
  }
  case TargetOpcode::G_AND: {
    // If either the LHS or the RHS are Zero, the result is zero.
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known &= Known2;
    break;
  }
  case TargetOpcode::G_OR: {
    // If either the LHS or the RHS are One, the result is one.
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known |= Known2;
    break;
  }
  case TargetOpcode::G_MUL: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known = KnownBits::mul(Known, Known2);
    break;
  }
  case TargetOpcode::G_SELECT: {
    computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),
                        Known, DemandedElts, Depth + 1);
    break;
  }
  case TargetOpcode::G_SMIN: {
    // TODO: Handle clamp pattern with number of sign bits.
    KnownBits KnownRHS;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
                         Depth + 1);
    Known = KnownBits::smin(Known, KnownRHS);
    break;
  }
  case TargetOpcode::G_SMAX: {
    KnownBits KnownRHS;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
                         Depth + 1);
    Known = KnownBits::smax(Known, KnownRHS);
    break;
  }
  case TargetOpcode::G_UMIN: {
    KnownBits KnownRHS;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
                         Depth + 1);
    Known = KnownBits::umin(Known, KnownRHS);
    break;
  }
  case TargetOpcode::G_UMAX: {
    KnownBits KnownRHS;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
                         Depth + 1);
    Known = KnownBits::umax(Known, KnownRHS);
    break;
  }
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    if (DstTy.isVector())
      break;
    if (TL.getBooleanContents(DstTy.isVector(),
                              Opcode == TargetOpcode::G_FCMP) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case TargetOpcode::G_SEXT: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.sextInReg(MI.getOperand(2).getImm());
    break;
  }
  case TargetOpcode::G_ANYEXT: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case TargetOpcode::G_LOAD: {
    const MachineMemOperand *MMO = *MI.memoperands_begin();
    KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
    if (const MDNode *Ranges = MMO->getRanges())
      computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
    Known = KnownRange.anyext(Known.getBitWidth());
    break;
  }
  case TargetOpcode::G_SEXTLOAD:
  case TargetOpcode::G_ZEXTLOAD: {
    if (DstTy.isVector())
      break;
    const MachineMemOperand *MMO = *MI.memoperands_begin();
    KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
    if (const MDNode *Ranges = MMO->getRanges())
      computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
    Known = Opcode == TargetOpcode::G_SEXTLOAD
                ? KnownRange.sext(Known.getBitWidth())
                : KnownRange.zext(Known.getBitWidth());
    break;
  }
  case TargetOpcode::G_ASHR: {
    KnownBits LHSKnown, RHSKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
                         Depth + 1);
    Known = KnownBits::ashr(LHSKnown, RHSKnown);
    break;
  }
  case TargetOpcode::G_LSHR: {
    KnownBits LHSKnown, RHSKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
                         Depth + 1);
    Known = KnownBits::lshr(LHSKnown, RHSKnown);
    break;
  }
  case TargetOpcode::G_SHL: {
    KnownBits LHSKnown, RHSKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
                         Depth + 1);
    Known = KnownBits::shl(LHSKnown, RHSKnown);
    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
    if (DstTy.isVector())
      break;
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_TRUNC: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case TargetOpcode::G_ASSERT_ZEXT: {
    Register SrcReg = MI.getOperand(1).getReg();
    computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);

    unsigned SrcBitWidth = MI.getOperand(2).getImm();
    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    APInt InMask = APInt::getLowBitsSet(BitWidth, SrcBitWidth);
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    int64_t LogOfAlign = Log2_64(MI.getOperand(2).getImm());
    // If a node is guaranteed to be aligned, set low zero bits accordingly,
    // as well as clearing one bits.
    Known.Zero.setLowBits(LogOfAlign);
    Known.One.clearLowBits(LogOfAlign);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();
    unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();

    for (unsigned I = 0; I != NumOps - 1; ++I) {
      KnownBits SrcOpKnown;
      computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), SrcOpKnown,
                           DemandedElts, Depth + 1);
      Known.insertBits(SrcOpKnown, I * OpSize);
    }
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();
    Register SrcReg = MI.getOperand(NumOps - 1).getReg();
    LLT SrcTy = MRI.getType(SrcReg);

    if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.getScalarType())
      return; // TODO: Handle vector->subelement unmerges.

    // Figure out the result operand index.
    unsigned DstIdx = 0;
    for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
         ++DstIdx)
      ;

    APInt SubDemandedElts = DemandedElts;
    if (SrcTy.isVector()) {
      unsigned DstLanes = DstTy.isVector() ? DstTy.getNumElements() : 1;
      SubDemandedElts =
          DemandedElts.zext(SrcTy.getNumElements()).shl(DstIdx * DstLanes);
    }

    KnownBits SrcOpKnown;
    computeKnownBitsImpl(SrcReg, SrcOpKnown, SubDemandedElts, Depth + 1);

    if (SrcTy.isVector())
      Known = std::move(SrcOpKnown);
    else
      Known = SrcOpKnown.extractBits(BitWidth, BitWidth * DstIdx);
    break;
  }
  case TargetOpcode::G_BSWAP: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.byteSwap();
    break;
  }
  case TargetOpcode::G_BITREVERSE: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.reverseBits();
    break;
  }
  case TargetOpcode::G_CTPOP: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    // We can bound the space the count needs; also, bits known to be zero
    // cannot contribute to the population.
    unsigned BitsPossiblySet = Known2.countMaxPopulation();
    unsigned LowBits = llvm::bit_width(BitsPossiblySet);
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case TargetOpcode::G_UBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), OffsetKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(3).getReg(), WidthKnown, DemandedElts,
                         Depth + 1);
    // ... (extract the known field from SrcOpKnown)
    break;
  }
  case TargetOpcode::G_SBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), OffsetKnown, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(3).getReg(), WidthKnown, DemandedElts,
                         Depth + 1);
    // ... (as G_UBFX, then sign-extend from the known width)
    break;
  }
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SMULO: {
    if (MI.getOperand(1).getReg() == R) {
      // If we know the result of a compare has the top bits zero, use this
      // info.
      if (TL.getBooleanContents(DstTy.isVector(), false) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
    }
    break;
  }
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
    KnownBits SrcOpKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
                         Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = SrcOpKnown.countMaxLeadingZeros();
    unsigned LowBits = llvm::bit_width(PossibleLZ);
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    APInt DemandedLHS, DemandedRHS;
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    unsigned NumElts = MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    if (!getShuffleDemandedElts(NumElts, cast<GShuffleVector>(MI).getMask(),
                                DemandedElts, DemandedLHS, DemandedRHS))
      break;
    // ... (intersect the known bits of the demanded halves)
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
      break;
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    unsigned NumSubVectorElts =
        MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      APInt DemandedSub =
          DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
      if (!!DemandedSub) {
        KnownBits Known2;
        computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), Known2,
                             DemandedSub, Depth + 1);
        Known = Known.intersectWith(Known2);
      }
      if (Known.isUnknown())
        break; // If we don't know any bits, early out.
    }
    break;
  }
  case TargetOpcode::G_ABS: {
    KnownBits Known2;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
                         Depth + 1);
    Known = Known2.abs();
    break;
  }
  }

  LLVM_DEBUG(dumpResult(MI, Known, Depth));

  // Update the cache.
  ComputeKnownBitsCache[R] = Known;
}
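// Illustrative sketch (not part of the original file): the G_AND case above
// combines the operand facts with `Known &= Known2`; a bit known zero in
// either operand is known zero in the result, and only bits known one in
// both operands stay known one.
static void knownBitsAndDemo() {
  llvm::KnownBits LHS(8), RHS(8);
  LHS.Zero = llvm::APInt(8, 0xF0); // high nibble of LHS is known zero
  RHS.One = llvm::APInt(8, 0x03);  // low two bits of RHS are known one
  llvm::KnownBits Result = LHS & RHS;
  assert(Result.Zero == llvm::APInt(8, 0xF0)); // zeros propagate through AND
  assert(Result.One.isZero()); // the corresponding LHS bits are unknown
}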
static bool outputDenormalIsIEEEOrPosZero(const Function &F, LLT Ty) {
  Ty = Ty.getScalarType();
  DenormalMode Mode = F.getDenormalMode(getFltSemanticForLLT(Ty));
  return Mode.Output == DenormalMode::IEEE ||
         Mode.Output == DenormalMode::PositiveZero;
}
void GISelValueTracking::computeKnownFPClass(Register R, KnownFPClass &Known,
                                             FPClassTest InterestedClasses,
                                             unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);
}
void GISelValueTracking::computeKnownFPClassForFPTrunc(
    const MachineInstr &MI, const APInt &DemandedElts,
    FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth) {
  if ((InterestedClasses & (KnownFPClass::OrderedLessThanZeroMask | fcNan)) ==
      fcNone)
    return;

  Register Val = MI.getOperand(1).getReg();
  KnownFPClass KnownSrc;
  computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                      Depth + 1);

  // Sign should be preserved.
  // ... (NaN and infinity propagation elided in this listing)
}
void GISelValueTracking::computeKnownFPClass(Register R,
                                             const APInt &DemandedElts,
                                             FPClassTest InterestedClasses,
                                             KnownFPClass &Known,
                                             unsigned Depth) {
  assert(Known.isUnknown() && "should not be called with known information");
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  // ... (early outs for undemanded elements and invalid types elided)

  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();
  LLT DstTy = MRI.getType(R);

  if (auto Cst = GFConstant::getConstant(R, MRI)) {
    switch (Cst->getKind()) {
    case GFConstant::GFConstantKind::Scalar: {
      auto APF = Cst->getScalarValue();
      Known.KnownFPClasses = APF.classify();
      Known.SignBit = APF.isNegative();
      break;
    }
    case GFConstant::GFConstantKind::FixedVector: {
      bool SignBitAllZero = true;
      bool SignBitAllOne = true;
      Known.KnownFPClasses = fcNone;
      for (auto C : *Cst) {
        Known.KnownFPClasses |= C.classify();
        if (C.isNegative())
          SignBitAllZero = false;
        else
          SignBitAllOne = false;
      }
      if (SignBitAllOne != SignBitAllZero)
        Known.SignBit = SignBitAllOne;
      break;
    }
    case GFConstant::GFConstantKind::ScalableVector: {
      // ... (conservatively unknown)
      break;
    }
    }
    return;
  }

  FPClassTest KnownNotFromFlags = fcNone;
  if (MI.getFlag(MachineInstr::FmNoNans))
    KnownNotFromFlags |= fcNan;
  if (MI.getFlag(MachineInstr::FmNoInfs))
    KnownNotFromFlags |= fcInf;

  // We no longer need to find out about these bits from inputs if we can
  // assume this from flags/attributes.
  InterestedClasses &= ~KnownNotFromFlags;

  auto ClearClassesFromFlags =
      make_scope_exit([=, &Known] { Known.knownNot(KnownNotFromFlags); });

  const MachineFunction *MF = MI.getMF();

  switch (Opcode) {
  default:
    TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,
                                         Depth);
    break;
  case TargetOpcode::G_FNEG: {
    Register Val = MI.getOperand(1).getReg();
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
                        Depth + 1);
    Known.fneg();
    break;
  }
  case TargetOpcode::G_SELECT: {
    GSelect &SelMI = cast<GSelect>(MI);
    Register Cond = SelMI.getCondReg();
    Register LHS = SelMI.getTrueReg();
    Register RHS = SelMI.getFalseReg();

    FPClassTest FilterLHS = fcAllFlags;
    FPClassTest FilterRHS = fcAllFlags;

    Register TestedValue;
    FPClassTest MaskIfTrue = fcAllFlags;
    FPClassTest MaskIfFalse = fcAllFlags;
    FPClassTest TestedMask = fcNone;
    CmpInst::Predicate Pred;
    Register CmpLHS, CmpRHS;
    if (mi_match(Cond, MRI,
                 m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) {
      // If the select filters out a value based on the class, it no longer
      // participates in the class of the result.
      bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
      std::tie(TestedValue, MaskIfTrue, MaskIfFalse) = fcmpImpliesClass(
          Pred, MF->getFunction(), CmpLHS, CmpRHS, LookThroughFAbsFNeg);
    } else if (mi_match(Cond, MRI, m_GIsFPClass(m_Reg(TestedValue),
                                                m_FPClassTest(TestedMask)))) {
      MaskIfTrue = TestedMask;
      MaskIfFalse = ~TestedMask;
    }

    if (TestedValue == LHS) {
      // match !isnan(x) ? x : y
      FilterLHS = MaskIfTrue;
    } else if (TestedValue == RHS) {
      // match !isnan(x) ? y : x
      FilterRHS = MaskIfFalse;
    }

    KnownFPClass Known2;
    computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS,
                        Known, Depth + 1);
    Known.KnownFPClasses &= FilterLHS;

    computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
                        Known2, Depth + 1);
    Known2.KnownFPClasses &= FilterRHS;

    Known |= Known2;
    break;
  }
  case TargetOpcode::G_FCOPYSIGN: {
    Register Magnitude = MI.getOperand(1).getReg();
    Register Sign = MI.getOperand(2).getReg();

    KnownFPClass KnownSign;

    computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
                        Depth + 1);
    computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
                        Depth + 1);
    Known.copysign(KnownSign);
    break;
  }
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMAD: {
    if ((InterestedClasses & fcNegative) == fcNone)
      break;

    Register A = MI.getOperand(1).getReg();
    Register B = MI.getOperand(2).getReg();
    Register C = MI.getOperand(3).getReg();
    if (A != B)
      break;

    // The multiply cannot be -0 and therefore the add can't be -0.
    Known.knownNot(fcNegZero);

    // x * x + y is non-negative if y is non-negative.
    KnownFPClass KnownAddend;
    computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,
                        Depth + 1);

    if (KnownAddend.cannotBeOrderedLessThanZero())
      Known.knownNot(fcNegative);
    break;
  }
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_STRICT_FSQRT: {
    KnownFPClass KnownSrc;
    FPClassTest InterestedSrcs = InterestedClasses;
    if (InterestedClasses & fcNan)
      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;

    Register Val = MI.getOperand(1).getReg();

    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);

    if (KnownSrc.isKnownNeverPosInfinity())
      Known.knownNot(fcPosInf);
    if (KnownSrc.isKnownNever(fcSNan))
      Known.knownNot(fcSNan);
    // ... (NaN, zero and denormal reasoning elided)
    break;
  }
  case TargetOpcode::G_FABS: {
    if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
      Register Val = MI.getOperand(1).getReg();
      // If we only care about the sign bit we don't need to inspect the
      // operand.
      computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
                          Depth + 1);
    }
    Known.fabs();
    break;
  }
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FSINCOS: {
    // Return NaN on infinite inputs.
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;

    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    Known.knownNot(fcInf);
    if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
      Known.knownNot(fcNan);
    break;
  }
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMAXIMUMNUM:
  case TargetOpcode::G_FMINIMUMNUM: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    KnownFPClass KnownLHS, KnownRHS;

    computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,
                        Depth + 1);
    computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,
                        Depth + 1);

    bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
    Known = KnownLHS | KnownRHS;

    // If either operand is not NaN, the result is not NaN.
    if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
                     Opcode == TargetOpcode::G_FMAXNUM ||
                     Opcode == TargetOpcode::G_FMINIMUMNUM ||
                     Opcode == TargetOpcode::G_FMAXIMUMNUM))
      Known.knownNot(fcNan);

    if (Opcode == TargetOpcode::G_FMAXNUM ||
        Opcode == TargetOpcode::G_FMAXIMUMNUM ||
        Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
      // ... (a known-positive operand forces a positive result)
    } else if (Opcode == TargetOpcode::G_FMAXIMUM) {
      // ...
    } else if (Opcode == TargetOpcode::G_FMINNUM ||
               Opcode == TargetOpcode::G_FMINIMUMNUM ||
               Opcode == TargetOpcode::G_FMINNUM_IEEE) {
      // ... (a known-negative operand forces a negative result)
    } else if (Opcode == TargetOpcode::G_FMINIMUM) {
      // ...
    }

    // Fixup zero handling if denormals could be returned as a zero value.
    if (Known.isKnownNeverNaN()) {
      // ... (exact sign-bit propagation)
    } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
                Opcode == TargetOpcode::G_FMINIMUM) ||
               Opcode == TargetOpcode::G_FMAXIMUMNUM ||
               Opcode == TargetOpcode::G_FMINIMUMNUM ||
               Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
               Opcode == TargetOpcode::G_FMINNUM_IEEE) {
      // ... (the non-NaN operand's sign may still be known)
    }

    if ((Opcode == TargetOpcode::G_FMAXIMUM ||
         Opcode == TargetOpcode::G_FMAXNUM ||
         Opcode == TargetOpcode::G_FMAXIMUMNUM ||
         Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
        (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
      Known.signBitMustBeZero();
    else if ((Opcode == TargetOpcode::G_FMINIMUM ||
              Opcode == TargetOpcode::G_FMINNUM ||
              Opcode == TargetOpcode::G_FMINIMUMNUM ||
              Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
             (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
      Known.signBitMustBeOne();
    break;
  }
  case TargetOpcode::G_FCANONICALIZE: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);

    // This is essentially a stronger form of propagateCanonicalizingSrc.
    const fltSemantics &FPType = getFltSemanticForLLT(DstTy.getScalarType());
    DenormalMode DenormMode = MF->getDenormalMode(FPType);
    // ... (zero/denormal handling depends on DenormMode)
    break;
  }
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM: {
    Register Val = MI.getOperand(1).getReg();
    // reduce min/max will choose an element from one of the vector elements,
    // so we can infer any class information that is common to all elements.
    Known = computeKnownFPClass(Val, MI.getFlags(), InterestedClasses,
                                Depth + 1);
    // Can only propagate the sign if the output is never NaN.
    if (!Known.isKnownNeverNaN())
      Known.SignBit.reset();
    break;
  }
  case TargetOpcode::G_INTRINSIC_TRUNC:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
  case TargetOpcode::G_INTRINSIC_ROUND: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    FPClassTest InterestedSrcs = InterestedClasses;
    // ...
    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);

    // Integer results cannot be subnormal.
    Known.knownNot(fcSubnormal);
    // ... (NaN/infinity/zero propagation elided)
    break;
  }
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FEXP2:
  case TargetOpcode::G_FEXP10: {
    Known.knownNot(fcNegative);
    if ((InterestedClasses & fcNan) == fcNone)
      break;

    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    if (KnownSrc.isKnownNeverNaN()) {
      Known.knownNot(fcNan);
      Known.signBitMustBeZero();
    }
    break;
  }
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FLOG10: {
    // log(+inf) -> +inf
    // log([+-]0.0) -> -inf
    // log(-inf) -> nan
    // log(-x) -> nan
    if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
      break;

    FPClassTest InterestedSrcs = InterestedClasses;
    if ((InterestedClasses & fcNegInf) != fcNone)
      InterestedSrcs |= fcZero | fcSubnormal;
    if ((InterestedClasses & fcNan) != fcNone)
      InterestedSrcs |= fcNan | (fcNegative & ~fcNan);

    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);

    if (KnownSrc.isKnownNeverPosInfinity())
      Known.knownNot(fcPosInf);

    if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
      Known.knownNot(fcNan);

    const fltSemantics &FltSem = getFltSemanticForLLT(DstTy.getScalarType());
    DenormalMode Mode = MF->getDenormalMode(FltSem);

    if (KnownSrc.isKnownNeverLogicalZero(Mode))
      Known.knownNot(fcNegInf);

    break;
  }
  case TargetOpcode::G_FPOWI: {
    if ((InterestedClasses & fcNegative) == fcNone)
      break;

    Register Exp = MI.getOperand(2).getReg();
    LLT ExpTy = MRI.getType(Exp);
    KnownBits ExponentKnownBits = getKnownBits(
        Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);

    if (ExponentKnownBits.Zero[0]) { // Is even.
      Known.knownNot(fcNegative);
      break;
    }

    // Given that exp is an integer, here are the ways that pow can return a
    // negative value:
    //
    //   pow(-x, exp)   --> negative if exp is odd and x is negative.
    //   pow(-0, exp)   --> -inf if exp is negative odd.
    //   pow(-0, exp)   --> -0 if exp is negative even.
    //   pow(-inf, exp) --> -0 if exp is negative odd.
    //   pow(-inf, exp) --> -inf if exp is positive odd.
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);
    if (KnownSrc.isKnownNever(fcNegative))
      Known.knownNot(fcNegative);
    break;
  }
  case TargetOpcode::G_FLDEXP:
  case TargetOpcode::G_STRICT_FLDEXP: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    Known.propagateNaN(KnownSrc, /*PreserveSign=*/true);

    // Sign is preserved, but underflows may produce zeroes.
    // ...
    const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
    if ((InterestedClasses & ExpInfoMask) == fcNone)
      break;
    // ...
    break;
  }
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
                                  Depth);
    break;
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_STRICT_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_STRICT_FSUB: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    KnownFPClass KnownLHS, KnownRHS;
    bool WantNegative =
        (Opcode == TargetOpcode::G_FADD ||
         Opcode == TargetOpcode::G_STRICT_FADD) &&
        (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
    bool WantNaN = (InterestedClasses & fcNan) != fcNone;
    bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;

    if (!WantNaN && !WantNegative && !WantNegZero)
      break;

    FPClassTest InterestedSrcs = InterestedClasses;
    if (WantNegative)
      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
    if (InterestedClasses & fcNan)
      InterestedSrcs |= fcInf;
    computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS,
                        Depth + 1);

    if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
        (WantNegZero && KnownRHS.isKnownNeverNegZero()) || WantNegative ||
        (Opcode == TargetOpcode::G_FSUB ||
         Opcode == TargetOpcode::G_STRICT_FSUB)) {
      // RHS is canonically cheaper to compute. Skip inspecting the LHS if
      // there's no point.
      computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,
                          Depth + 1);

      // Adding positive and negative infinity produces NaN.
      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
          (KnownLHS.isKnownNeverInfinity() ||
           KnownRHS.isKnownNeverInfinity()))
        Known.knownNot(fcNan);

      if (Opcode == TargetOpcode::G_FADD ||
          Opcode == TargetOpcode::G_STRICT_FADD) {
        // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
        // ... (negative-zero reasoning based on the denormal mode)
      } else {
        // Only fsub -0, +0 can return -0.
        // ...
      }
    }
    break;
  }
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_STRICT_FMUL: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    // X * X is always non-negative or a NaN.
    if (LHS == RHS)
      Known.knownNot(fcNegative);

    if ((InterestedClasses & fcNan) != fcNan)
      break;

    // fcSubnormal is only needed in case of DAZ.
    const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;

    KnownFPClass KnownLHS, KnownRHS;
    computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);
    if (!KnownRHS.isKnownNeverNaN())
      break;

    computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);
    if (!KnownLHS.isKnownNeverNaN())
      break;

    // ... (0 * inf => NaN and sign-bit reasoning elided)
    break;
  }
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();

    if (LHS == RHS) {
      // TODO: Could filter out snan if we checked the operand.
      if (Opcode == TargetOpcode::G_FDIV) {
        // X / X is always exactly 1.0 or a NaN.
        Known.KnownFPClasses = fcNan | fcPosNormal;
      } else {
        // X % X is always exactly [+-]0.0 or a NaN.
        Known.KnownFPClasses = fcNan | fcZero;
      }
      break;
    }

    const bool WantNan = (InterestedClasses & fcNan) != fcNone;
    const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
    const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
                              (InterestedClasses & fcPositive) != fcNone;
    if (!WantNan && !WantNegative && !WantPositive)
      break;

    KnownFPClass KnownLHS, KnownRHS;

    computeKnownFPClass(RHS, DemandedElts, fcNan | fcInf | fcZero | fcNegative,
                        KnownRHS, Depth + 1);

    bool KnowSomethingUseful =
        KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcZero);

    if (KnowSomethingUseful || WantPositive) {
      const FPClassTest InterestedLHS =
          WantPositive ? fcAllFlags
                       : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;

      computeKnownFPClass(LHS, DemandedElts,
                          InterestedClasses & InterestedLHS, KnownLHS,
                          Depth + 1);
    }

    if (Opcode == TargetOpcode::G_FDIV) {
      // Only 0/0, Inf/Inf produce NaN.
      // ...
    } else {
      // Inf REM x and x REM 0 produce NaN.
      // ...
    }
    break;
  }
  case TargetOpcode::G_FPEXT: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    // Infinity, NaN and zero propagate from the source.
    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
                        Depth + 1);

    LLT SrcTy = MRI.getType(Src).getScalarType();
    const fltSemantics &SrcSem = getFltSemanticForLLT(SrcTy);
    const fltSemantics &DstSem =
        getFltSemanticForLLT(MRI.getType(Dst).getScalarType());

    // All subnormal inputs should be in the normal range in the result type.
    if (APFloat::isRepresentableAsNormalIn(SrcSem, DstSem)) {
      if (Known.KnownFPClasses & fcPosSubnormal)
        Known.KnownFPClasses |= fcPosNormal;
      if (Known.KnownFPClasses & fcNegSubnormal)
        Known.KnownFPClasses |= fcNegNormal;
      Known.knownNot(fcSubnormal);
    }
    break;
  }
  case TargetOpcode::G_FPTRUNC: {
    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
                                  Depth);
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    // Cannot produce NaN.
    Known.knownNot(fcNan);

    // Integers cannot be subnormal.
    Known.knownNot(fcSubnormal);

    // sitofp and uitofp turn into +0.0 for zero.
    Known.knownNot(fcNegZero);
    if (Opcode == TargetOpcode::G_UITOFP)
      Known.signBitMustBeZero();

    Register Val = MI.getOperand(1).getReg();
    LLT Ty = MRI.getType(Val);

    if (InterestedClasses & fcInf) {
      // Get width of largest magnitude integer (remove a bit if signed).
      // This still works for a signed minimum value because the largest FP
      // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
      int IntSize = Ty.getScalarSizeInBits();
      if (Opcode == TargetOpcode::G_SITOFP)
        --IntSize;

      // If the exponent of the largest finite FP value can hold the largest
      // integer, the result of the cast must be finite.
      LLT FPTy = DstTy.getScalarType();
      const fltSemantics &FltSem = getFltSemanticForLLT(FPTy);
      if (ilogb(APFloat::getLargest(FltSem)) >= IntSize)
        Known.knownNot(fcInf);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS: {
    GMergeLikeInstr &Merge = cast<GMergeLikeInstr>(MI);

    if (!DstTy.isFixedVector())
      break;

    bool First = true;
    for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {
      bool NeedsElt = DemandedElts[Idx];

      Register Src = Merge.getSourceReg(Idx);
      if (NeedsElt) {
        if (First) {
          computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);
          First = false;
        } else {
          KnownFPClass Known2;
          computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);
          Known |= Known2;
        }

        // If we don't know any bits, early out.
        if (Known.isUnknown())
          break;
      }
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    // Look through extract element. If the index is non-constant or
    // out-of-range demand all elements, otherwise just the extracted element.
    GExtractVectorElement &Extract = cast<GExtractVectorElement>(MI);
    Register Vec = Extract.getVectorReg();
    Register Idx = Extract.getIndexReg();

    auto CIdx = getIConstantVRegVal(Idx, MRI);

    LLT VecTy = MRI.getType(Vec);
    if (VecTy.isFixedVector()) {
      unsigned NumElts = VecTy.getNumElements();
      APInt DemandedVecElts = APInt::getAllOnes(NumElts);
      if (CIdx && CIdx->ult(NumElts))
        DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
      return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses,
                                 Known, Depth + 1);
    }
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    GInsertVectorElement &Insert = cast<GInsertVectorElement>(MI);
    Register Vec = Insert.getVectorReg();
    Register Elt = Insert.getElementReg();
    Register Idx = Insert.getIndexReg();

    LLT VecTy = MRI.getType(Vec);

    if (VecTy.isScalableVector())
      return;

    auto CIdx = getIConstantVRegVal(Idx, MRI);

    unsigned NumElts = DemandedElts.getBitWidth();
    APInt DemandedVecElts = DemandedElts;
    bool NeedsElt = true;
    // If we know the index we are inserting to, clear it from Vec check.
    if (CIdx && CIdx->ult(NumElts)) {
      DemandedVecElts.clearBit(CIdx->getZExtValue());
      NeedsElt = DemandedElts[CIdx->getZExtValue()];
    }

    // Do we demand the inserted element?
    if (NeedsElt) {
      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    } else {
      Known.KnownFPClasses = fcNone;
    }

    // Do we need any more elements from Vec?
    if (!DemandedVecElts.isZero()) {
      KnownFPClass Known2;
      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
                          Depth + 1);
      Known |= Known2;
    }
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    GShuffleVector &Shuf = cast<GShuffleVector>(MI);
    APInt DemandedLHS, DemandedRHS;
    if (DstTy.isScalableVector()) {
      assert(DemandedElts == APInt(1, 1));
      DemandedLHS = DemandedRHS = DemandedElts;
    } else {
      unsigned NumElts = MRI.getType(Shuf.getSrc1Reg()).getNumElements();
      if (!getShuffleDemandedElts(NumElts, Shuf.getMask(), DemandedElts,
                                  DemandedLHS, DemandedRHS))
        return;
    }

    if (!!DemandedLHS) {
      Register LHS = Shuf.getSrc1Reg();
      computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
                          Depth + 1);

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    } else {
      Known.KnownFPClasses = fcNone;
    }

    if (!!DemandedRHS) {
      KnownFPClass Known2;
      Register RHS = Shuf.getSrc2Reg();
      computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
                          Depth + 1);
      Known |= Known2;
    }
    break;
  }
  case TargetOpcode::COPY: {
    Register Src = MI.getOperand(1).getReg();

    if (!Src.isVirtual())
      return;

    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
                        Depth + 1);
    break;
  }
  }
}
KnownFPClass
GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,
                                        FPClassTest InterestedClasses,
                                        unsigned Depth) {
  KnownFPClass KnownClasses;
  computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);
  return KnownClasses;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, FPClassTest InterestedClasses, unsigned Depth) {
  KnownFPClass Known;
  computeKnownFPClass(R, Known, InterestedClasses, Depth);
  return Known;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, const APInt &DemandedElts, uint32_t Flags,
    FPClassTest InterestedClasses, unsigned Depth) {
  if (Flags & MachineInstr::MIFlag::FmNoNans)
    InterestedClasses &= ~fcNan;
  if (Flags & MachineInstr::MIFlag::FmNoInfs)
    InterestedClasses &= ~fcInf;

  KnownFPClass Result =
      computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);

  if (Flags & MachineInstr::MIFlag::FmNoNans)
    Result.KnownFPClasses &= ~fcNan;
  if (Flags & MachineInstr::MIFlag::FmNoInfs)
    Result.KnownFPClasses &= ~fcInf;
  return Result;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, uint32_t Flags, FPClassTest InterestedClasses,
    unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);
}
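// Illustrative usage sketch (not part of the original file): a fold that is
// only valid when a value can never be NaN could use the returning overload
// directly. `VT` and `Reg` are assumed to come from the surrounding pass.
static bool canIgnoreNaNs(llvm::GISelValueTracking &VT, llvm::Register Reg) {
  // Only fcNan membership is interesting, which lets the recursion above
  // skip work that cannot affect the answer.
  llvm::KnownFPClass Known = VT.computeKnownFPClass(Reg, llvm::fcNan);
  return Known.isKnownNeverNaN();
}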
unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0,
                                                   Register Src1,
                                                   const APInt &DemandedElts,
                                                   unsigned Depth) {
  // Test src1 first, since we canonicalize simpler expressions to the RHS.
  unsigned Src1SignBits = computeNumSignBits(Src1, DemandedElts, Depth);
  if (Src1SignBits == 1)
    return 1; // Early out.
  return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
}
/// Compute the known number of sign bits with attached range metadata in the
/// memory operand. If this is an extending load, accounts for the behavior of
/// the sign bits.
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld,
                                                    unsigned TyBits) {
  const MDNode *Ranges = Ld->getRanges();
  if (!Ranges)
    return 1;

  ConstantRange CR = getConstantRangeFromMetadata(*Ranges);
  if (TyBits > CR.getBitWidth()) {
    switch (Ld->getOpcode()) {
    case TargetOpcode::G_SEXTLOAD:
      CR = CR.signExtend(TyBits);
      break;
    case TargetOpcode::G_ZEXTLOAD:
      CR = CR.zeroExtend(TyBits);
      break;
    default:
      break;
    }
  }

  return std::min(CR.getSignedMin().getNumSignBits(),
                  CR.getSignedMax().getNumSignBits());
}
unsigned GISelValueTracking::computeNumSignBits(Register R,
                                                const APInt &DemandedElts,
                                                unsigned Depth) {
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::G_CONSTANT)
    return MI.getOperand(1).getCImm()->getValue().getNumSignBits();

  if (Depth == getMaxDepth())
    return 1;

  if (!DemandedElts)
    return 1; // No demanded elts, better to assume we don't know anything.

  LLT DstTy = MRI.getType(R);
  const unsigned TyBits = DstTy.getScalarSizeInBits();

  // Handle the case where this is called on a register that does not have a
  // type constraint.
  if (!DstTy.isValid())
    return 1;

  unsigned FirstAnswer = 1;
  switch (Opcode) {
  case TargetOpcode::COPY: {
    MachineOperand &Src = MI.getOperand(1);
    if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
        MRI.getType(Src.getReg()).isValid()) {
      // Don't increment Depth for this one since we didn't do any work.
      return computeNumSignBits(Src.getReg(), DemandedElts, Depth);
    }
    return 1;
  }
  case TargetOpcode::G_SEXT: {
    Register Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(Src);
    unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
    return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp;
  }
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    // Max of the input and what this extends.
    Register Src = MI.getOperand(1).getReg();
    unsigned SrcBits = MI.getOperand(2).getImm();
    unsigned InRegBits = TyBits - SrcBits + 1;
    return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1),
                    InRegBits);
  }
  case TargetOpcode::G_LOAD: {
    GLoad *Ld = cast<GLoad>(&MI);
    if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
      break;

    return computeNumSignBitsFromRangeMetadata(Ld, TyBits);
  }
  case TargetOpcode::G_SEXTLOAD: {
    GSExtLoad *Ld = cast<GSExtLoad>(&MI);

    // FIXME: We need an in-memory type representation.
    if (DstTy.isVector())
      return 1;

    unsigned NumBits = computeNumSignBitsFromRangeMetadata(Ld, TyBits);
    if (NumBits != 1)
      return NumBits;

    // e.g. i16->i32 = '17' bits known.
    const MachineMemOperand *MMO = *MI.memoperands_begin();
    return TyBits - MMO->getSizeInBits().getValue() + 1;
  }
  case TargetOpcode::G_ZEXTLOAD: {
    GZExtLoad *Ld = cast<GZExtLoad>(&MI);

    // FIXME: We need an in-memory type representation.
    if (DstTy.isVector())
      return 1;

    unsigned NumBits = computeNumSignBitsFromRangeMetadata(Ld, TyBits);
    if (NumBits != 1)
      return NumBits;

    // e.g. i16->i32 = '16' bits known.
    const MachineMemOperand *MMO = *MI.memoperands_begin();
    return TyBits - MMO->getSizeInBits().getValue();
  }
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR: {
    Register Src1 = MI.getOperand(1).getReg();
    unsigned Src1NumSignBits =
        computeNumSignBits(Src1, DemandedElts, Depth + 1);
    if (Src1NumSignBits != 1) {
      Register Src2 = MI.getOperand(2).getReg();
      unsigned Src2NumSignBits =
          computeNumSignBits(Src2, DemandedElts, Depth + 1);
      FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
    }
    break;
  }
  case TargetOpcode::G_ASHR: {
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    FirstAnswer = computeNumSignBits(Src1, DemandedElts, Depth + 1);
    if (auto C = getValidMinimumShiftAmount(Src2, DemandedElts, Depth + 1))
      FirstAnswer = std::min<uint64_t>(FirstAnswer + *C, TyBits);
    break;
  }
  case TargetOpcode::G_SHL: {
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    if (std::optional<ConstantRange> ShAmtRange =
            getValidShiftAmountRange(Src2, DemandedElts, Depth + 1)) {
      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
      // shl (ext X), C can keep the sign bits of X if the extension bits are
      // never shifted out.
      MachineInstr *ExtMI = MRI.getVRegDef(Src1);
      unsigned ExtOpc = ExtMI->getOpcode();
      if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
          ExtOpc == TargetOpcode::G_ANYEXT) {
        LLT ExtTy = MRI.getType(Src1);
        Register Extendee = ExtMI->getOperand(1).getReg();
        LLT ExtendeeTy = MRI.getType(Extendee);
        uint64_t SizeDiff =
            ExtTy.getScalarSizeInBits() - ExtendeeTy.getScalarSizeInBits();
        if (SizeDiff <= MinShAmt) {
          unsigned Tmp =
              SizeDiff + computeNumSignBits(Extendee, DemandedElts, Depth + 1);
          if (MaxShAmt < Tmp)
            return Tmp - MaxShAmt;
        }
      }
      // shl destroys sign bits; ensure it doesn't shift out all sign bits.
      unsigned Tmp = computeNumSignBits(Src1, DemandedElts, Depth + 1);
      if (MaxShAmt < Tmp)
        return Tmp - MaxShAmt;
    }
    break;
  }
  case TargetOpcode::G_TRUNC: {
    Register Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(Src);

    // Check if the sign bits of the source go down as far as the truncated
    // value.
    unsigned DstTyBits = DstTy.getScalarSizeInBits();
    unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
    unsigned NumSrcSignBits = computeNumSignBits(Src, DemandedElts, Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - DstTyBits))
      return NumSrcSignBits - (NumSrcBits - DstTyBits);
    break;
  }
  case TargetOpcode::G_SELECT: {
    return computeNumSignBitsMin(MI.getOperand(2).getReg(),
                                 MI.getOperand(3).getReg(), DemandedElts,
                                 Depth + 1);
  }
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
    // TODO: Handle clamp pattern with number of sign bits for SMIN/SMAX.
    return computeNumSignBitsMin(MI.getOperand(1).getReg(),
                                 MI.getOperand(2).getReg(), DemandedElts,
                                 Depth + 1);
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO: {
    // If compares return 0/-1, all bits are sign bits. These operations are
    // only available for integers, so the boolean must be integer-based.
    if (MI.getOperand(1).getReg() == R) {
      if (TL.getBooleanContents(DstTy.isVector(), false) ==
          TargetLowering::ZeroOrNegativeOneBooleanContent)
        return TyBits;
    }
    break;
  }
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    bool IsFP = Opcode == TargetOpcode::G_FCMP;
    if (TyBits == 1)
      break;
    auto BC = TL.getBooleanContents(DstTy.isVector(), IsFP);
    if (BC == TargetLowering::ZeroOrNegativeOneBooleanContent)
      return TyBits; // All bits are sign bits.
    if (BC == TargetLowering::ZeroOrOneBooleanContent)
      return TyBits - 1; // Every always-zero bit is a sign bit.
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the build vector.
    FirstAnswer = TyBits;
    APInt SingleDemandedElt(1, 1);
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      if (!DemandedElts[I])
        continue;

      unsigned Tmp2 = computeNumSignBits(MI.getOperand(I + 1).getReg(),
                                         SingleDemandedElt, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);

      // If we don't know anything, early out.
      if (FirstAnswer == 1)
        break;
    }
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
      break;
    FirstAnswer = TyBits;
    // Determine the minimum number of sign bits across all demanded
    // elts of the input vectors. Early out if the result is already 1.
    unsigned NumSubVectorElts =
        MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      APInt DemandedSub =
          DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
      if (!DemandedSub)
        continue;
      unsigned Tmp2 = computeNumSignBits(MI.getOperand(I + 1).getReg(),
                                         DemandedSub, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);

      // If we don't know anything, early out.
      if (FirstAnswer == 1)
        break;
    }
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the shuffle.
    APInt DemandedLHS, DemandedRHS;
    Register Src1 = MI.getOperand(1).getReg();
    unsigned NumElts = MRI.getType(Src1).getNumElements();
    if (!getShuffleDemandedElts(NumElts, cast<GShuffleVector>(MI).getMask(),
                                DemandedElts, DemandedLHS, DemandedRHS))
      return 1;

    if (!!DemandedLHS)
      FirstAnswer = computeNumSignBits(Src1, DemandedLHS, Depth + 1);
    // If we don't know anything, early out and try the computeKnownBits
    // fallback.
    if (FirstAnswer == 1)
      break;
    if (!!DemandedRHS) {
      unsigned Tmp2 = computeNumSignBits(MI.getOperand(2).getReg(),
                                         DemandedRHS, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);
    }
    break;
  }
  case TargetOpcode::G_SPLAT_VECTOR: {
    // Check if the sign bits of the source go down as far as the truncated
    // value.
    Register Src = MI.getOperand(1).getReg();
    unsigned NumSrcSignBits = computeNumSignBits(Src, APInt(1, 1), Depth + 1);
    unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
    if (NumSrcSignBits > (NumSrcBits - TyBits))
      return NumSrcSignBits - (NumSrcBits - TyBits);
    break;
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
  default: {
    unsigned NumBits =
        TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI,
                                            Depth);
    FirstAnswer = std::max(FirstAnswer, NumBits);
    break;
  }
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  KnownBits Known = getKnownBits(R, DemandedElts, Depth);
  APInt Mask;
  if (Known.isNonNegative()) { // sign bit is 0
    Mask = Known.Zero;
  } else if (Known.isNegative()) { // sign bit is 1
    Mask = Known.One;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Other bits in Mask may be
  // set to the left of the sign bit as well.
  Mask <<= Mask.getBitWidth() - TyBits;
  return std::max(FirstAnswer, Mask.countl_one());
}
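// Illustrative sketch (not part of the original file): the G_CONSTANT
// early-out above defers to APInt::getNumSignBits, whose meaning is easy to
// check on small values.
static void numSignBitsDemo() {
  // i16 -1 is all ones: every bit matches the sign bit.
  assert(llvm::APInt(16, -1, /*isSigned=*/true).getNumSignBits() == 16);
  // i16 0x00FF: the sign bit 0 repeats down through bit 8 -> 8 sign bits.
  assert(llvm::APInt(16, 0x00FF).getNumSignBits() == 8);
}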
unsigned GISelValueTracking::computeNumSignBits(Register R, unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return computeNumSignBits(R, DemandedElts, Depth);
}
std::optional<ConstantRange> GISelValueTracking::getValidShiftAmountRange(
    Register R, const APInt &DemandedElts, unsigned Depth) {
  // Shifting more than the bitwidth is not valid.
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();

  LLT Ty = MRI.getType(R);
  unsigned BitWidth = Ty.getScalarSizeInBits();

  if (Opcode == TargetOpcode::G_CONSTANT) {
    const APInt &ShAmt = MI.getOperand(1).getCImm()->getValue();
    if (ShAmt.uge(BitWidth))
      return std::nullopt;
    return ConstantRange(ShAmt);
  }

  if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
    const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      if (!DemandedElts[I])
        continue;
      MachineInstr *Op = MRI.getVRegDef(MI.getOperand(I + 1).getReg());
      if (Op->getOpcode() != TargetOpcode::G_CONSTANT) {
        MinAmt = MaxAmt = nullptr;
        break;
      }

      const APInt &ShAmt = Op->getOperand(1).getCImm()->getValue();
      if (ShAmt.uge(BitWidth))
        return std::nullopt;
      if (!MinAmt || MinAmt->ugt(ShAmt))
        MinAmt = &ShAmt;
      if (!MaxAmt || MaxAmt->ult(ShAmt))
        MaxAmt = &ShAmt;
    }

    assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
           "Failed to find matching min/max shift amounts");
    if (MinAmt && MaxAmt)
      return ConstantRange(*MinAmt, *MaxAmt + 1);
  }

  // Use computeKnownBits to find a hidden constant/knownbits (usually type
  // legalized), e.g. hidden behind bitcasts/build_vector/casts.
  KnownBits KnownAmt = getKnownBits(R, DemandedElts, Depth);
  if (KnownAmt.getMaxValue().ult(BitWidth))
    return ConstantRange::fromKnownBits(KnownAmt, /*IsSigned=*/false);

  return std::nullopt;
}
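// Illustrative sketch (not part of the original file): the fallback above
// turns the known bits of a non-constant shift amount into a ConstantRange
// via ConstantRange::fromKnownBits.
static void shiftAmountRangeDemo() {
  llvm::KnownBits Amt(8);
  Amt.Zero = llvm::APInt(8, 0xF8); // bits 7..3 known zero => amount < 8
  llvm::ConstantRange Range =
      llvm::ConstantRange::fromKnownBits(Amt, /*IsSigned=*/false);
  assert(Range.getUnsignedMax().ule(7) && "every shift amount is in bounds");
}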
std::optional<uint64_t> GISelValueTracking::getValidMinimumShiftAmount(
    Register R, const APInt &DemandedElts, unsigned Depth) {
  if (std::optional<ConstantRange> AmtRange =
          getValidShiftAmountRange(R, DemandedElts, Depth))
    return AmtRange->getUnsignedMin().getZExtValue();
  return std::nullopt;
}
GISelValueTracking &
GISelValueTrackingInfoAnalysisLegacy::get(MachineFunction &MF) {
  if (!Info) {
    unsigned MaxDepth =
        MF.getTarget().getOptLevel() == CodeGenOptLevel::None ? 2 : 6;
    Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
  }
  return *Info;
}
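// Illustrative sketch (not part of the original file): how a machine pass
// would consume this analysis through the legacy wrapper reconstructed
// above. The pass itself is hypothetical; only the analysis API is real.
namespace {
class ExampleValueTrackingPass : public llvm::MachineFunctionPass {
public:
  static char ID;
  ExampleValueTrackingPass() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
    AU.addRequired<llvm::GISelValueTrackingInfoAnalysisLegacy>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(llvm::MachineFunction &MF) override {
    llvm::GISelValueTracking &VT =
        getAnalysis<llvm::GISelValueTrackingInfoAnalysisLegacy>().get(MF);
    (void)VT; // query VT.getKnownBits(Reg) / VT.computeNumSignBits(Reg) here
    return false; // analysis only, nothing changed
  }
};
char ExampleValueTrackingPass::ID = 0;
} // end anonymous namespace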
// Printer pass body: for every generic virtual register defined by an
// instruction, dump the known bits and the sign-bit count (the enclosing
// loops over blocks and instructions are elided in this listing).
      for (MachineOperand &MO : MI.defs()) {
        if (!MO.isReg() || MO.getReg().isPhysical())
          continue;
        Register Reg = MO.getReg();
        if (!MRI.getType(Reg).isValid())
          continue;
        KnownBits Known = VTA.getKnownBits(Reg);
        unsigned SignedBits = VTA.computeNumSignBits(Reg);
        OS << "  " << MO << " KnownBits:" << Known << " SignBits:"
           << SignedBits << '\n';
      }