#define DEBUG_TYPE "gisel-known-bits"

INITIALIZE_PASS(GISelValueTrackingInfoAnalysisLegacy, DEBUG_TYPE,
                "Analysis for ComputingKnownBits", false, true)

GISelValueTracking::GISelValueTracking(MachineFunction &MF, unsigned MaxDepth)
    : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
      DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}
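// computeKnownAlignment walks the instruction defining R and returns a
// conservative lower bound on its alignment; unhandled opcodes defer to the
// target via computeKnownAlignForTargetInstr.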
Align GISelValueTracking::computeKnownAlignment(Register R, unsigned Depth) {
  const MachineInstr *MI = MRI.getVRegDef(R);
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY:
    return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
  case TargetOpcode::G_ASSERT_ALIGN: {
    // TODO: Min with source.
    return Align(MI->getOperand(2).getImm());
  }
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI->getOperand(1).getIndex();
    return MF.getFrameInfo().getObjectAlign(FrameIdx);
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
  default:
    return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);
  }
}
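// The getKnownBits overloads below are thin wrappers: they build the demanded
// element mask for the register's type and defer to computeKnownBitsImpl.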
KnownBits GISelValueTracking::getKnownBits(MachineInstr &MI) {
  assert(MI.getNumExplicitDefs() == 1 &&
         "expected single return generic instruction");
  return getKnownBits(MI.getOperand(0).getReg());
}

KnownBits GISelValueTracking::getKnownBits(Register R) {
  const LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return getKnownBits(R, DemandedElts);
}

KnownBits GISelValueTracking::getKnownBits(Register R,
                                           const APInt &DemandedElts,
                                           unsigned Depth) {
  // ...
}

bool GISelValueTracking::signBitIsZero(Register R) {
  LLT Ty = MRI.getType(R);
  unsigned BitWidth = Ty.getScalarSizeInBits();
  return maskedValueIsZero(R, APInt::getSignMask(BitWidth));
}
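// e.g. for a 32-bit scalar, signBitIsZero asks maskedValueIsZero with the
// mask APInt::getSignMask(32) == 0x80000000.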
static LLVM_ATTRIBUTE_UNUSED void
dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
  dbgs() << "[" << Depth << "] Computed for: " << MI << "[" << Depth
         << "] Known: 0x" << toString(Known.Zero | Known.One, 16, false)
         << "\n"
         << "[" << Depth << "] Zero: 0x" << toString(Known.Zero, 16, false)
         << "\n"
         << "[" << Depth << "] One:  0x" << toString(Known.One, 16, false)
         << "\n";
}
void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             unsigned Depth) {
  // Test src1 first, since we canonicalize simpler expressions to the RHS.
  computeKnownBitsImpl(Src1, Known, DemandedElts, Depth);

  // If we don't know any bits, early out.
  if (Known.isUnknown())
    return;

  KnownBits Known2;
  computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);

  // Only bits known in both operands survive.
  Known = Known.intersectWith(Known2);
}

void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              unsigned Depth) {
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();
  LLT DstTy = MRI.getType(R);
  // ...
  assert((!DstTy.isFixedVector() ||
          DstTy.getNumElements() == DemandedElts.getBitWidth()) &&
         "DemandedElt width should equal the fixed vector number of elements");
  assert((DstTy.isFixedVector() || DemandedElts.getBitWidth() == 1) &&
         "DemandedElt width should be 1 for scalars or scalable vectors");
  // ...
  switch (Opcode) {
  default:
    TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
                                      Depth);
    break;
  case TargetOpcode::G_BUILD_VECTOR: {
    // Collect the known bits that are shared by every demanded vector
    // element, skipping lanes the caller does not demand.
    // ...
    if (!DemandedElts[I])
      continue;
    // ...
    break;
  }
  case TargetOpcode::G_SPLAT_VECTOR: {
    // ...
    break;
  }
  case TargetOpcode::COPY:
  case TargetOpcode::G_PHI:
  case TargetOpcode::PHI: {
    // Destination registers should not have subregisters at this point of the
    // pipeline, otherwise the main live range would be defined more than once,
    // which is against SSA.
    assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
    // ...
    for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
      const MachineOperand &Src = MI.getOperand(Idx);
      Register SrcReg = Src.getReg();
      if (SrcReg.isVirtual() && Src.getSubReg() == 0 &&
          MRI.getType(SrcReg).isValid()) {
        // For plain COPYs, don't increase the depth.
        computeKnownBitsImpl(SrcReg, Known2, DemandedElts,
                             Depth + (Opcode != TargetOpcode::COPY));
        // ...
      }
      // ...
    }
    break;
  }
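  // The PHI/COPY walk intersects the known bits of every incoming value, so
  // the result is only as strong as the weakest input; one unknown input
  // collapses the whole result.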
  case TargetOpcode::G_CONSTANT: {
    Known = KnownBits::makeConstant(MI.getOperand(1).getCImm()->getValue());
    break;
  }
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI.getOperand(1).getIndex();
    TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
    break;
  }
  case TargetOpcode::G_SUB: {
    // ...
    break;
  }
  case TargetOpcode::G_XOR: {
    // ...
    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    // G_PTR_ADD behaves like G_ADD on integral address spaces; bail out on
    // non-integral ones, where pointer arithmetic is not plain integer math.
    LLT Ty = MRI.getType(MI.getOperand(1).getReg());
    if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
      break;
    [[fallthrough]];
  }
  case TargetOpcode::G_ADD: {
    // ...
    break;
  }
  case TargetOpcode::G_AND: {
    // ...
    break;
  }
  case TargetOpcode::G_OR: {
    // ...
    break;
  }
  case TargetOpcode::G_MUL: {
    // ...
    break;
  }
  case TargetOpcode::G_SELECT: {
    computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),
                        Known, DemandedElts, Depth + 1);
    break;
  }
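  // A select can produce either arm, so only bits that agree in both arms are
  // known; computeKnownBitsMin computes each arm and intersects the results.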
  case TargetOpcode::G_SMIN: {
    // TODO: Handle clamp pattern with number of sign bits.
    KnownBits KnownRHS;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
                         Depth + 1);
    Known = KnownBits::smin(Known, KnownRHS);
    break;
  }
  case TargetOpcode::G_SMAX: {
    // ... (as G_SMIN, with KnownBits::smax)
    break;
  }
  case TargetOpcode::G_UMIN: {
    // ... (KnownBits::umin)
    break;
  }
  case TargetOpcode::G_UMAX: {
    // ... (KnownBits::umax)
    break;
  }
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    if (DstTy.isVector())
      break;
    if (TL.getBooleanContents(DstTy.isVector(),
                              Opcode == TargetOpcode::G_FCMP) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case TargetOpcode::G_SEXT: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    // If the sign bit is known zero or one, sext extends it to the top bits;
    // otherwise the top bits are unknown.
    Known = Known.sext(BitWidth);
    break;
  }
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.sextInReg(MI.getOperand(2).getImm());
    break;
  }
  case TargetOpcode::G_ANYEXT: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
                         Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
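  // Unlike G_ZEXT/G_SEXT, G_ANYEXT makes no promise about the new high bits,
  // so KnownBits::anyext leaves them entirely unknown.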
  case TargetOpcode::G_LOAD: {
    // Take known bits from !range metadata on the memory operand, if any.
    // ...
    break;
  }
  case TargetOpcode::G_SEXTLOAD:
  case TargetOpcode::G_ZEXTLOAD: {
    if (DstTy.isVector())
      break;
    // ...
    Known = Opcode == TargetOpcode::G_SEXTLOAD
                ? KnownRange.sext(Known.getBitWidth())
                : KnownRange.zext(Known.getBitWidth());
    break;
  }
  case TargetOpcode::G_ASHR: {
    // ... (KnownBits::ashr on the two operands)
    break;
  }
  case TargetOpcode::G_LSHR: {
    // ... (KnownBits::lshr)
    break;
  }
  case TargetOpcode::G_SHL: {
    // ... (KnownBits::shl)
    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
    if (DstTy.isVector())
      break;
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_TRUNC: {
    // ... (zero-extend or truncate the source's known bits to BitWidth)
    break;
  }
  case TargetOpcode::G_ASSERT_ZEXT: {
    Register SrcReg = MI.getOperand(1).getReg();
    computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);

    unsigned SrcBitWidth = MI.getOperand(2).getImm();
    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    APInt InMask = APInt::getLowBitsSet(BitWidth, SrcBitWidth);
    // Everything above the asserted width is known zero; drop conflicting
    // one-bits.
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    int64_t LogOfAlign = Log2_64(MI.getOperand(2).getImm());
    // An aligned value has that many trailing zero bits.
    Known.Zero.setLowBits(LogOfAlign);
    Known.One.clearLowBits(LogOfAlign);
    break;
  }
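  // e.g. a G_ASSERT_ALIGN with alignment 16 yields LogOfAlign == 4: the low
  // four bits become known zero.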
  case TargetOpcode::G_MERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();
    unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();

    for (unsigned I = 0; I != NumOps - 1; ++I) {
      KnownBits SrcOpKnown;
      computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), SrcOpKnown,
                           DemandedElts, Depth + 1);
      Known.insertBits(SrcOpKnown, I * OpSize);
    }
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();
    Register SrcReg = MI.getOperand(NumOps - 1).getReg();
    LLT SrcTy = MRI.getType(SrcReg);

    if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.getScalarType())
      return; // TODO: Handle vector->subelement unmerges.

    // Figure out which result operand R is.
    unsigned DstIdx = 0;
    for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
         ++DstIdx)
      ;

    APInt SubDemandedElts = DemandedElts;
    if (SrcTy.isVector()) {
      unsigned DstLanes = DstTy.isVector() ? DstTy.getNumElements() : 1;
      SubDemandedElts =
          DemandedElts.zext(SrcTy.getNumElements()).shl(DstIdx * DstLanes);
    }

    KnownBits SrcOpKnown;
    computeKnownBitsImpl(SrcReg, SrcOpKnown, SubDemandedElts, Depth + 1);

    if (SrcTy.isVector())
      Known = std::move(SrcOpKnown);
    else
      Known = SrcOpKnown.extractBits(DstTy.getSizeInBits(),
                                     DstIdx * DstTy.getSizeInBits());
    break;
  }
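  // e.g. unmerging a <4 x s32> into two <2 x s32> defs: for the second def
  // (DstIdx == 1, DstLanes == 2) the demanded-lane mask is shifted left by 2
  // so the query targets source lanes 2 and 3.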
  case TargetOpcode::G_BSWAP: {
    Register SrcReg = MI.getOperand(1).getReg();
    computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
    Known = Known.byteSwap();
    break;
  }
  case TargetOpcode::G_BITREVERSE: {
    Register SrcReg = MI.getOperand(1).getReg();
    computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
    Known = Known.reverseBits();
    break;
  }
  case TargetOpcode::G_CTPOP: {
    // The population count fits in log2(BitWidth) + 1 bits, so the high bits
    // of the result are known zero.
    // ...
    break;
  }
  case TargetOpcode::G_UBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
    // ...
    break;
  }
  case TargetOpcode::G_SBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
    // ...
    break;
  }
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SMULO: {
    // R may be the carry/overflow def (operand 1), which behaves like a
    // compare result.
    if (MI.getOperand(1).getReg() == R) {
      if (TL.getBooleanContents(DstTy.isVector(), false) ==
          TargetLowering::ZeroOrOneBooleanContent)
        Known.Zero.setBitsFrom(1);
    }
    break;
  }
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
    KnownBits SrcOpKnown;
    computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
                         Depth + 1);
    // If we have a known 1, its position bounds the leading-zero count.
    unsigned PossibleLZ = SrcOpKnown.countMaxLeadingZeros();
    unsigned LowBits = llvm::bit_width(PossibleLZ);
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    APInt DemandedLHS, DemandedRHS;
    // Translate the demanded output lanes into demanded lanes of the two
    // shuffle sources.
    unsigned NumElts = MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    if (!getShuffleDemandedElts(NumElts, MI.getOperand(3).getShuffleMask(),
                                DemandedElts, DemandedLHS, DemandedRHS))
      break;
    // ... (compute both sides and intersect)
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
      break;
    // Split DemandedElts and intersect across the demanded subvectors.
    unsigned NumSubVectorElts =
        MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    // ...
    APInt DemandedSub =
        DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
    // ...
    break;
  }
static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty) {
  Ty = Ty.getScalarType();
  const fltSemantics &FltSem = getFltSemanticForLLT(Ty);
  DenormalMode Mode = MF.getDenormalMode(FltSem);
  return Mode.Output == DenormalMode::IEEE ||
         Mode.Output == DenormalMode::PositiveZero;
}

void GISelValueTracking::computeKnownFPClass(Register R, KnownFPClass &Known,
                                             FPClassTest InterestedClasses,
                                             unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);
}
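// The routines below mirror ValueTracking's computeKnownFPClass for generic
// MIR: they track which IEEE-754 classes (fcNan, fcInf, fcZero, fcSubnormal,
// fcNormal, each split by sign) a value may belong to, plus an optional known
// sign bit.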
void GISelValueTracking::computeKnownFPClassForFPTrunc(
    const MachineInstr &MI, const APInt &DemandedElts,
    FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth) {
  // ...
  Register Val = MI.getOperand(1).getReg();
  KnownFPClass KnownSrc;
  computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                      Depth + 1);
  // The sign is preserved and NaNs propagate through the truncation.
  // ...
}
void GISelValueTracking::computeKnownFPClass(Register R,
                                             const APInt &DemandedElts,
                                             FPClassTest InterestedClasses,
                                             KnownFPClass &Known,
                                             unsigned Depth) {
  assert(Known.isUnknown() && "should not be called with known information");
  // ...
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();
  LLT DstTy = MRI.getType(R);
  // ...
  if (auto Cst = GFConstant::getConstant(R, MRI)) {
    switch (Cst->getKind()) {
    case GFConstant::GFConstantKind::Scalar: {
      auto APF = Cst->getScalarValue();
      Known.KnownFPClasses = APF.classify();
      Known.SignBit = APF.isNegative();
      break;
    }
    // ...
    case GFConstant::GFConstantKind::FixedVector: {
      bool SignBitAllZero = true;
      bool SignBitAllOne = true;
      Known.KnownFPClasses = fcNone;
      for (auto C : *Cst) {
        Known.KnownFPClasses |= C.classify();
        if (C.isNegative())
          SignBitAllZero = false;
        else
          SignBitAllOne = false;
      }
      // The sign bit is known only if every element agrees on it.
      if (SignBitAllOne != SignBitAllZero)
        Known.SignBit = SignBitAllOne;
      break;
    }
    }
    return;
  }

  FPClassTest KnownNotFromFlags = fcNone;
  if (MI.getFlag(MachineInstr::FmNoNans))
    KnownNotFromFlags |= fcNan;
  if (MI.getFlag(MachineInstr::FmNoInfs))
    KnownNotFromFlags |= fcInf;

  // We no longer need to find out about these bits from inputs if we can
  // assume them from the flags.
  InterestedClasses &= ~KnownNotFromFlags;

  auto ClearClassesFromFlags =
      make_scope_exit([=, &Known] { Known.knownNot(KnownNotFromFlags); });

  const MachineFunction *MF = MI.getMF();

  switch (Opcode) {
  default:
    TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,
                                         Depth);
    break;
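  // Note: the nnan/ninf flags on MI were already folded into
  // KnownNotFromFlags above, and the scope_exit re-applies them on every path
  // out of the switch.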
  case TargetOpcode::G_FNEG: {
    Register Val = MI.getOperand(1).getReg();
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, Depth + 1);
    Known.fneg();
    break;
  }
  case TargetOpcode::G_SELECT: {
    GSelect &Sel = cast<GSelect>(MI);
    Register Cond = Sel.getCondReg();
    Register LHS = Sel.getTrueReg();
    Register RHS = Sel.getFalseReg();

    FPClassTest FilterLHS = fcAllFlags;
    FPClassTest FilterRHS = fcAllFlags;
    Register TestedValue;
    FPClassTest MaskIfTrue = fcAllFlags, MaskIfFalse = fcAllFlags;
    FPClassTest TestedMask = fcNone;

    // If the condition tests one arm with an fcmp or G_IS_FPCLASS, each arm
    // only needs the classes the test lets through.
    if (/* Cond matches a G_FCMP of CmpLHS/CmpRHS (elided) */) {
      bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
      std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
          fcmpImpliesClass(Pred, MF->getFunction(), CmpLHS, CmpRHS,
                           LookThroughFAbsFNeg);
    } else if (mi_match(Cond, MRI,
                        m_GIsFPClass(m_Reg(TestedValue),
                                     m_FPClassTest(TestedMask)))) {
      MaskIfTrue = TestedMask;
      MaskIfFalse = ~TestedMask;
    }

    if (TestedValue == LHS) {
      // match !isnan(x) ? x : y
      FilterLHS = MaskIfTrue;
    } else if (TestedValue == RHS) {
      // match !isnan(x) ? y : x
      FilterRHS = MaskIfFalse;
    }

    KnownFPClass Known2;
    computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS,
                        Known, Depth + 1);
    computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
                        Known2, Depth + 1);
    Known |= Known2;
    break;
  }
  case TargetOpcode::G_FCOPYSIGN: {
    Register Magnitude = MI.getOperand(1).getReg();
    Register Sign = MI.getOperand(2).getReg();
    KnownFPClass KnownSign;
    computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
                        Depth + 1);
    computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
                        Depth + 1);
    Known.copysign(KnownSign);
    break;
  }
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMAD: {
    // Only infer for fma x, x, c: the product is never negative, so the
    // result is non-negative whenever the addend is.
    // ...
    KnownFPClass KnownAddend;
    computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,
                        Depth + 1);
    if (KnownAddend.cannotBeOrderedLessThanZero())
      Known.knownNot(fcNegative);
    break;
  }
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_STRICT_FSQRT: {
    KnownFPClass KnownSrc;
    FPClassTest InterestedSrcs = InterestedClasses;
    if (InterestedClasses & fcNan)
      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
    Register Val = MI.getOperand(1).getReg();
    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);
    // sqrt of a value that is never less than -0.0 is never NaN.
    // ...
    break;
  }
  case TargetOpcode::G_FABS: {
    if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
      Register Val = MI.getOperand(1).getReg();
      // If we only care about the sign bit we don't need to inspect the
      // operand.
      computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
                          Depth + 1);
    }
    Known.fabs();
    break;
  }
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FSINCOS: {
    // Return NaN on infinite inputs.
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    Known.knownNot(fcInf);
    if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
      Known.knownNot(fcNan);
    break;
  }
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMAXIMUMNUM:
  case TargetOpcode::G_FMINIMUMNUM: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    KnownFPClass KnownLHS, KnownRHS;
    computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,
                        Depth + 1);
    computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,
                        Depth + 1);

    bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
    Known = KnownLHS | KnownRHS;

    // The *NUM variants only return NaN if both operands are NaN.
    if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
                     Opcode == TargetOpcode::G_FMAXNUM ||
                     Opcode == TargetOpcode::G_FMINIMUMNUM ||
                     Opcode == TargetOpcode::G_FMAXIMUMNUM))
      Known.knownNot(fcNan);

    if (Opcode == TargetOpcode::G_FMAXNUM ||
        Opcode == TargetOpcode::G_FMAXIMUMNUM ||
        Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
      // ...
    } else if (Opcode == TargetOpcode::G_FMAXIMUM) {
      // ...
    } else if (Opcode == TargetOpcode::G_FMINNUM ||
               Opcode == TargetOpcode::G_FMINIMUMNUM ||
               Opcode == TargetOpcode::G_FMINNUM_IEEE) {
      // ...
    } else if (Opcode == TargetOpcode::G_FMINIMUM) {
      // ...
    }

    // Fix up zero handling when denormals or signed zeros are involved.
    if (/* ... elided zero-sign analysis ... */) {
      // ...
    } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
                Opcode == TargetOpcode::G_FMINIMUM) ||
               Opcode == TargetOpcode::G_FMAXIMUMNUM ||
               Opcode == TargetOpcode::G_FMINIMUMNUM ||
               Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
               Opcode == TargetOpcode::G_FMINNUM_IEEE /* || ... */) {
      // ...
    }

    if ((Opcode == TargetOpcode::G_FMAXIMUM ||
         Opcode == TargetOpcode::G_FMAXNUM ||
         Opcode == TargetOpcode::G_FMAXIMUMNUM ||
         Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
        (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
      Known.signBitMustBeZero();
    else if ((Opcode == TargetOpcode::G_FMINIMUM ||
              Opcode == TargetOpcode::G_FMINNUM ||
              Opcode == TargetOpcode::G_FMINIMUMNUM ||
              Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
             (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
      Known.signBitMustBeOne();
    break;
  }
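  // Semantics recap: G_FMINNUM/G_FMAXNUM (and the *IMUMNUM variants) return
  // the non-NaN operand when exactly one input is NaN, so a single NaN-free
  // input rules out fcNan; G_FMINIMUM/G_FMAXIMUM propagate NaN instead.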
  case TargetOpcode::G_FCANONICALIZE: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    // Zero and denormal results depend on the function's denormal mode.
    // ...
    const fltSemantics &FPType = getFltSemanticForLLT(DstTy.getScalarType());
    DenormalMode DenormMode = MF->getDenormalMode(FPType);
    // ...
    break;
  }
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM: {
    Register Val = MI.getOperand(1).getReg();
    // A min/max reduction picks one of the vector elements, so any class
    // information common to all elements holds for the result.
    Known = computeKnownFPClass(Val, MI.getFlags(), InterestedClasses,
                                Depth + 1);
    // ...
    break;
  }
  case TargetOpcode::G_INTRINSIC_TRUNC:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
  case TargetOpcode::G_INTRINSIC_ROUND: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    FPClassTest InterestedSrcs = InterestedClasses;
    // ...
    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);
    // ...
    break;
  }
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FEXP2:
  case TargetOpcode::G_FEXP10: {
    Known.knownNot(fcNegative);
    if ((InterestedClasses & fcNan) == fcNone)
      break;
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    if (KnownSrc.isKnownNeverNaN()) {
      Known.knownNot(fcNan);
      Known.signBitMustBeZero();
    }
    break;
  }
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FLOG10: {
    // log(+inf) -> +inf, log([+-]0.0) -> [-+]inf, log(-x) -> nan.
    // ...
    FPClassTest InterestedSrcs = InterestedClasses;
    // ...
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
                        Depth + 1);
    // ...
    const fltSemantics &FltSem = getFltSemanticForLLT(DstTy.getScalarType());
    DenormalMode Mode = MF->getDenormalMode(FltSem);
    if (KnownSrc.isKnownNeverLogicalZero(Mode))
      Known.knownNot(fcNegInf);
    break;
  }
  case TargetOpcode::G_FPOWI: {
    if ((InterestedClasses & fcNegative) == fcNone)
      break;
    Register Exp = MI.getOperand(2).getReg();
    LLT ExpTy = MRI.getType(Exp);
    KnownBits ExponentKnownBits = getKnownBits(
        Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);

    if (ExponentKnownBits.Zero[0]) { // Is even.
      Known.knownNot(fcNegative);
      break;
    }
    // With an integer exponent, powi is only negative when the base is
    // negative and the exponent is odd.
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);
    if (KnownSrc.isKnownNever(fcNegative))
      Known.knownNot(fcNegative);
    break;
  }
  case TargetOpcode::G_FLDEXP:
  case TargetOpcode::G_STRICT_FLDEXP: {
    Register Val = MI.getOperand(1).getReg();
    KnownFPClass KnownSrc;
    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
                        Depth + 1);
    Known.propagateNaN(KnownSrc, /*PreserveSign=*/true);
    // Scaling by a power of two preserves the sign and can only affect the
    // inf/zero/subnormal classes.
    const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
    if ((InterestedClasses & ExpInfoMask) == fcNone)
      break;
    // ...
  }
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
                                  Depth);
    break;
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_STRICT_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_STRICT_FSUB: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    KnownFPClass KnownLHS, KnownRHS;
    bool WantNegZero = (Opcode == TargetOpcode::G_FADD ||
                        Opcode == TargetOpcode::G_STRICT_FADD) &&
                       (InterestedClasses & fcNegZero) != fcNone;
    bool WantNaN = (InterestedClasses & fcNan) != fcNone;
    bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
    if (!WantNaN && !WantNegative && !WantNegZero)
      break;

    FPClassTest InterestedSrcs = InterestedClasses;
    // ...
    if (InterestedClasses & fcNan)
      InterestedSrcs |= fcInf;
    computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS,
                        Depth + 1);

    if (/* ... */ (Opcode == TargetOpcode::G_FSUB ||
                   Opcode == TargetOpcode::G_STRICT_FSUB)) {
      // ...
    }
    computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,
                        Depth + 1);
    // ...
    // Note: compare the generic opcodes, not IR's Instruction::FAdd.
    if (Opcode == TargetOpcode::G_FADD ||
        Opcode == TargetOpcode::G_STRICT_FADD) {
      // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
      // ...
    }
    // ...
  }
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_STRICT_FMUL: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();
    // X * X is always non-negative or a NaN.
    if (LHS == RHS)
      Known.knownNot(fcNegative);
    if ((InterestedClasses & (fcNan | fcInf | fcZero)) == fcNone)
      break;
    // fcSubnormal is only needed in case of DAZ.
    const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;

    KnownFPClass KnownLHS, KnownRHS;
    computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);
    if (!KnownRHS.isKnownNeverNaN())
      break;
    computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);
    if (!KnownLHS.isKnownNeverNaN())
      break;
    // ...
    break;
  }
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM: {
    Register LHS = MI.getOperand(1).getReg();
    Register RHS = MI.getOperand(2).getReg();

    if (LHS == RHS) {
      // X / X is 1.0 or NaN; X % X is [+-]0.0 or NaN.
      if (Opcode == TargetOpcode::G_FDIV)
        Known.KnownFPClasses = fcNan | fcPosNormal;
      else
        Known.KnownFPClasses = fcNan | fcZero;
      break;
    }

    const bool WantNan = (InterestedClasses & fcNan) != fcNone;
    const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
    const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
                              (InterestedClasses & fcPositive) != fcNone;
    if (!WantNan && !WantNegative && !WantPositive)
      break;

    KnownFPClass KnownLHS, KnownRHS;
    computeKnownFPClass(RHS, DemandedElts, fcNan | fcInf | fcZero | fcNegative,
                        KnownRHS, Depth + 1);

    bool KnowSomethingUseful =
        KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);

    if (KnowSomethingUseful || WantPositive) {
      // ...
      computeKnownFPClass(LHS, DemandedElts, InterestedClasses & InterestedLHS,
                          KnownLHS, Depth + 1);
    }

    // Note: compare the generic opcode, not IR's Instruction::FDiv.
    if (Opcode == TargetOpcode::G_FDIV) {
      // Only 0/0 and Inf/Inf produce NaN.
      // ...
    } else {
      // ...
    }
    break;
  }
  case TargetOpcode::G_FPEXT: {
    Register Src = MI.getOperand(1).getReg();
    // Infinity, NaN and zero propagate from the source.
    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
                        Depth + 1);

    const fltSemantics &DstSem = getFltSemanticForLLT(DstTy.getScalarType());
    LLT SrcTy = MRI.getType(Src).getScalarType();
    const fltSemantics &SrcSem = getFltSemanticForLLT(SrcTy);
    // Every subnormal input is normal in a strictly wider result type.
    if (APFloat::isRepresentableAsNormalIn(SrcSem, DstSem))
      Known.knownNot(fcSubnormal);
    // ...
    break;
  }
  case TargetOpcode::G_FPTRUNC: {
    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
                                  Depth);
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    // Integer conversions cannot produce NaN or subnormals, and a zero input
    // converts to +0.0.
    Known.knownNot(fcNan | fcSubnormal | fcNegZero);
    if (Opcode == TargetOpcode::G_UITOFP)
      Known.signBitMustBeZero();

    Register Val = MI.getOperand(1).getReg();
    LLT Ty = MRI.getType(Val);
    if (InterestedClasses & fcInf) {
      // If the result type's exponent range covers the largest-magnitude
      // input integer, the conversion is always finite. Drop one bit for the
      // signed case, since the largest magnitude is the minimum value.
      int IntSize = Ty.getScalarSizeInBits();
      if (Opcode == TargetOpcode::G_SITOFP)
        --IntSize;
      const fltSemantics &FltSem = getFltSemanticForLLT(DstTy.getScalarType());
      if (ilogb(APFloat::getLargest(FltSem)) >= IntSize)
        Known.knownNot(fcInf);
    }
    break;
  }
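  // e.g. u32 -> f32: ilogb(FLT_MAX) == 127 >= 32, so a G_UITOFP from s32 can
  // never produce an infinity.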
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS: {
    GMergeLikeInstr &Merge = cast<GMergeLikeInstr>(MI);

    bool First = true;
    for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {
      // TODO: Handle demanded elements properly for G_CONCAT_VECTORS.
      bool NeedsElt = DemandedElts[Idx];
      if (NeedsElt) {
        Register Src = Merge.getSourceReg(Idx);
        if (First) {
          computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);
          First = false;
        } else {
          KnownFPClass Known2;
          computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);
          Known |= Known2;
        }
      }
      // If we don't know anything, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    GExtractVectorElement &Extract = cast<GExtractVectorElement>(MI);
    Register Vec = Extract.getVectorReg();
    Register Idx = Extract.getIndexReg();
    auto CIdx = getIConstantVRegVal(Idx, MRI);
    LLT VecTy = MRI.getType(Vec);
    if (VecTy.isFixedVector()) {
      // Demand only the extracted lane when the index is a known in-range
      // constant; otherwise demand all lanes.
      unsigned NumElts = VecTy.getNumElements();
      APInt DemandedVecElts = APInt::getAllOnes(NumElts);
      if (CIdx && CIdx->ult(NumElts))
        DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
      return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses,
                                 Known, Depth + 1);
    }
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    GInsertVectorElement &Insert = cast<GInsertVectorElement>(MI);
    Register Vec = Insert.getVectorReg();
    Register Elt = Insert.getElementReg();
    Register Idx = Insert.getIndexReg();
    LLT VecTy = MRI.getType(Vec);
    if (VecTy.isScalableVector())
      return;

    auto CIdx = getIConstantVRegVal(Idx, MRI);
    unsigned NumElts = DemandedElts.getBitWidth();
    APInt DemandedVecElts = DemandedElts;
    bool NeedsElt = true;
    // If we know the insertion index, no longer demand that lane from Vec.
    if (CIdx && CIdx->ult(NumElts)) {
      DemandedVecElts.clearBit(CIdx->getZExtValue());
      NeedsElt = DemandedElts[CIdx->getZExtValue()];
    }

    if (NeedsElt) {
      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);
      if (Known.isUnknown())
        break;
    } else {
      Known.KnownFPClasses = fcNone;
    }

    if (!DemandedVecElts.isZero()) {
      KnownFPClass Known2;
      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
                          Depth + 1);
      Known |= Known2;
    }
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // For undef elements we don't know anything about the common result.
    APInt DemandedLHS, DemandedRHS;
    GShuffleVector &Shuf = cast<GShuffleVector>(MI);
    if (!DstTy.isFixedVector()) {
      assert(DemandedElts == APInt(1, 1));
      DemandedLHS = DemandedRHS = DemandedElts;
    } else {
      unsigned NumElts = MRI.getType(Shuf.getSrc1Reg()).getNumElements();
      if (!getShuffleDemandedElts(NumElts, Shuf.getMask(), DemandedElts,
                                  DemandedLHS, DemandedRHS))
        return;
    }

    if (!!DemandedLHS) {
      computeKnownFPClass(Shuf.getSrc1Reg(), DemandedLHS, InterestedClasses,
                          Known, Depth + 1);
      if (Known.isUnknown())
        break;
    } else {
      Known.KnownFPClasses = fcNone;
    }

    if (!!DemandedRHS) {
      KnownFPClass Known2;
      computeKnownFPClass(Shuf.getSrc2Reg(), DemandedRHS, InterestedClasses,
                          Known2, Depth + 1);
      Known |= Known2;
    }
    break;
  }
  case TargetOpcode::COPY: {
    Register Src = MI.getOperand(1).getReg();
    if (!Src.isVirtual())
      return;
    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
                        Depth + 1);
    break;
  }
  }
}
KnownFPClass
GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,
                                        FPClassTest InterestedClasses,
                                        unsigned Depth) {
  KnownFPClass KnownClasses;
  computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);
  return KnownClasses;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, FPClassTest InterestedClasses, unsigned Depth) {
  KnownFPClass Known;
  computeKnownFPClass(R, Known, InterestedClasses, Depth);
  return Known;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, const APInt &DemandedElts, uint32_t Flags,
    FPClassTest InterestedClasses, unsigned Depth) {
  // Don't ask about classes the nnan/ninf flags already rule out.
  if (Flags & MachineInstr::FmNoNans)
    InterestedClasses &= ~fcNan;
  if (Flags & MachineInstr::FmNoInfs)
    InterestedClasses &= ~fcInf;

  KnownFPClass Result =
      computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);

  if (Flags & MachineInstr::FmNoNans)
    Result.KnownFPClasses &= ~fcNan;
  if (Flags & MachineInstr::FmNoInfs)
    Result.KnownFPClasses &= ~fcInf;
  return Result;
}

KnownFPClass GISelValueTracking::computeKnownFPClass(
    Register R, uint32_t Flags, FPClassTest InterestedClasses, unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);
}
unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,
                                                   const APInt &DemandedElts,
                                                   unsigned Depth) {
  // Test src1 first, since we canonicalize simpler expressions to the RHS.
  unsigned Src1SignBits = computeNumSignBits(Src1, DemandedElts, Depth);
  if (Src1SignBits == 1)
    return 1;
  return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
}

static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld,
                                                    unsigned TyBits) {
  // Extend the !range-metadata constant range to the load's result width,
  // honoring the extension kind.
  // ...
    case TargetOpcode::G_SEXTLOAD:
      CR = CR.signExtend(TyBits);
      break;
    case TargetOpcode::G_ZEXTLOAD:
      CR = CR.zeroExtend(TyBits);
      break;
  // ...
}
unsigned GISelValueTracking::computeNumSignBits(Register R,
                                                const APInt &DemandedElts,
                                                unsigned Depth) {
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::G_CONSTANT)
    return MI.getOperand(1).getCImm()->getValue().getNumSignBits();

  // ...
  LLT DstTy = MRI.getType(R);
  const unsigned TyBits = DstTy.getScalarSizeInBits();
  // ...
  unsigned FirstAnswer = 1;
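  // NumSignBits counts how many of the top bits are copies of the sign bit:
  // e.g. the s8 constant 0b11110110 (-10) has 4 sign bits, and any s8 value
  // sign-extended from s4 has at least 5.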
  switch (Opcode) {
  case TargetOpcode::COPY: {
    MachineOperand &Src = MI.getOperand(1);
    if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
        MRI.getType(Src.getReg()).isValid()) {
      // Don't increment Depth for this one since we didn't do any work.
      return computeNumSignBits(Src.getReg(), DemandedElts, Depth);
    }
    return 1;
  }
  case TargetOpcode::G_SEXT: {
    Register Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(Src);
    unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
    return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp;
  }
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    // Max of the input and what this extends.
    Register Src = MI.getOperand(1).getReg();
    unsigned SrcBits = MI.getOperand(2).getImm();
    unsigned InRegBits = TyBits - SrcBits + 1;
    return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1),
                    InRegBits);
  }
  case TargetOpcode::G_LOAD: {
    // ... (use range metadata on the memory operand, if any)
    break;
  }
  case TargetOpcode::G_SEXTLOAD: {
    // e.g. an s16 -> s32 sextload has 17 known sign bits.
    // ...
  }
  case TargetOpcode::G_ZEXTLOAD: {
    // e.g. an s16 -> s32 zextload has 16 known sign bits (the zeros).
    // ...
  }
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR: {
    Register Src1 = MI.getOperand(1).getReg();
    unsigned Src1NumSignBits =
        computeNumSignBits(Src1, DemandedElts, Depth + 1);
    if (Src1NumSignBits != 1) {
      Register Src2 = MI.getOperand(2).getReg();
      unsigned Src2NumSignBits =
          computeNumSignBits(Src2, DemandedElts, Depth + 1);
      FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
    }
    break;
  }
  case TargetOpcode::G_ASHR: {
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    FirstAnswer = computeNumSignBits(Src1, DemandedElts, Depth + 1);
    // An arithmetic shift right duplicates the sign bit C more times.
    if (auto C = getValidMinimumShiftAmount(Src2, DemandedElts, Depth + 1))
      FirstAnswer = std::min<uint64_t>(FirstAnswer + *C, TyBits);
    break;
  }
  case TargetOpcode::G_SHL: {
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    if (std::optional<ConstantRange> ShAmtRange =
            getValidShiftAmountRange(Src2, DemandedElts, Depth + 1)) {
      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
      // Look through an extension whose extended bits are all shifted out:
      // then the sign bits of the extendee determine the answer.
      MachineInstr *ExtMI = MRI.getVRegDef(Src1);
      unsigned ExtOpc = ExtMI->getOpcode();
      if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
          ExtOpc == TargetOpcode::G_ANYEXT) {
        LLT ExtTy = MRI.getType(Src1);
        Register Extendee = ExtMI->getOperand(1).getReg();
        LLT ExtendeeTy = MRI.getType(Extendee);
        uint64_t SizeDiff =
            ExtTy.getScalarSizeInBits() - ExtendeeTy.getScalarSizeInBits();
        if (SizeDiff <= MinShAmt) {
          unsigned Tmp =
              SizeDiff + computeNumSignBits(Extendee, DemandedElts, Depth + 1);
          if (Tmp > MaxShAmt)
            return Tmp - MaxShAmt;
        }
      }
      // shl destroys sign bits; make sure it doesn't shift out all of them.
      unsigned Tmp = computeNumSignBits(Src1, DemandedElts, Depth + 1);
      if (Tmp > MaxShAmt)
        return Tmp - MaxShAmt;
    }
    break;
  }
  case TargetOpcode::G_TRUNC: {
    Register Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(Src);

    // Check if the sign bits of the source extend down into the truncated
    // value.
    unsigned DstTyBits = DstTy.getScalarSizeInBits();
    unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
    unsigned NumSrcSignBits = computeNumSignBits(Src, DemandedElts, Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - DstTyBits))
      return NumSrcSignBits - (NumSrcBits - DstTyBits);
    break;
  }
  case TargetOpcode::G_SELECT: {
    return computeNumSignBitsMin(MI.getOperand(2).getReg(),
                                 MI.getOperand(3).getReg(), DemandedElts,
                                 Depth + 1);
  }
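  // e.g. truncating an s64 with 40 known sign bits to s32 discards 32 high
  // bits, leaving 40 - (64 - 32) = 8 known sign bits.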
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
    // TODO: Handle clamp pattern with number of sign bits for SMIN/SMAX.
    return computeNumSignBitsMin(MI.getOperand(1).getReg(),
                                 MI.getOperand(2).getReg(), DemandedElts,
                                 Depth + 1);
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO: {
    // If R is the boolean (carry/overflow) def and compares return 0 or -1,
    // then every bit is a sign bit.
    if (MI.getOperand(1).getReg() == R) {
      if (TL.getBooleanContents(DstTy.isVector(), false) ==
          TargetLowering::ZeroOrNegativeOneBooleanContent)
        return TyBits;
    }
    break;
  }
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    bool IsFP = Opcode == TargetOpcode::G_FCMP;
    if (TyBits == 1)
      break;
    auto BC = TL.getBooleanContents(DstTy.isVector(), IsFP);
    if (BC == TargetLowering::ZeroOrNegativeOneBooleanContent)
      return TyBits; // All bits are sign bits.
    if (BC == TargetLowering::ZeroOrOneBooleanContent)
      return TyBits - 1; // Every always-zero bit is a sign bit.
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Collect the minimum number of sign bits shared by every demanded
    // vector element.
    FirstAnswer = TyBits;
    APInt SingleDemandedElt(1, 1);
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      if (!DemandedElts[I])
        continue;
      unsigned Tmp2 = computeNumSignBits(MI.getOperand(I + 1).getReg(),
                                         SingleDemandedElt, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);
      // Early out and fall back on getKnownBits if nothing is known.
      if (FirstAnswer == 1)
        break;
    }
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())
      break;
    FirstAnswer = TyBits;
    // Take the minimum sign-bit count across all demanded subvectors.
    unsigned NumSubVectorElts =
        MRI.getType(MI.getOperand(1).getReg()).getNumElements();
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      APInt DemandedSub =
          DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);
      if (!DemandedSub)
        continue;
      unsigned Tmp2 = computeNumSignBits(MI.getOperand(I + 1).getReg(),
                                         DemandedSub, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);
      if (FirstAnswer == 1)
        break;
    }
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // Collect the minimum number of sign bits shared by every referenced
    // vector element.
    APInt DemandedLHS, DemandedRHS;
    Register Src1 = MI.getOperand(1).getReg();
    unsigned NumElts = MRI.getType(Src1).getNumElements();
    if (!getShuffleDemandedElts(NumElts, MI.getOperand(3).getShuffleMask(),
                                DemandedElts, DemandedLHS, DemandedRHS))
      return 1;
    if (!!DemandedLHS)
      FirstAnswer = computeNumSignBits(Src1, DemandedLHS, Depth + 1);
    if (FirstAnswer == 1)
      break;
    if (!!DemandedRHS) {
      unsigned Tmp2 = computeNumSignBits(MI.getOperand(2).getReg(),
                                         DemandedRHS, Depth + 1);
      FirstAnswer = std::min(FirstAnswer, Tmp2);
    }
    break;
  }
  case TargetOpcode::G_SPLAT_VECTOR: {
    // Check if the sign bits of the scalar source reach down into the
    // narrower element type.
    Register Src = MI.getOperand(1).getReg();
    unsigned NumSrcSignBits = computeNumSignBits(Src, APInt(1, 1), Depth + 1);
    unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
    if (NumSrcSignBits > (NumSrcBits - TyBits))
      return NumSrcSignBits - (NumSrcBits - TyBits);
    break;
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
  default: {
    unsigned NumBits =
        TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
    if (NumBits > 1)
      FirstAnswer = std::max(FirstAnswer, NumBits);
    break;
  }
  }

  // Finally, use any top bits getKnownBits can prove to be copies of the sign
  // bit.
  // ...
  Mask <<= Mask.getBitWidth() - TyBits;
  return std::max(FirstAnswer, Mask.countl_one());
}
unsigned GISelValueTracking::computeNumSignBits(Register R, unsigned Depth) {
  LLT Ty = MRI.getType(R);
  APInt DemandedElts =
      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
  return computeNumSignBits(R, DemandedElts, Depth);
}
std::optional<ConstantRange> GISelValueTracking::getValidShiftAmountRange(
    Register R, const APInt &DemandedElts, unsigned Depth) {
  // Shifting by the bitwidth or more is not valid.
  MachineInstr &MI = *MRI.getVRegDef(R);
  unsigned Opcode = MI.getOpcode();

  LLT Ty = MRI.getType(R);
  unsigned BitWidth = Ty.getScalarSizeInBits();

  if (Opcode == TargetOpcode::G_CONSTANT) {
    const APInt &ShAmt = MI.getOperand(1).getCImm()->getValue();
    if (ShAmt.uge(BitWidth))
      return std::nullopt;
    return ConstantRange(ShAmt);
  }

  if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
    const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
    for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
      if (!DemandedElts[I])
        continue;
      MachineInstr *Op = MRI.getVRegDef(MI.getOperand(I + 1).getReg());
      if (Op->getOpcode() != TargetOpcode::G_CONSTANT) {
        MinAmt = MaxAmt = nullptr;
        break;
      }

      const APInt &ShAmt = Op->getOperand(1).getCImm()->getValue();
      if (ShAmt.uge(BitWidth))
        return std::nullopt;
      if (!MinAmt || MinAmt->ugt(ShAmt))
        MinAmt = &ShAmt;
      if (!MaxAmt || MaxAmt->ult(ShAmt))
        MaxAmt = &ShAmt;
    }

    assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
           "Failed to find matching min/max shift amounts");
    if (MinAmt && MaxAmt)
      return ConstantRange(*MinAmt, *MaxAmt + 1);
  }

  // Use known bits to find a hidden constant range (e.g. behind bitcasts or
  // type legalization artifacts).
  KnownBits KnownAmt = getKnownBits(R, DemandedElts, Depth);
  if (KnownAmt.getMaxValue().ult(BitWidth))
    return ConstantRange::fromKnownBits(KnownAmt, /*IsSigned=*/false);

  return std::nullopt;
}

std::optional<uint64_t> GISelValueTracking::getValidMinimumShiftAmount(
    Register R, const APInt &DemandedElts, unsigned Depth) {
  if (std::optional<ConstantRange> AmtRange =
          getValidShiftAmountRange(R, DemandedElts, Depth))
    return AmtRange->getUnsignedMin().getZExtValue();
  return std::nullopt;
}
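// e.g. for a G_BUILD_VECTOR of the constants 3 and 5, the returned range is
// [3, 6), so getValidMinimumShiftAmount reports 3.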
GISelValueTracking &
GISelValueTrackingInfoAnalysisLegacy::get(MachineFunction &MF) {
  if (!Info) {
    unsigned MaxDepth =
        MF.getTarget().getOptLevel() == CodeGenOptLevel::None ? 2 : 6;
    Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
  }
  return *Info;
}

// Printer pass body (excerpt): dump the analysis result for every virtual
// register operand of each instruction.
// ...
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || MO.getReg().isPhysical())
        continue;
      Register Reg = MO.getReg();
      if (!MRI.getType(Reg).isValid())
        continue;
      KnownBits Known = VTA.getKnownBits(Reg);
      unsigned SignedBits = VTA.computeNumSignBits(Reg);
      OS << "  " << MO << " KnownBits:" << Known << " SignBits:" << SignedBits
         << '\n';
    }
// ...