#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
if (unsigned BitWidth = Ty->getScalarSizeInBits())
  return BitWidth;
return DL.getPointerTypeSizeInBits(Ty);
const APInt &DemandedElts,
DemandedLHS = DemandedRHS = DemandedElts;
DemandedElts, DemandedLHS, DemandedRHS);
bool UseInstrInfo, unsigned Depth) {
R->uge(LHS->getType()->getScalarSizeInBits()))
assert(LHS->getType() == RHS->getType() &&
       "LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
       "LHS and RHS should be integers");
return !I->user_empty() &&
return !I->user_empty() && all_of(I->users(), [](const User *U) {
  return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
return ::isKnownToBeAPowerOfTwo(
return CI->getValue().isStrictlyPositive();
return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
return Mask.isSubsetOf(Known.Zero);
unsigned Depth = 0) {
return ::ComputeNumSignBits(
return V->getType()->getScalarSizeInBits() - SignBits + 1;
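// Illustrative sketch (not part of the original file): the identity used just
// above. If a W-bit value carries S redundant sign bits, only W - S + 1 bits
// are significant. The helper name below is an assumption, not an LLVM API.
static unsigned maxSignificantBitsFromSignBits(unsigned TyBits,
                                               unsigned SignBits) {
  assert(SignBits >= 1 && SignBits <= TyBits && "sign bit count out of range");
  // e.g. an i32 known to lie in [-128, 127] has 25 sign bits and therefore at
  // most 32 - 25 + 1 = 8 significant bits.
  return TyBits - SignBits + 1;
}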
const APInt &DemandedElts,
if (KnownOut.isUnknown() && !NSW && !NUW)
bool NUW, const APInt &DemandedElts,
bool isKnownNegativeOp0 = Known2.isNegative();
(isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
(isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
(isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
bool SelfMultiply = Op0 == Op1;
unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
if (OutValidBits < TyBits) {
  APInt KnownZeroMask =
  Known.Zero |= KnownZeroMask;
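// Illustrative sketch (assumption, not the original helper): for a self
// multiply X * X, if X has SignBits redundant sign bits then |X| fits in
// TyBits - SignBits + 1 signed bits, the square fits in twice that many bits,
// and every bit above that is known zero.
static APInt knownZeroHighBitsOfSquare(unsigned TyBits, unsigned SignBits) {
  unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
  if (OutValidBits >= TyBits)
    return APInt::getZero(TyBits); // nothing useful is known
  // Bits at positions [OutValidBits, TyBits) can never be set in X * X.
  return APInt::getHighBitsSet(TyBits, TyBits - OutValidBits);
}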
unsigned NumRanges = Ranges.getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
  "Known bit width must match range bit width!");
  unsigned CommonPrefixBits =
      (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
  Known.One &= UnsignedMax & Mask;
  Known.Zero &= ~UnsignedMax & Mask;
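// Illustrative sketch (assumption): the rule applied above for one !range
// entry. Bits on which the unsigned minimum and maximum of a range agree are
// fixed for every value in the range; their count is the length of the common
// prefix, i.e. countl_zero(Min ^ Max). The original code additionally
// intersects (&=) the result across all ranges attached to the instruction.
static KnownBits knownBitsFromUnsignedRange(const APInt &Min,
                                            const APInt &Max) {
  unsigned BitWidth = Min.getBitWidth();
  unsigned CommonPrefixBits = (Min ^ Max).countl_zero();
  APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
  KnownBits Known(BitWidth);
  Known.One = Max & Mask;   // prefix bits set in both bounds
  Known.Zero = ~Max & Mask; // prefix bits clear in both bounds
  return Known;
}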
while (!WorkSet.empty()) {
  if (!Visited.insert(V).second)
  return EphValues.count(cast<Instruction>(U));
  if (V == I || (!V->mayHaveSideEffects() && !V->isTerminator())) {
  for (const Use &U : U->operands()) {
return CI->isAssumeLikeIntrinsic();
bool AllowEphemerals) {
if (!AllowEphemerals && Inv == CxtI)
if (CtxI->getParent() != Assume->getParent() || !Assume->comesBefore(CtxI))
for (const auto &[Idx, I] :
if (!CB->hasFnAttr(Attribute::NoFree))
for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
     Pred, VC->getElementAsAPInt(ElemIdx));
const PHINode **PhiOut = nullptr) {
CtxIOut = PHI->getIncomingBlock(*U)->getTerminator();
    IncPhi && IncPhi->getNumIncomingValues() == 2) {
  for (int Idx = 0; Idx < 2; ++Idx) {
    if (IncPhi->getIncomingValue(Idx) == PHI) {
      ValOut = IncPhi->getIncomingValue(1 - Idx);
      CtxIOut = IncPhi->getIncomingBlock(1 - Idx)->getTerminator();
"Got assumption for the wrong function!");
if (!V->getType()->isPointerTy())
*I, I->bundle_op_info_begin()[Elem.Index])) {
(RK.AttrKind == Attribute::NonNull ||
 (RK.AttrKind == Attribute::Dereferenceable &&
if (RHS->getType()->isPointerTy()) {
Known.Zero |= ~*C & *Mask;
Known.One |= *C & ~*Mask;
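// Illustrative sketch (assumption): the bit-level facts behind the two
// assignments above.
//  - From (V & Mask) == C: wherever Mask is 1, V's bit equals C's bit, so
//    Known.One |= C & Mask and Known.Zero |= ~C & Mask.
//  - From (V | Mask) == C: wherever Mask is 0, V's bit equals C's bit, so
//    Known.One |= C & ~Mask and Known.Zero |= ~C & ~Mask.
static void applyMaskedEquality(KnownBits &Known, const APInt &C,
                                const APInt &Mask, bool IsAnd) {
  APInt Pinned = IsAnd ? Mask : ~Mask; // bit positions fixed by the compare
  Known.One |= C & Pinned;
  Known.Zero |= ~C & Pinned;
}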
Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
KnownBits DstKnown(LHS->getType()->getScalarSizeInBits());
bool Invert, unsigned Depth) {
921 "Got assumption for the wrong function!");
924 if (!V->getType()->isPointerTy())
927 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
931 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
943 Value *Arg =
I->getArgOperand(0);
959 if (Trunc && Trunc->getOperand(0) == V &&
961 if (Trunc->hasNoUnsignedWrap()) {
1009 Known = KF(Known2, Known, ShAmtNonZero);
1020 Value *
X =
nullptr, *
Y =
nullptr;
1022 switch (
I->getOpcode()) {
1023 case Instruction::And:
1024 KnownOut = KnownLHS & KnownRHS;
1034 KnownOut = KnownLHS.
blsi();
1036 KnownOut = KnownRHS.
blsi();
1039 case Instruction::Or:
1040 KnownOut = KnownLHS | KnownRHS;
1042 case Instruction::Xor:
1043 KnownOut = KnownLHS ^ KnownRHS;
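// Illustrative sketch (assumption): KnownBits already overloads the bitwise
// operators with exactly the semantics used in this switch, so the three
// plain cases reduce to single expressions.
static void combineBitwise(unsigned Opcode, const KnownBits &LHS,
                           const KnownBits &RHS, KnownBits &Out) {
  switch (Opcode) {
  case Instruction::And:
    Out = LHS & RHS; // zero if either is zero, one only if both are one
    break;
  case Instruction::Or:
    Out = LHS | RHS; // one if either is one, zero only if both are zero
    break;
  case Instruction::Xor:
    Out = LHS ^ RHS; // known only where both inputs are known
    break;
  default:
    Out.resetAll();
    break;
  }
}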
const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
KnownOut = XBits.blsmsk();
if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
APInt DemandedEltsLHS, DemandedEltsRHS;
    DemandedElts, DemandedEltsLHS,
const auto ComputeForSingleOpFunc =
    return KnownBitsFunc(
if (DemandedEltsRHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS);
if (DemandedEltsLHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS);
return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS)
    .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS));
APInt DemandedElts =
Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
return ConstantRange::getEmpty(BitWidth);
Value *Arm, bool Invert,
"Input should be a Select!");
const Value *LHS2 = nullptr, *RHS2 = nullptr;
return CLow->sle(*CHigh);
const APInt *&CHigh) {
assert((II->getIntrinsicID() == Intrinsic::smin ||
        II->getIntrinsicID() == Intrinsic::smax) &&
       "Must be smin/smax");
if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
if (II->getIntrinsicID() == Intrinsic::smin)
return CLow->sle(*CHigh);
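// Illustrative sketch (assumption): a signed clamp such as
// smax(smin(X, CHigh), CLow) with CLow <=s CHigh pins the result into
// [CLow, CHigh], so the result has at least as many sign bits as the bound
// with fewer sign bits.
static unsigned minSignBitsOfSignedClamp(const APInt &CLow,
                                         const APInt &CHigh) {
  assert(CLow.sle(CHigh) && "not a valid clamp");
  return std::min(CLow.getNumSignBits(), CHigh.getNumSignBits());
}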
1228 const APInt *CLow, *CHigh;
1235 const APInt &DemandedElts,
1242 switch (
I->getOpcode()) {
1244 case Instruction::Load:
1249 case Instruction::And:
1255 case Instruction::Or:
1261 case Instruction::Xor:
1267 case Instruction::Mul: {
1271 DemandedElts, Known, Known2, Q,
Depth);
1274 case Instruction::UDiv: {
1281 case Instruction::SDiv: {
1288 case Instruction::Select: {
1289 auto ComputeForArm = [&](
Value *Arm,
bool Invert) {
1297 ComputeForArm(
I->getOperand(1),
false)
1301 case Instruction::FPTrunc:
1302 case Instruction::FPExt:
1303 case Instruction::FPToUI:
1304 case Instruction::FPToSI:
1305 case Instruction::SIToFP:
1306 case Instruction::UIToFP:
1308 case Instruction::PtrToInt:
1309 case Instruction::IntToPtr:
1312 case Instruction::ZExt:
1313 case Instruction::Trunc: {
1314 Type *SrcTy =
I->getOperand(0)->getType();
1316 unsigned SrcBitWidth;
1324 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
1328 Inst && Inst->hasNonNeg() && !Known.
isNegative())
1333 case Instruction::BitCast: {
1334 Type *SrcTy =
I->getOperand(0)->getType();
1335 if (SrcTy->isIntOrPtrTy() &&
1338 !
I->getType()->isVectorTy()) {
1346 V->getType()->isFPOrFPVectorTy()) {
1347 Type *FPType = V->getType()->getScalarType();
1359 if (FPClasses &
fcInf)
1371 if (Result.SignBit) {
1372 if (*Result.SignBit)
1383 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1384 !
I->getType()->isIntOrIntVectorTy() ||
1392 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1408 unsigned SubScale =
BitWidth / SubBitWidth;
1410 for (
unsigned i = 0; i != NumElts; ++i) {
1411 if (DemandedElts[i])
1412 SubDemandedElts.
setBit(i * SubScale);
1416 for (
unsigned i = 0; i != SubScale; ++i) {
1419 unsigned ShiftElt = IsLE ? i : SubScale - 1 - i;
1420 Known.
insertBits(KnownSrc, ShiftElt * SubBitWidth);
1426 unsigned SubScale = SubBitWidth /
BitWidth;
1428 APInt SubDemandedElts =
1434 for (
unsigned i = 0; i != NumElts; ++i) {
1435 if (DemandedElts[i]) {
1436 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
1446 case Instruction::SExt: {
1448 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
1450 Known = Known.
trunc(SrcBitWidth);
1457 case Instruction::Shl: {
1461 bool ShAmtNonZero) {
1462 return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
1472 case Instruction::LShr: {
1475 bool ShAmtNonZero) {
1486 case Instruction::AShr: {
1489 bool ShAmtNonZero) {
1496 case Instruction::Sub: {
1500 DemandedElts, Known, Known2, Q,
Depth);
1503 case Instruction::Add: {
1507 DemandedElts, Known, Known2, Q,
Depth);
1510 case Instruction::SRem:
1516 case Instruction::URem:
1521 case Instruction::Alloca:
case Instruction::GetElementPtr: {
  APInt AccConstIndices(IndexWidth, 0);
  auto AddIndexToKnown = [&](KnownBits IndexBits) {
  "Index width can't be larger than pointer width");
  for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
    Value *Index = I->getOperand(i);
    "Access to structure field must be known at compile time");
    AccConstIndices += Offset;
    CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
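// Illustrative sketch (assumption): the line above folds a constant GEP index
// into the running byte offset; the index is first adjusted to the pointer
// index width and then scaled by the element stride. StrideInBytes is assumed
// to already be an IndexWidth-wide APInt.
static APInt constantIndexOffset(const APInt &Index, const APInt &StrideInBytes,
                                 unsigned IndexWidth) {
  return Index.sextOrTrunc(IndexWidth) * StrideInBytes;
}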
case Instruction::PHI: {
  Value *R = nullptr, *L = nullptr;
1629 case Instruction::LShr:
1630 case Instruction::AShr:
1631 case Instruction::Shl:
1632 case Instruction::UDiv:
1639 case Instruction::URem: {
1652 case Instruction::Shl:
1656 case Instruction::LShr:
1657 case Instruction::UDiv:
1658 case Instruction::URem:
1663 case Instruction::AShr:
1675 case Instruction::Add:
1676 case Instruction::Sub:
1677 case Instruction::And:
1678 case Instruction::Or:
1679 case Instruction::Mul: {
1686 unsigned OpNum =
P->getOperand(0) == R ? 0 : 1;
1687 Instruction *RInst =
P->getIncomingBlock(OpNum)->getTerminator();
1688 Instruction *LInst =
P->getIncomingBlock(1 - OpNum)->getTerminator();
1717 case Instruction::Add: {
1727 case Instruction::Sub: {
1738 case Instruction::Mul:
1755 if (
P->getNumIncomingValues() == 0)
1766 for (
const Use &U :
P->operands()) {
1801 if ((TrueSucc == CxtPhi->
getParent()) !=
1818 Known2 = KnownUnion;
1832 case Instruction::Call:
1833 case Instruction::Invoke: {
1843 if (std::optional<ConstantRange>
Range = CB->getRange())
1846 if (
const Value *RV = CB->getReturnedArgOperand()) {
1847 if (RV->getType() ==
I->getType()) {
switch (II->getIntrinsicID()) {
case Intrinsic::abs: {
  bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
case Intrinsic::bitreverse:
case Intrinsic::bswap:
case Intrinsic::ctlz: {
  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
case Intrinsic::cttz: {
  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1898 case Intrinsic::ctpop: {
case Intrinsic::fshr:
case Intrinsic::fshl: {
  if (II->getIntrinsicID() == Intrinsic::fshr)
  Known2 <<= ShiftAmt;
1929 case Intrinsic::uadd_sat:
1934 case Intrinsic::usub_sat:
1939 case Intrinsic::sadd_sat:
1944 case Intrinsic::ssub_sat:
1950 case Intrinsic::vector_reverse:
1956 case Intrinsic::vector_reduce_and:
1957 case Intrinsic::vector_reduce_or:
1958 case Intrinsic::vector_reduce_umax:
1959 case Intrinsic::vector_reduce_umin:
1960 case Intrinsic::vector_reduce_smax:
1961 case Intrinsic::vector_reduce_smin:
1964 case Intrinsic::vector_reduce_xor: {
1971 bool EvenCnt = VecTy->getElementCount().isKnownEven();
1975 if (VecTy->isScalableTy() || EvenCnt)
1979 case Intrinsic::umin:
1984 case Intrinsic::umax:
1989 case Intrinsic::smin:
1995 case Intrinsic::smax:
case Intrinsic::ptrmask: {
  const Value *Mask = I->getOperand(1);
  Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
2011 case Intrinsic::x86_sse2_pmulh_w:
2012 case Intrinsic::x86_avx2_pmulh_w:
2013 case Intrinsic::x86_avx512_pmulh_w_512:
2018 case Intrinsic::x86_sse2_pmulhu_w:
2019 case Intrinsic::x86_avx2_pmulhu_w:
2020 case Intrinsic::x86_avx512_pmulhu_w_512:
2025 case Intrinsic::x86_sse42_crc32_64_64:
2028 case Intrinsic::x86_ssse3_phadd_d_128:
2029 case Intrinsic::x86_ssse3_phadd_w_128:
2030 case Intrinsic::x86_avx2_phadd_d:
case Intrinsic::x86_avx2_phadd_w: {
    I, DemandedElts, Q, Depth,
case Intrinsic::x86_ssse3_phadd_sw_128:
case Intrinsic::x86_avx2_phadd_sw: {
case Intrinsic::x86_ssse3_phsub_d_128:
case Intrinsic::x86_ssse3_phsub_w_128:
case Intrinsic::x86_avx2_phsub_d:
case Intrinsic::x86_avx2_phsub_w: {
    I, DemandedElts, Q, Depth,
case Intrinsic::x86_ssse3_phsub_sw_128:
case Intrinsic::x86_avx2_phsub_sw: {
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax: {
  bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
  MaxVL = std::min(MaxVL, CI->getZExtValue());
  unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
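// Illustrative sketch (assumption): if a result is known to be at most MaxVL,
// every bit from position Log2_32(MaxVL) + 1 upward must be zero, which is
// the KnownZeroFirstBit computed above (Log2_32 is from MathExtras.h).
static KnownBits knownBitsOfBoundedValue(uint64_t MaxVL, unsigned BitWidth) {
  KnownBits Known(BitWidth);
  if (MaxVL == 0) {
    Known.setAllZero(); // only the value 0 is possible
    return Known;
  }
  unsigned FirstZeroBit = Log2_32(MaxVL) + 1;
  if (FirstZeroBit < BitWidth)
    Known.Zero.setBitsFrom(FirstZeroBit);
  return Known;
}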
case Intrinsic::vscale: {
  if (!II->getParent() || !II->getFunction())
2095 case Instruction::ShuffleVector: {
2104 APInt DemandedLHS, DemandedRHS;
if (!!DemandedLHS) {
  const Value *LHS = Shuf->getOperand(0);
if (!!DemandedRHS) {
  const Value *RHS = Shuf->getOperand(1);
case Instruction::InsertElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Elt = I->getOperand(1);
  APInt DemandedVecElts = DemandedElts;
  bool NeedsElt = true;
  if (CIdx && CIdx->getValue().ult(NumElts)) {
    DemandedVecElts.clearBit(CIdx->getZExtValue());
    NeedsElt = DemandedElts[CIdx->getZExtValue()];
  if (!DemandedVecElts.isZero()) {
case Instruction::ExtractElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Idx = I->getOperand(1);
2168 if (CIdx && CIdx->getValue().ult(NumElts))
case Instruction::ExtractValue:
  switch (II->getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
      true, II->getArgOperand(0), II->getArgOperand(1), false,
      false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
      false, II->getArgOperand(0), II->getArgOperand(1), false,
      false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
      false, DemandedElts, Known, Known2, Q, Depth);
case Instruction::Freeze:
if (!DemandedElts) {
assert(V && "No Value?");
Type *Ty = V->getType();
assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
       "Not integer or pointer type!");
       FVTy->getNumElements() == DemandedElts.getBitWidth() &&
       "DemandedElt width should equal the fixed vector number of elements");
       "DemandedElt width should be 1 for scalars or scalable vectors");
       "V and Known should have same BitWidth");
       "V and Known should have same BitWidth");
2298 for (
unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2299 if (!DemandedElts[i])
2301 APInt Elt = CDV->getElementAsAPInt(i);
2315 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2316 if (!DemandedElts[i])
2326 const APInt &Elt = ElementCI->getValue();
2347 if (std::optional<ConstantRange>
Range =
A->getRange())
2348 Known =
Range->toKnownBits();
2357 if (!GA->isInterposable())
2365 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
2366 Known = CR->toKnownBits();
2371 Align Alignment = V->getPointerAlignment(Q.
DL);
2387 Value *Start =
nullptr, *Step =
nullptr;
2393 if (U.get() == Start) {
2409 case Instruction::Mul:
2414 case Instruction::SDiv:
2420 case Instruction::UDiv:
2426 case Instruction::Shl:
2428 case Instruction::AShr:
2432 case Instruction::LShr:
2470 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2512 return F->hasFnAttribute(Attribute::VScaleRange);
2529 switch (
I->getOpcode()) {
2530 case Instruction::ZExt:
2532 case Instruction::Trunc:
2534 case Instruction::Shl:
2538 case Instruction::LShr:
2542 case Instruction::UDiv:
2546 case Instruction::Mul:
2550 case Instruction::And:
case Instruction::Add: {
  if (match(I->getOperand(0),
  if (match(I->getOperand(1),
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
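// Illustrative sketch (assumption): the test above. ~(LHSBits.Zero &
// RHSBits.Zero) collects every bit position that is not known zero in both
// operands; if only one position 2^k survives, each operand is either 0 or
// 2^k, so the sum is 0, 2^k or 2^(k+1) -- a power of two or zero, which the
// OrZero form of the query accepts.
static bool addHasSingleMaybeSetBit(const KnownBits &LHSBits,
                                    const KnownBits &RHSBits) {
  return (~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2();
}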
2598 case Instruction::Select:
2601 case Instruction::PHI: {
2622 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2623 return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
2626 case Instruction::Invoke:
2627 case Instruction::Call: {
2629 switch (
II->getIntrinsicID()) {
2630 case Intrinsic::umax:
2631 case Intrinsic::smax:
2632 case Intrinsic::umin:
2633 case Intrinsic::smin:
2638 case Intrinsic::bitreverse:
2639 case Intrinsic::bswap:
2641 case Intrinsic::fshr:
2642 case Intrinsic::fshl:
2644 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
2668 F =
I->getFunction();
2672 if (!
GEP->hasNoUnsignedWrap() &&
2673 !(
GEP->isInBounds() &&
2678 assert(
GEP->getType()->isPointerTy() &&
"We only support plain pointer GEP");
2689 GTI != GTE; ++GTI) {
2691 if (
StructType *STy = GTI.getStructTypeOrNull()) {
2696 if (ElementOffset > 0)
2702 if (GTI.getSequentialElementStride(Q.
DL).isZero())
2736 unsigned NumUsesExplored = 0;
2737 for (
auto &U : V->uses()) {
2746 if (V->getType()->isPointerTy()) {
2748 if (CB->isArgOperand(&U) &&
2749 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
2777 NonNullIfTrue =
true;
2779 NonNullIfTrue =
false;
2785 for (
const auto *CmpU : UI->
users()) {
2787 if (Visited.
insert(CmpU).second)
2790 while (!WorkList.
empty()) {
2799 for (
const auto *CurrU : Curr->users())
2800 if (Visited.
insert(CurrU).second)
2806 assert(BI->isConditional() &&
"uses a comparison!");
2809 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2813 }
else if (NonNullIfTrue &&
isGuard(Curr) &&
2828 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2830 for (
unsigned i = 0; i < NumRanges; ++i) {
2846 Value *Start =
nullptr, *Step =
nullptr;
2847 const APInt *StartC, *StepC;
2853 case Instruction::Add:
2859 case Instruction::Mul:
2862 case Instruction::Shl:
2864 case Instruction::AShr:
2865 case Instruction::LShr:
2881 bool NUW,
unsigned Depth) {
2938 return ::isKnownNonEqual(
X,
Y, DemandedElts, Q,
Depth);
2943 bool NUW,
unsigned Depth) {
auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.shl(Rhs);
  case Instruction::LShr:
    return Lhs.lshr(Rhs);
  case Instruction::AShr:
    return Lhs.ashr(Rhs);
auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.lshr(Rhs);
  case Instruction::LShr:
  case Instruction::AShr:
    return Lhs.shl(Rhs);
if (MaxShift.uge(NumBits))
if (!ShiftOp(KnownVal.One, MaxShift).isZero())
if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
3021 const APInt &DemandedElts,
3024 switch (
I->getOpcode()) {
3025 case Instruction::Alloca:
3027 return I->getType()->getPointerAddressSpace() == 0;
3028 case Instruction::GetElementPtr:
3029 if (
I->getType()->isPointerTy())
3032 case Instruction::BitCast: {
3060 Type *FromTy =
I->getOperand(0)->getType();
3065 case Instruction::IntToPtr:
3074 case Instruction::PtrToInt:
3082 case Instruction::Trunc:
3085 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
3091 case Instruction::Xor:
3092 case Instruction::Sub:
3094 I->getOperand(1),
Depth);
3095 case Instruction::Or:
3106 case Instruction::SExt:
3107 case Instruction::ZExt:
3111 case Instruction::Shl: {
3126 case Instruction::LShr:
3127 case Instruction::AShr: {
case Instruction::UDiv:
case Instruction::SDiv: {
  if (I->getOpcode() == Instruction::SDiv) {
    XKnown = XKnown.abs(false);
    YKnown = YKnown.abs(false);
  return XUgeY && *XUgeY;
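// Illustrative sketch (assumption): the principle used above. An unsigned (or
// absolute-value, for the SDiv case) division X / Y is non-zero exactly when
// X >= Y, so the code only answers "non-zero" when KnownBits can prove that
// relation.
static bool divIsProvablyNonZero(const KnownBits &XKnown,
                                 const KnownBits &YKnown) {
  std::optional<bool> XUgeY = KnownBits::uge(XKnown, YKnown);
  return XUgeY && *XUgeY; // provable X >= Y implies X / Y >= 1
}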
3169 case Instruction::Add: {
3179 case Instruction::Mul: {
3185 case Instruction::Select: {
3192 auto SelectArmIsNonZero = [&](
bool IsTrueArm) {
3194 Op = IsTrueArm ?
I->getOperand(1) :
I->getOperand(2);
3212 if (SelectArmIsNonZero(
true) &&
3213 SelectArmIsNonZero(
false))
3217 case Instruction::PHI: {
3228 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
3232 BasicBlock *TrueSucc, *FalseSucc;
3233 if (match(RecQ.CxtI,
3234 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
3235 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
3237 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
3239 if (FalseSucc == PN->getParent())
3240 Pred = CmpInst::getInversePredicate(Pred);
3241 if (cmpExcludesZero(Pred, X))
3249 case Instruction::InsertElement: {
3253 const Value *Vec =
I->getOperand(0);
3254 const Value *Elt =
I->getOperand(1);
3258 APInt DemandedVecElts = DemandedElts;
3259 bool SkipElt =
false;
3261 if (CIdx && CIdx->getValue().ult(NumElts)) {
3262 DemandedVecElts.
clearBit(CIdx->getZExtValue());
3263 SkipElt = !DemandedElts[CIdx->getZExtValue()];
3269 (DemandedVecElts.
isZero() ||
3272 case Instruction::ExtractElement:
3274 const Value *Vec = EEI->getVectorOperand();
3275 const Value *Idx = EEI->getIndexOperand();
3278 unsigned NumElts = VecTy->getNumElements();
3280 if (CIdx && CIdx->getValue().ult(NumElts))
3286 case Instruction::ShuffleVector: {
3290 APInt DemandedLHS, DemandedRHS;
3296 return (DemandedRHS.
isZero() ||
3301 case Instruction::Freeze:
3305 case Instruction::Load: {
3322 case Instruction::ExtractValue: {
3328 case Instruction::Add:
3333 case Instruction::Sub:
3336 case Instruction::Mul:
3339 false,
false,
Depth);
3345 case Instruction::Call:
3346 case Instruction::Invoke: {
3348 if (
I->getType()->isPointerTy()) {
3349 if (
Call->isReturnNonNull())
3356 if (std::optional<ConstantRange>
Range =
Call->getRange()) {
3357 const APInt ZeroValue(
Range->getBitWidth(), 0);
3358 if (!
Range->contains(ZeroValue))
3361 if (
const Value *RV =
Call->getReturnedArgOperand())
3367 switch (
II->getIntrinsicID()) {
3368 case Intrinsic::sshl_sat:
3369 case Intrinsic::ushl_sat:
3370 case Intrinsic::abs:
3371 case Intrinsic::bitreverse:
3372 case Intrinsic::bswap:
3373 case Intrinsic::ctpop:
3377 case Intrinsic::ssub_sat:
3380 case Intrinsic::sadd_sat:
3382 II->getArgOperand(1),
3383 true,
false,
Depth);
3385 case Intrinsic::vector_reverse:
3389 case Intrinsic::vector_reduce_or:
3390 case Intrinsic::vector_reduce_umax:
3391 case Intrinsic::vector_reduce_umin:
3392 case Intrinsic::vector_reduce_smax:
3393 case Intrinsic::vector_reduce_smin:
3395 case Intrinsic::umax:
3396 case Intrinsic::uadd_sat:
3404 case Intrinsic::smax: {
auto IsNonZero = [&](Value *Op, std::optional<bool> &OpNonZero,
if (!OpNonZero.has_value())
  OpNonZero = OpKnown.isNonZero() ||
std::optional<bool> Op0NonZero, Op1NonZero;
IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) &&
       IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known);
3429 case Intrinsic::smin: {
3445 case Intrinsic::umin:
3448 case Intrinsic::cttz:
3451 case Intrinsic::ctlz:
3454 case Intrinsic::fshr:
3455 case Intrinsic::fshl:
3457 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
3460 case Intrinsic::vscale:
3462 case Intrinsic::experimental_get_vector_length:
3476 return Known.
One != 0;
3487 Type *Ty = V->getType();
3494 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3495 "DemandedElt width should equal the fixed vector number of elements");
3498 "DemandedElt width should be 1 for scalars");
3503 if (
C->isNullValue())
3512 for (
unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3513 if (!DemandedElts[i])
3515 Constant *Elt =
C->getAggregateElement(i);
3532 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3533 GV->getType()->getAddressSpace() == 0)
3543 if (std::optional<ConstantRange>
Range =
A->getRange()) {
3544 const APInt ZeroValue(
Range->getBitWidth(), 0);
3545 if (!
Range->contains(ZeroValue))
3562 if (((
A->hasPassPointeeByValueCopyAttr() &&
3564 A->hasNonNullAttr()))
3586 APInt DemandedElts =
3588 return ::isKnownNonZero(V, DemandedElts, Q,
Depth);
3597static std::optional<std::pair<Value*, Value*>>
3601 return std::nullopt;
3610 case Instruction::Or:
3615 case Instruction::Xor:
3616 case Instruction::Add: {
3624 case Instruction::Sub:
3630 case Instruction::Mul: {
3636 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3637 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3647 case Instruction::Shl: {
3652 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3653 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3660 case Instruction::AShr:
3661 case Instruction::LShr: {
3664 if (!PEO1->isExact() || !PEO2->isExact())
3671 case Instruction::SExt:
3672 case Instruction::ZExt:
3676 case Instruction::PHI: {
3684 Value *Start1 =
nullptr, *Step1 =
nullptr;
3686 Value *Start2 =
nullptr, *Step2 =
nullptr;
3702 if (Values->first != PN1 || Values->second != PN2)
3705 return std::make_pair(Start1, Start2);
3708 return std::nullopt;
3715 const APInt &DemandedElts,
3723 case Instruction::Or:
3727 case Instruction::Xor:
3728 case Instruction::Add:
3749 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3750 !
C->isZero() && !
C->isOne() &&
3764 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3778 bool UsedFullRecursion =
false;
3780 if (!VisitedBBs.
insert(IncomBB).second)
3784 const APInt *C1, *C2;
3789 if (UsedFullRecursion)
3793 RecQ.
CxtI = IncomBB->getTerminator();
3796 UsedFullRecursion =
true;
3810 const Value *Cond2 = SI2->getCondition();
3813 DemandedElts, Q,
Depth + 1) &&
3815 DemandedElts, Q,
Depth + 1);
3828 if (!
A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
3832 if (!GEPA || GEPA->getNumIndices() != 1 || !
isa<Constant>(GEPA->idx_begin()))
3837 if (!PN || PN->getNumIncomingValues() != 2)
3842 Value *Start =
nullptr;
3844 if (PN->getIncomingValue(0) == Step)
3845 Start = PN->getIncomingValue(1);
3846 else if (PN->getIncomingValue(1) == Step)
3847 Start = PN->getIncomingValue(0);
3858 APInt StartOffset(IndexWidth, 0);
3859 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, StartOffset);
3860 APInt StepOffset(IndexWidth, 0);
3866 APInt OffsetB(IndexWidth, 0);
3867 B =
B->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, OffsetB);
3868 return Start ==
B &&
3880 auto IsKnownNonEqualFromDominatingCondition = [&](
const Value *V) {
3901 if (IsKnownNonEqualFromDominatingCondition(V1) ||
3902 IsKnownNonEqualFromDominatingCondition(V2))
3916 "Got assumption for the wrong function!");
3917 assert(
I->getIntrinsicID() == Intrinsic::assume &&
3918 "must be an assume intrinsic");
3948 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3950 return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
4012 const APInt &DemandedElts,
4018 unsigned MinSignBits = TyBits;
4020 for (
unsigned i = 0; i != NumElts; ++i) {
4021 if (!DemandedElts[i])
4028 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
4035 const APInt &DemandedElts,
4041 assert(Result > 0 &&
"At least one sign bit needs to be present!");
4053 const APInt &DemandedElts,
4055 Type *Ty = V->getType();
4061 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
4062 "DemandedElt width should equal the fixed vector number of elements");
4065 "DemandedElt width should be 1 for scalars");
4079 unsigned FirstAnswer = 1;
4090 case Instruction::BitCast: {
4091 Value *Src = U->getOperand(0);
4092 Type *SrcTy = Src->getType();
4096 if (!SrcTy->isIntOrIntVectorTy())
4102 if ((SrcBits % TyBits) != 0)
4115 case Instruction::SExt:
4116 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
4120 case Instruction::SDiv: {
4121 const APInt *Denominator;
4134 return std::min(TyBits, NumBits + Denominator->
logBase2());
4139 case Instruction::SRem: {
4142 const APInt *Denominator;
4163 unsigned ResBits = TyBits - Denominator->
ceilLogBase2();
4164 Tmp = std::max(Tmp, ResBits);
4170 case Instruction::AShr: {
4175 if (ShAmt->
uge(TyBits))
4178 Tmp += ShAmtLimited;
4179 if (Tmp > TyBits) Tmp = TyBits;
4183 case Instruction::Shl: {
4188 if (ShAmt->
uge(TyBits))
4193 ShAmt->
uge(TyBits -
X->getType()->getScalarSizeInBits())) {
4195 Tmp += TyBits -
X->getType()->getScalarSizeInBits();
4199 if (ShAmt->
uge(Tmp))
4206 case Instruction::And:
4207 case Instruction::Or:
4208 case Instruction::Xor:
4213 FirstAnswer = std::min(Tmp, Tmp2);
4220 case Instruction::Select: {
4224 const APInt *CLow, *CHigh;
4232 return std::min(Tmp, Tmp2);
4235 case Instruction::Add:
4239 if (Tmp == 1)
break;
4243 if (CRHS->isAllOnesValue()) {
4249 if ((Known.
Zero | 1).isAllOnes())
4261 return std::min(Tmp, Tmp2) - 1;
4263 case Instruction::Sub:
4270 if (CLHS->isNullValue()) {
4275 if ((Known.
Zero | 1).isAllOnes())
4292 return std::min(Tmp, Tmp2) - 1;
case Instruction::Mul: {
  unsigned SignBitsOp0 =
  if (SignBitsOp0 == 1)
  unsigned SignBitsOp1 =
  if (SignBitsOp1 == 1)
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
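// Illustrative sketch: the arithmetic used above. Each operand occupies
// TyBits - SignBits + 1 significant bits, a product needs at most the sum of
// the two, and whatever room is left over reappears as sign bits of the
// result (with a floor of one sign bit).
static unsigned signBitsOfMul(unsigned TyBits, unsigned SignBitsOp0,
                              unsigned SignBitsOp1) {
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
}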
4310 case Instruction::PHI: {
4314 if (NumIncomingValues > 4)
break;
4316 if (NumIncomingValues == 0)
break;
4322 for (
unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4323 if (Tmp == 1)
return Tmp;
4326 DemandedElts, RecQ,
Depth + 1));
4331 case Instruction::Trunc: {
4336 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4337 if (Tmp > (OperandTyBits - TyBits))
4338 return Tmp - (OperandTyBits - TyBits);
4343 case Instruction::ExtractElement:
4350 case Instruction::ShuffleVector: {
4358 APInt DemandedLHS, DemandedRHS;
4363 Tmp = std::numeric_limits<unsigned>::max();
4364 if (!!DemandedLHS) {
4365 const Value *
LHS = Shuf->getOperand(0);
4372 if (!!DemandedRHS) {
4373 const Value *
RHS = Shuf->getOperand(1);
4375 Tmp = std::min(Tmp, Tmp2);
4381 assert(Tmp <= TyBits &&
"Failed to determine minimum sign bits");
4384 case Instruction::Call: {
4386 switch (
II->getIntrinsicID()) {
4389 case Intrinsic::abs:
4397 case Intrinsic::smin:
4398 case Intrinsic::smax: {
4399 const APInt *CLow, *CHigh;
4414 if (
unsigned VecSignBits =
4432 if (
F->isIntrinsic())
4433 return F->getIntrinsicID();
4439 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
4449 return Intrinsic::sin;
4453 return Intrinsic::cos;
4457 return Intrinsic::tan;
4461 return Intrinsic::asin;
4465 return Intrinsic::acos;
4469 return Intrinsic::atan;
4471 case LibFunc_atan2f:
4472 case LibFunc_atan2l:
4473 return Intrinsic::atan2;
4477 return Intrinsic::sinh;
4481 return Intrinsic::cosh;
4485 return Intrinsic::tanh;
4489 return Intrinsic::exp;
4493 return Intrinsic::exp2;
4495 case LibFunc_exp10f:
4496 case LibFunc_exp10l:
4497 return Intrinsic::exp10;
4501 return Intrinsic::log;
4503 case LibFunc_log10f:
4504 case LibFunc_log10l:
4505 return Intrinsic::log10;
4509 return Intrinsic::log2;
4513 return Intrinsic::fabs;
4517 return Intrinsic::minnum;
4521 return Intrinsic::maxnum;
4522 case LibFunc_copysign:
4523 case LibFunc_copysignf:
4524 case LibFunc_copysignl:
4525 return Intrinsic::copysign;
4527 case LibFunc_floorf:
4528 case LibFunc_floorl:
4529 return Intrinsic::floor;
4533 return Intrinsic::ceil;
4535 case LibFunc_truncf:
4536 case LibFunc_truncl:
4537 return Intrinsic::trunc;
4541 return Intrinsic::rint;
4542 case LibFunc_nearbyint:
4543 case LibFunc_nearbyintf:
4544 case LibFunc_nearbyintl:
4545 return Intrinsic::nearbyint;
4547 case LibFunc_roundf:
4548 case LibFunc_roundl:
4549 return Intrinsic::round;
4550 case LibFunc_roundeven:
4551 case LibFunc_roundevenf:
4552 case LibFunc_roundevenl:
4553 return Intrinsic::roundeven;
4557 return Intrinsic::pow;
4561 return Intrinsic::sqrt;
4568 Ty = Ty->getScalarType();
bool &TrueIfSigned) {
TrueIfSigned = true;
return RHS.isZero();
TrueIfSigned = true;
return RHS.isAllOnes();
TrueIfSigned = false;
return RHS.isAllOnes();
TrueIfSigned = false;
return RHS.isZero();
TrueIfSigned = true;
return RHS.isMaxSignedValue();
TrueIfSigned = true;
return RHS.isMinSignedValue();
TrueIfSigned = false;
return RHS.isMinSignedValue();
TrueIfSigned = false;
return RHS.isMaxSignedValue();
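// Summary (inferred pairing; the predicates themselves are elided above): a
// comparison is a sign-bit test when it splits the domain at zero or at the
// signed extremes, e.g.:
//   X <s  0,    X <=s -1    -> true iff the sign bit is set
//   X >s -1,    X >=s  0    -> true iff the sign bit is clear
//   X >u SMAX,  X >=u SMIN  -> true iff the sign bit is set
//   X <u SMIN,  X <=u SMAX  -> true iff the sign bit is clear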
4616 unsigned Depth = 0) {
4641 KnownFromContext.
knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4645 KnownFromContext.
knownNot(CondIsTrue ? ~Mask : Mask);
4651 if (TrueIfSigned == CondIsTrue)
4667 return KnownFromContext;
4687 return KnownFromContext;
4697 "Got assumption for the wrong function!");
4698 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4699 "must be an assume intrinsic");
4705 true, Q.
CxtI, KnownFromContext);
4708 return KnownFromContext;
4719 APInt DemandedElts =
4725 const APInt &DemandedElts,
4730 if ((InterestedClasses &
4736 KnownSrc, Q,
Depth + 1);
assert(Known.isUnknown() && "should not be called with known information");
if (!DemandedElts) {
Known.SignBit = CFP->isNegative();
bool SignBitAllZero = true;
bool SignBitAllOne = true;
unsigned NumElts = VFVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
  if (!DemandedElts[i])
  const APFloat &C = CElt->getValueAPF();
    SignBitAllZero = false;
    SignBitAllOne = false;
if (SignBitAllOne != SignBitAllZero)
  Known.SignBit = SignBitAllOne;
4820 KnownNotFromFlags |= CB->getRetNoFPClass();
4822 KnownNotFromFlags |= Arg->getNoFPClass();
4826 if (FPOp->hasNoNaNs())
4827 KnownNotFromFlags |=
fcNan;
4828 if (FPOp->hasNoInfs())
4829 KnownNotFromFlags |=
fcInf;
4833 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
4837 InterestedClasses &= ~KnownNotFromFlags;
4856 const unsigned Opc =
Op->getOpcode();
4858 case Instruction::FNeg: {
4860 Known, Q,
Depth + 1);
4864 case Instruction::Select: {
4872 Value *TestedValue =
nullptr;
4878 Value *CmpLHS, *CmpRHS;
4885 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
4886 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
4892 MaskIfTrue = TestedMask;
4893 MaskIfFalse = ~TestedMask;
4896 if (TestedValue ==
LHS) {
4898 FilterLHS = MaskIfTrue;
4899 }
else if (TestedValue ==
RHS) {
4901 FilterRHS = MaskIfFalse;
4910 Known2, Q,
Depth + 1);
4916 case Instruction::Call: {
4920 case Intrinsic::fabs: {
4925 InterestedClasses, Known, Q,
Depth + 1);
4931 case Intrinsic::copysign: {
4935 Known, Q,
Depth + 1);
4937 KnownSign, Q,
Depth + 1);
4941 case Intrinsic::fma:
4942 case Intrinsic::fmuladd: {
4946 if (
II->getArgOperand(0) !=
II->getArgOperand(1))
4955 KnownAddend, Q,
Depth + 1);
4961 case Intrinsic::sqrt:
4962 case Intrinsic::experimental_constrained_sqrt: {
4965 if (InterestedClasses &
fcNan)
4969 KnownSrc, Q,
Depth + 1);
4987 II->getType()->getScalarType()->getFltSemantics();
4996 case Intrinsic::sin:
4997 case Intrinsic::cos: {
5001 KnownSrc, Q,
Depth + 1);
5007 case Intrinsic::maxnum:
5008 case Intrinsic::minnum:
5009 case Intrinsic::minimum:
5010 case Intrinsic::maximum:
5011 case Intrinsic::minimumnum:
5012 case Intrinsic::maximumnum: {
5015 KnownLHS, Q,
Depth + 1);
5017 KnownRHS, Q,
Depth + 1);
5020 Known = KnownLHS | KnownRHS;
5024 (IID == Intrinsic::minnum || IID == Intrinsic::maxnum ||
5025 IID == Intrinsic::minimumnum || IID == Intrinsic::maximumnum))
5028 if (IID == Intrinsic::maxnum || IID == Intrinsic::maximumnum) {
5036 }
else if (IID == Intrinsic::maximum) {
5042 }
else if (IID == Intrinsic::minnum || IID == Intrinsic::minimumnum) {
5050 }
else if (IID == Intrinsic::minimum) {
5073 II->getType()->getScalarType()->getFltSemantics());
5085 }
else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum ||
5086 IID == Intrinsic::maximumnum ||
5087 IID == Intrinsic::minimumnum) ||
5095 KnownLHS.
SignBit = std::nullopt;
5097 KnownRHS.
SignBit = std::nullopt;
5098 if ((IID == Intrinsic::maximum || IID == Intrinsic::maximumnum ||
5099 IID == Intrinsic::maxnum) &&
5102 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minimumnum ||
5103 IID == Intrinsic::minnum) &&
5110 case Intrinsic::canonicalize: {
5113 KnownSrc, Q,
Depth + 1);
5137 II->getType()->getScalarType()->getFltSemantics();
5157 case Intrinsic::vector_reduce_fmax:
5158 case Intrinsic::vector_reduce_fmin:
5159 case Intrinsic::vector_reduce_fmaximum:
5160 case Intrinsic::vector_reduce_fminimum: {
5164 InterestedClasses, Q,
Depth + 1);
5171 case Intrinsic::vector_reverse:
5174 II->getFastMathFlags(), InterestedClasses, Q,
Depth + 1);
5176 case Intrinsic::trunc:
5177 case Intrinsic::floor:
5178 case Intrinsic::ceil:
5179 case Intrinsic::rint:
5180 case Intrinsic::nearbyint:
5181 case Intrinsic::round:
5182 case Intrinsic::roundeven: {
5190 KnownSrc, Q,
Depth + 1);
5199 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5214 case Intrinsic::exp:
5215 case Intrinsic::exp2:
5216 case Intrinsic::exp10: {
5223 KnownSrc, Q,
Depth + 1);
5231 case Intrinsic::fptrunc_round: {
5236 case Intrinsic::log:
5237 case Intrinsic::log10:
5238 case Intrinsic::log2:
5239 case Intrinsic::experimental_constrained_log:
5240 case Intrinsic::experimental_constrained_log10:
5241 case Intrinsic::experimental_constrained_log2: {
5257 KnownSrc, Q,
Depth + 1);
5271 II->getType()->getScalarType()->getFltSemantics();
5279 case Intrinsic::powi: {
5283 const Value *Exp =
II->getArgOperand(1);
5284 Type *ExpTy = Exp->getType();
5288 ExponentKnownBits, Q,
Depth + 1);
5290 if (ExponentKnownBits.
Zero[0]) {
5305 KnownSrc, Q,
Depth + 1);
5310 case Intrinsic::ldexp: {
5313 KnownSrc, Q,
Depth + 1);
5329 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5335 II->getType()->getScalarType()->getFltSemantics();
5337 const Value *ExpArg =
II->getArgOperand(1);
5341 const int MantissaBits = Precision - 1;
5348 II->getType()->getScalarType()->getFltSemantics();
5349 if (ConstVal && ConstVal->
isZero()) {
5374 case Intrinsic::arithmetic_fence: {
5376 Known, Q,
Depth + 1);
5379 case Intrinsic::experimental_constrained_sitofp:
5380 case Intrinsic::experimental_constrained_uitofp:
5390 if (IID == Intrinsic::experimental_constrained_uitofp)
5401 case Instruction::FAdd:
5402 case Instruction::FSub: {
5405 Op->getOpcode() == Instruction::FAdd &&
5407 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5410 if (!WantNaN && !WantNegative && !WantNegZero)
5416 if (InterestedClasses &
fcNan)
5417 InterestedSrcs |=
fcInf;
5419 KnownRHS, Q,
Depth + 1);
5423 WantNegZero ||
Opc == Instruction::FSub) {
5428 KnownLHS, Q,
Depth + 1);
5438 if (
Op->getOpcode() == Instruction::FAdd) {
5446 Op->getType()->getScalarType()->getFltSemantics();
5460 Op->getType()->getScalarType()->getFltSemantics();
5474 case Instruction::FMul: {
5476 if (
Op->getOperand(0) ==
Op->getOperand(1))
5513 Type *OpTy =
Op->getType()->getScalarType();
5525 case Instruction::FDiv:
5526 case Instruction::FRem: {
5527 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5529 if (
Op->getOpcode() == Instruction::FDiv) {
5540 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
5542 const bool WantPositive =
5544 if (!WantNan && !WantNegative && !WantPositive)
5553 bool KnowSomethingUseful =
5556 if (KnowSomethingUseful || WantPositive) {
5562 InterestedClasses & InterestedLHS, KnownLHS, Q,
5568 Op->getType()->getScalarType()->getFltSemantics();
5570 if (
Op->getOpcode() == Instruction::FDiv) {
5609 case Instruction::FPExt: {
5612 Known, Q,
Depth + 1);
5615 Op->getType()->getScalarType()->getFltSemantics();
5617 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5633 case Instruction::FPTrunc: {
5638 case Instruction::SIToFP:
5639 case Instruction::UIToFP: {
5648 if (
Op->getOpcode() == Instruction::UIToFP)
5651 if (InterestedClasses &
fcInf) {
5655 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5656 if (
Op->getOpcode() == Instruction::SIToFP)
5661 Type *FPTy =
Op->getType()->getScalarType();
5668 case Instruction::ExtractElement: {
5671 const Value *Vec =
Op->getOperand(0);
5673 APInt DemandedVecElts;
5675 unsigned NumElts = VecTy->getNumElements();
5678 if (CIdx && CIdx->getValue().ult(NumElts))
5681 DemandedVecElts =
APInt(1, 1);
5687 case Instruction::InsertElement: {
5691 const Value *Vec =
Op->getOperand(0);
5692 const Value *Elt =
Op->getOperand(1);
5695 APInt DemandedVecElts = DemandedElts;
5696 bool NeedsElt =
true;
5698 if (CIdx && CIdx->getValue().ult(NumElts)) {
5699 DemandedVecElts.
clearBit(CIdx->getZExtValue());
5700 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5714 if (!DemandedVecElts.
isZero()) {
5723 case Instruction::ShuffleVector: {
5726 APInt DemandedLHS, DemandedRHS;
5731 if (!!DemandedLHS) {
5732 const Value *
LHS = Shuf->getOperand(0);
5743 if (!!DemandedRHS) {
5745 const Value *
RHS = Shuf->getOperand(1);
5753 case Instruction::ExtractValue: {
5760 switch (
II->getIntrinsicID()) {
5761 case Intrinsic::frexp: {
5766 InterestedClasses, KnownSrc, Q,
Depth + 1);
5770 Op->getType()->getScalarType()->getFltSemantics();
5805 case Instruction::PHI: {
5808 if (
P->getNumIncomingValues() == 0)
5815 if (
Depth < PhiRecursionLimit) {
5822 for (
const Use &U :
P->operands()) {
5852 case Instruction::BitCast: {
5855 !Src->getType()->isIntOrIntVectorTy())
5858 const Type *Ty =
Op->getType()->getScalarType();
5859 KnownBits Bits(Ty->getScalarSizeInBits());
5863 if (Bits.isNonNegative())
5865 else if (Bits.isNegative())
5868 if (Ty->isIEEELikeFPTy()) {
5878 else if (!
APFloat(Ty->getFltSemantics(), ~Bits.Zero).
isNaN())
5885 InfKB.Zero.clearSignBit();
5887 assert(!InfResult.value());
5889 }
else if (Bits == InfKB) {
5897 ZeroKB.Zero.clearSignBit();
5899 assert(!ZeroResult.value());
5901 }
else if (Bits == ZeroKB) {
5914 const APInt &DemandedElts,
5921 return KnownClasses;
5947 InterestedClasses &=
~fcNan;
5949 InterestedClasses &=
~fcInf;
5955 Result.KnownFPClasses &=
~fcNan;
5957 Result.KnownFPClasses &=
~fcInf;
5966 APInt DemandedElts =
6020 if (FPOp->hasNoSignedZeros())
6024 switch (
User->getOpcode()) {
6025 case Instruction::FPToSI:
6026 case Instruction::FPToUI:
6028 case Instruction::FCmp:
6031 case Instruction::Call:
6033 switch (
II->getIntrinsicID()) {
6034 case Intrinsic::fabs:
6036 case Intrinsic::copysign:
6037 return U.getOperandNo() == 0;
6038 case Intrinsic::is_fpclass:
6039 case Intrinsic::vp_is_fpclass: {
6059 if (FPOp->hasNoNaNs())
6063 switch (
User->getOpcode()) {
6064 case Instruction::FPToSI:
6065 case Instruction::FPToUI:
6068 case Instruction::FAdd:
6069 case Instruction::FSub:
6070 case Instruction::FMul:
6071 case Instruction::FDiv:
6072 case Instruction::FRem:
6073 case Instruction::FPTrunc:
6074 case Instruction::FPExt:
6075 case Instruction::FCmp:
6078 case Instruction::FNeg:
6079 case Instruction::Select:
6080 case Instruction::PHI:
6082 case Instruction::Ret:
6083 return User->getFunction()->getAttributes().getRetNoFPClass() &
6085 case Instruction::Call:
6086 case Instruction::Invoke: {
6088 switch (
II->getIntrinsicID()) {
6089 case Intrinsic::fabs:
6091 case Intrinsic::copysign:
6092 return U.getOperandNo() == 0;
6094 case Intrinsic::maxnum:
6095 case Intrinsic::minnum:
6096 case Intrinsic::maximum:
6097 case Intrinsic::minimum:
6098 case Intrinsic::maximumnum:
6099 case Intrinsic::minimumnum:
6100 case Intrinsic::canonicalize:
6101 case Intrinsic::fma:
6102 case Intrinsic::fmuladd:
6103 case Intrinsic::sqrt:
6104 case Intrinsic::pow:
6105 case Intrinsic::powi:
6106 case Intrinsic::fptoui_sat:
6107 case Intrinsic::fptosi_sat:
6108 case Intrinsic::is_fpclass:
6109 case Intrinsic::vp_is_fpclass:
if (V->getType()->isIntegerTy(8))
if (DL.getTypeStoreSize(V->getType()).isZero())
if (C->isNullValue())
if (CFP->getType()->isHalfTy())
else if (CFP->getType()->isFloatTy())
else if (CFP->getType()->isDoubleTy())
if (CI->getBitWidth() % 8 == 0) {
  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
  if (!CI->getValue().isSplat(8))
  return ConstantInt::get(Ctx, CI->getValue().trunc(8));
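// Illustrative sketch (assumption): what the splat test above computes for a
// multi-byte integer; i8 values are handled separately earlier.
static std::optional<uint8_t> splatByteOf(const APInt &V) {
  if (V.getBitWidth() <= 8 || V.getBitWidth() % 8 != 0 || !V.isSplat(8))
    return std::nullopt; // not a whole number of identical bytes
  return static_cast<uint8_t>(V.trunc(8).getZExtValue());
}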
6183 if (CE->getOpcode() == Instruction::IntToPtr) {
6185 unsigned BitWidth =
DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6198 if (LHS == UndefInt8)
6200 if (RHS == UndefInt8)
6206 Value *Val = UndefInt8;
6207 for (
uint64_t I = 0, E = CA->getNumElements();
I != E; ++
I)
6214 Value *Val = UndefInt8;
6249 while (PrevTo != OrigTo) {
6296 unsigned IdxSkip = Idxs.
size();
6309 std::optional<BasicBlock::iterator> InsertBefore) {
6312 if (idx_range.
empty())
6315 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6316 "Not looking at a struct or array?");
6318 "Invalid indices for type?");
6321 C =
C->getAggregateElement(idx_range[0]);
6322 if (!
C)
return nullptr;
6329 const unsigned *req_idx = idx_range.
begin();
6330 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
6331 i != e; ++i, ++req_idx) {
6332 if (req_idx == idx_range.
end()) {
6362 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
6371 unsigned size =
I->getNumIndices() + idx_range.
size();
6376 Idxs.
append(
I->idx_begin(),
I->idx_end());
6382 &&
"Number of indices added not correct?");
6399 assert(V &&
"V should not be null.");
6400 assert((ElementSize % 8) == 0 &&
6401 "ElementSize expected to be a multiple of the size of a byte.");
6402 unsigned ElementSizeInBytes = ElementSize / 8;
6414 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
6421 uint64_t StartIdx = Off.getLimitedValue();
6428 if ((StartIdx % ElementSizeInBytes) != 0)
6431 Offset += StartIdx / ElementSizeInBytes;
6437 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
6440 Slice.Array =
nullptr;
6452 Type *InitElTy = ArrayInit->getElementType();
6457 ArrayTy = ArrayInit->getType();
6462 if (ElementSize != 8)
6481 Slice.Array = Array;
6483 Slice.Length = NumElts -
Offset;
6497 if (Slice.Array ==
nullptr) {
6508 if (Slice.Length == 1) {
6520 Str = Str.
substr(Slice.Offset);
6526 Str = Str.substr(0, Str.find(
'\0'));
6539 unsigned CharSize) {
6541 V = V->stripPointerCasts();
6546 if (!PHIs.
insert(PN).second)
6551 for (
Value *IncValue : PN->incoming_values()) {
6553 if (Len == 0)
return 0;
6555 if (Len == ~0ULL)
continue;
6557 if (Len != LenSoFar && LenSoFar != ~0ULL)
6569 if (Len1 == 0)
return 0;
6571 if (Len2 == 0)
return 0;
6572 if (Len1 == ~0ULL)
return Len2;
6573 if (Len2 == ~0ULL)
return Len1;
6574 if (Len1 != Len2)
return 0;
6583 if (Slice.Array ==
nullptr)
6591 unsigned NullIndex = 0;
6592 for (
unsigned E = Slice.Length; NullIndex <
E; ++NullIndex) {
6593 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
6597 return NullIndex + 1;
6603 if (!V->getType()->isPointerTy())
6610 return Len == ~0ULL ? 1 : Len;
6615 bool MustPreserveNullness) {
6617 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6618 if (
const Value *RV =
Call->getReturnedArgOperand())
6622 Call, MustPreserveNullness))
6623 return Call->getArgOperand(0);
6629 switch (
Call->getIntrinsicID()) {
6630 case Intrinsic::launder_invariant_group:
6631 case Intrinsic::strip_invariant_group:
6632 case Intrinsic::aarch64_irg:
6633 case Intrinsic::aarch64_tagp:
6643 case Intrinsic::amdgcn_make_buffer_rsrc:
6645 case Intrinsic::ptrmask:
6646 return !MustPreserveNullness;
6647 case Intrinsic::threadlocal_address:
6650 return !
Call->getParent()->getParent()->isPresplitCoroutine();
6667 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6669 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6678 if (!L->isLoopInvariant(Load->getPointerOperand()))
6684 for (
unsigned Count = 0; MaxLookup == 0 ||
Count < MaxLookup; ++
Count) {
6686 const Value *PtrOp =
GEP->getPointerOperand();
6697 if (GA->isInterposable())
6699 V = GA->getAliasee();
6703 if (
PHI->getNumIncomingValues() == 1) {
6704 V =
PHI->getIncomingValue(0);
6725 assert(V->getType()->isPointerTy() &&
"Unexpected operand type!");
6732 const LoopInfo *LI,
unsigned MaxLookup) {
6740 if (!Visited.
insert(
P).second)
6769 }
while (!Worklist.
empty());
6773 const unsigned MaxVisited = 8;
6778 const Value *Object =
nullptr;
6788 if (!Visited.
insert(
P).second)
6791 if (Visited.
size() == MaxVisited)
6807 else if (Object !=
P)
6809 }
while (!Worklist.
empty());
6811 return Object ? Object : FirstObject;
6821 if (U->getOpcode() == Instruction::PtrToInt)
6822 return U->getOperand(0);
6829 if (U->getOpcode() != Instruction::Add ||
6834 V = U->getOperand(0);
6838 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
6855 for (
const Value *V : Objs) {
6856 if (!Visited.
insert(V).second)
6861 if (O->getType()->isPointerTy()) {
6874 }
while (!Working.
empty());
auto AddWork = [&](Value *V) {
  if (Visited.insert(V).second)
if (Result && Result != AI)
AddWork(CI->getOperand(0));
for (Value *IncValue : PN->incoming_values())
AddWork(SI->getTrueValue());
AddWork(SI->getFalseValue());
if (OffsetZero && !GEP->hasAllZeroIndices())
AddWork(GEP->getPointerOperand());
Value *Returned = CB->getReturnedArgOperand();
} while (!Worklist.empty());
6924 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
6930 if (AllowLifetime &&
II->isLifetimeStartOrEnd())
6933 if (AllowDroppable &&
II->isDroppable())
6954 return (!Shuffle || Shuffle->isSelect()) &&
6961 bool IgnoreUBImplyingAttrs) {
6963 AC, DT, TLI, UseVariableInfo,
6964 IgnoreUBImplyingAttrs);
6970 bool UseVariableInfo,
bool IgnoreUBImplyingAttrs) {
6974 auto hasEqualReturnAndLeadingOperandTypes =
6975 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
6979 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6985 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6987 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6994 case Instruction::UDiv:
6995 case Instruction::URem: {
7002 case Instruction::SDiv:
7003 case Instruction::SRem: {
7005 const APInt *Numerator, *Denominator;
7009 if (*Denominator == 0)
7021 case Instruction::Load: {
7022 if (!UseVariableInfo)
7035 case Instruction::Call: {
7039 const Function *Callee = CI->getCalledFunction();
7043 if (!Callee || !Callee->isSpeculatable())
7047 return IgnoreUBImplyingAttrs || !CI->hasUBImplyingAttrs();
7049 case Instruction::VAArg:
7050 case Instruction::Alloca:
7051 case Instruction::Invoke:
7052 case Instruction::CallBr:
7053 case Instruction::PHI:
7054 case Instruction::Store:
7055 case Instruction::Ret:
7056 case Instruction::Br:
7057 case Instruction::IndirectBr:
7058 case Instruction::Switch:
7059 case Instruction::Unreachable:
7060 case Instruction::Fence:
7061 case Instruction::AtomicRMW:
7062 case Instruction::AtomicCmpXchg:
7063 case Instruction::LandingPad:
7064 case Instruction::Resume:
7065 case Instruction::CatchSwitch:
7066 case Instruction::CatchPad:
7067 case Instruction::CatchRet:
7068 case Instruction::CleanupPad:
7069 case Instruction::CleanupRet:
7075 if (
I.mayReadOrWriteMemory())
7143 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
7188 if (
Add &&
Add->hasNoSignedWrap()) {
7227 bool LHSOrRHSKnownNonNegative =
7229 bool LHSOrRHSKnownNegative =
7231 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7234 if ((AddKnown.
isNonNegative() && LHSOrRHSKnownNonNegative) ||
7235 (AddKnown.
isNegative() && LHSOrRHSKnownNegative))
7310 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
7312 if (EVI->getIndices()[0] == 0)
7315 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
7317 for (
const auto *U : EVI->users())
7319 assert(
B->isConditional() &&
"How else is it using an i1?");
7330 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
7336 for (
const auto *Result :
Results) {
7339 if (DT.
dominates(NoWrapEdge, Result->getParent()))
7342 for (
const auto &RU : Result->uses())
7350 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7362 unsigned NumElts = FVTy->getNumElements();
7363 for (
unsigned i = 0; i < NumElts; ++i)
7364 ShiftAmounts.
push_back(
C->getAggregateElement(i));
7372 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
7393 bool ConsiderFlagsAndMetadata) {
7396 Op->hasPoisonGeneratingAnnotations())
7399 unsigned Opcode =
Op->getOpcode();
7403 case Instruction::Shl:
7404 case Instruction::AShr:
7405 case Instruction::LShr:
7407 case Instruction::FPToSI:
7408 case Instruction::FPToUI:
7412 case Instruction::Call:
7414 switch (
II->getIntrinsicID()) {
7416 case Intrinsic::ctlz:
7417 case Intrinsic::cttz:
7418 case Intrinsic::abs:
7422 case Intrinsic::ctpop:
7423 case Intrinsic::bswap:
7424 case Intrinsic::bitreverse:
7425 case Intrinsic::fshl:
7426 case Intrinsic::fshr:
7427 case Intrinsic::smax:
7428 case Intrinsic::smin:
7429 case Intrinsic::scmp:
7430 case Intrinsic::umax:
7431 case Intrinsic::umin:
7432 case Intrinsic::ucmp:
7433 case Intrinsic::ptrmask:
7434 case Intrinsic::fptoui_sat:
7435 case Intrinsic::fptosi_sat:
7436 case Intrinsic::sadd_with_overflow:
7437 case Intrinsic::ssub_with_overflow:
7438 case Intrinsic::smul_with_overflow:
7439 case Intrinsic::uadd_with_overflow:
7440 case Intrinsic::usub_with_overflow:
7441 case Intrinsic::umul_with_overflow:
7442 case Intrinsic::sadd_sat:
7443 case Intrinsic::uadd_sat:
7444 case Intrinsic::ssub_sat:
7445 case Intrinsic::usub_sat:
7447 case Intrinsic::sshl_sat:
7448 case Intrinsic::ushl_sat:
7451 case Intrinsic::fma:
7452 case Intrinsic::fmuladd:
7453 case Intrinsic::sqrt:
7454 case Intrinsic::powi:
7455 case Intrinsic::sin:
7456 case Intrinsic::cos:
7457 case Intrinsic::pow:
7458 case Intrinsic::log:
7459 case Intrinsic::log10:
7460 case Intrinsic::log2:
7461 case Intrinsic::exp:
7462 case Intrinsic::exp2:
7463 case Intrinsic::exp10:
7464 case Intrinsic::fabs:
7465 case Intrinsic::copysign:
7466 case Intrinsic::floor:
7467 case Intrinsic::ceil:
7468 case Intrinsic::trunc:
7469 case Intrinsic::rint:
7470 case Intrinsic::nearbyint:
7471 case Intrinsic::round:
7472 case Intrinsic::roundeven:
7473 case Intrinsic::fptrunc_round:
7474 case Intrinsic::canonicalize:
7475 case Intrinsic::arithmetic_fence:
7476 case Intrinsic::minnum:
7477 case Intrinsic::maxnum:
7478 case Intrinsic::minimum:
7479 case Intrinsic::maximum:
7480 case Intrinsic::minimumnum:
7481 case Intrinsic::maximumnum:
7482 case Intrinsic::is_fpclass:
7483 case Intrinsic::ldexp:
7484 case Intrinsic::frexp:
7486 case Intrinsic::lround:
7487 case Intrinsic::llround:
7488 case Intrinsic::lrint:
7489 case Intrinsic::llrint:
7496 case Instruction::CallBr:
7497 case Instruction::Invoke: {
7499 return !CB->hasRetAttr(Attribute::NoUndef);
7501 case Instruction::InsertElement:
7502 case Instruction::ExtractElement: {
7505 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7509 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7512 case Instruction::ShuffleVector: {
7518 case Instruction::FNeg:
7519 case Instruction::PHI:
7520 case Instruction::Select:
7521 case Instruction::ExtractValue:
7522 case Instruction::InsertValue:
7523 case Instruction::Freeze:
7524 case Instruction::ICmp:
7525 case Instruction::FCmp:
7526 case Instruction::GetElementPtr:
7528 case Instruction::AddrSpaceCast:
7543 bool ConsiderFlagsAndMetadata) {
7545 ConsiderFlagsAndMetadata);
7550 ConsiderFlagsAndMetadata);
7555 if (ValAssumedPoison == V)
7558 const unsigned MaxDepth = 2;
7559 if (
Depth >= MaxDepth)
7564 return propagatesPoison(Op) &&
7565 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7589 const unsigned MaxDepth = 2;
7590 if (
Depth >= MaxDepth)
7596 return impliesPoison(Op, V, Depth + 1);
7603 return ::impliesPoison(ValAssumedPoison, V, 0);
7618 if (
A->hasAttribute(Attribute::NoUndef) ||
7619 A->hasAttribute(Attribute::Dereferenceable) ||
7620 A->hasAttribute(Attribute::DereferenceableOrNull))
7635 if (
C->getType()->isVectorTy()) {
7638 if (
Constant *SplatC =
C->getSplatValue())
7646 return !
C->containsConstantExpression();
7659 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7664 auto OpCheck = [&](
const Value *V) {
7675 if (CB->hasRetAttr(Attribute::NoUndef) ||
7676 CB->hasRetAttr(Attribute::Dereferenceable) ||
7677 CB->hasRetAttr(Attribute::DereferenceableOrNull))
unsigned Num = PN->getNumIncomingValues();
bool IsWellDefined = true;
for (unsigned i = 0; i < Num; ++i) {
  if (PN == PN->getIncomingValue(i))
  auto *TI = PN->getIncomingBlock(i)->getTerminator();
                                     DT, Depth + 1, Kind)) {
    IsWellDefined = false;
} else if (all_of(Opr->operands(), OpCheck))
if (I->hasMetadata(LLVMContext::MD_noundef) ||
    I->hasMetadata(LLVMContext::MD_dereferenceable) ||
    I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7726 auto *Dominator = DNode->
getIDom();
7731 auto *TI = Dominator->getBlock()->getTerminator();
7735 if (BI->isConditional())
7736 Cond = BI->getCondition();
7738 Cond =
SI->getCondition();
7747 if (
any_of(Opr->operands(), [V](
const Use &U) {
7748 return V == U && propagatesPoison(U);
7754 Dominator = Dominator->getIDom();
7767 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7774 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7781 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7805 while (!Worklist.
empty()) {
7814 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7815 return KnownPoison.contains(U) && propagatesPoison(U);
7819 if (KnownPoison.
insert(
I).second)
7831 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7839 return ::computeOverflowForSignedAdd(LHS, RHS,
nullptr, SQ);
7871 return !
I->mayThrow() &&
I->willReturn();
7885 unsigned ScanLimit) {
7892 assert(ScanLimit &&
"scan limit must be non-zero");
7894 if (--ScanLimit == 0)
7908 if (I->getParent() != L->getHeader())
  return false;
7911 if (&LI == I)
  return true;
7914 llvm_unreachable("Instruction not contained in its own parent basic block.");
7920 case Intrinsic::sadd_with_overflow:
7921 case Intrinsic::ssub_with_overflow:
7922 case Intrinsic::smul_with_overflow:
7923 case Intrinsic::uadd_with_overflow:
7924 case Intrinsic::usub_with_overflow:
7925 case Intrinsic::umul_with_overflow:
7930 case Intrinsic::ctpop:
7931 case Intrinsic::ctlz:
7932 case Intrinsic::cttz:
7933 case Intrinsic::abs:
7934 case Intrinsic::smax:
7935 case Intrinsic::smin:
7936 case Intrinsic::umax:
7937 case Intrinsic::umin:
7938 case Intrinsic::scmp:
7939 case Intrinsic::is_fpclass:
7940 case Intrinsic::ptrmask:
7941 case Intrinsic::ucmp:
7942 case Intrinsic::bitreverse:
7943 case Intrinsic::bswap:
7944 case Intrinsic::sadd_sat:
7945 case Intrinsic::ssub_sat:
7946 case Intrinsic::sshl_sat:
7947 case Intrinsic::uadd_sat:
7948 case Intrinsic::usub_sat:
7949 case Intrinsic::ushl_sat:
7950 case Intrinsic::smul_fix:
7951 case Intrinsic::smul_fix_sat:
7952 case Intrinsic::umul_fix:
7953 case Intrinsic::umul_fix_sat:
7954 case Intrinsic::pow:
7955 case Intrinsic::powi:
7956 case Intrinsic::sin:
7957 case Intrinsic::sinh:
7958 case Intrinsic::cos:
7959 case Intrinsic::cosh:
7960 case Intrinsic::sincos:
7961 case Intrinsic::sincospi:
7962 case Intrinsic::tan:
7963 case Intrinsic::tanh:
7964 case Intrinsic::asin:
7965 case Intrinsic::acos:
7966 case Intrinsic::atan:
7967 case Intrinsic::atan2:
7968 case Intrinsic::canonicalize:
7969 case Intrinsic::sqrt:
7970 case Intrinsic::exp:
7971 case Intrinsic::exp2:
7972 case Intrinsic::exp10:
7973 case Intrinsic::log:
7974 case Intrinsic::log2:
7975 case Intrinsic::log10:
7976 case Intrinsic::modf:
7977 case Intrinsic::floor:
7978 case Intrinsic::ceil:
7979 case Intrinsic::trunc:
7980 case Intrinsic::rint:
7981 case Intrinsic::nearbyint:
7982 case Intrinsic::round:
7983 case Intrinsic::roundeven:
7984 case Intrinsic::lrint:
7985 case Intrinsic::llrint:
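// Opcode-based dispatch over instructions whose poison behaviour is decided per
// operand (this looks like the body of propagatesPoison): freeze, phi and invoke
// do not forward poison unconditionally, select only propagates through its
// condition, and compares and getelementptr propagate it from any operand.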
7994 switch (I->getOpcode()) {
7995 case Instruction::Freeze:
7996 case Instruction::PHI:
7997 case Instruction::Invoke:
7999 case Instruction::Select:
8001 case Instruction::Call:
8005 case Instruction::ICmp:
8006 case Instruction::FCmp:
8007 case Instruction::GetElementPtr:
8021 template <typename CallableT>
8023 const CallableT &Handle) {
8024 switch (I->getOpcode()) {
8025 case Instruction::Store:
8030 case Instruction::Load:
8037 case Instruction::AtomicCmpXchg:
8042 case Instruction::AtomicRMW:
8047 case Instruction::Call:
8048 case Instruction::Invoke: {
8052 for (unsigned i = 0; i < CB->arg_size(); ++i)
8055 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8060 case Instruction::Ret:
8061 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8062 Handle(I->getOperand(0)))
8065 case Instruction::Switch:
8069 case Instruction::Br: {
8071 if (BR->isConditional() && Handle(BR->getCondition()))
8083 template <typename CallableT>
8085 const CallableT &Handle) {
8088 switch (I->getOpcode()) {
8090 case Instruction::UDiv:
8091 case Instruction::SDiv:
8092 case Instruction::URem:
8093 case Instruction::SRem:
8094 return Handle(I->getOperand(1));
8103 I, [&](const Value *V) { return KnownPoison.count(V); });
8122 if (Arg->getParent()->isDeclaration())
8125 Begin = BB->begin();
8132 unsigned ScanLimit = 32;
8141 if (--ScanLimit == 0)
8145 return WellDefinedOp == V;
8165 if (--ScanLimit == 0)
8173 for (const Use &Op : I.operands()) {
8183 if (I.getOpcode() == Instruction::Select &&
8184 YieldsPoison.count(I.getOperand(1)) &&
8185 YieldsPoison.count(I.getOperand(2))) {
8191 if (!BB || !Visited.insert(BB).second)
8201 return ::programUndefinedIfUndefOrPoison(Inst, false);
8205 return ::programUndefinedIfUndefOrPoison(Inst, true);
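// The boolean passed to ::programUndefinedIfUndefOrPoison selects the mode:
// false checks for undef or poison, true restricts the query to poison only.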
8216 if (!C->getElementType()->isFloatingPointTy())
8218 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8219 if (C->getElementAsAPFloat(I).isNaN())
8233 return !C->isZero();
8236 if (!C->getElementType()->isFloatingPointTy())
8238 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8239 if (C->getElementAsAPFloat(I).isZero())
8262 if (CmpRHS == FalseVal) {
8306 if (CmpRHS != TrueVal) {
8345 Value *A = nullptr, *B = nullptr;
8350 Value *C = nullptr, *D = nullptr;
8352 if (L.Flavor != R.Flavor)
8404 return {L.Flavor, SPNB_NA, false};
8411 return {L.Flavor, SPNB_NA, false};
8418 return {L.Flavor, SPNB_NA, false};
8425 return {L.Flavor, SPNB_NA, false};
8441 return ConstantInt::get(V->getType(), ~(*C));
8498 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8518 assert(X && Y && "Invalid operand");
8520 auto IsNegationOf = [&](const Value *X, const Value *Y) {
8525 if (NeedNSW && !BO->hasNoSignedWrap())
8529 if (!AllowPoison && !Zero->isNullValue())
8536 if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8563 const APInt *RHSC1, *RHSC2;
8574 return CR1.inverse() == CR2;
8608 std::optional<std::pair<CmpPredicate, Constant *>>
8611 "Only for relational integer predicates.");
8613 return std::nullopt;
8619 bool WillIncrement =
8624 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
8625 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
8628 Constant *SafeReplacementConstant = nullptr;
8631 if (!ConstantIsOk(CI))
8632 return std::nullopt;
8634 unsigned NumElts = FVTy->getNumElements();
8635 for (unsigned i = 0; i != NumElts; ++i) {
8636 Constant *Elt = C->getAggregateElement(i);
8638 return std::nullopt;
8646 if (!CI || !ConstantIsOk(CI))
8647 return std::nullopt;
8649 if (!SafeReplacementConstant)
8650 SafeReplacementConstant = CI;
8654 Value *SplatC = C->getSplatValue();
8657 if (!CI || !ConstantIsOk(CI))
8658 return std::nullopt;
8661 return std::nullopt;
8668 if (C->containsUndefOrPoisonElement()) {
8669 assert(SafeReplacementConstant && "Replacement constant not set");
8676 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
8679 return std::make_pair(NewPred, NewC);
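// Worked example of the strictness flip above (assuming this is
// getFlippedStrictnessPredicateAndConstant): 'icmp sgt X, 5' becomes
// 'icmp sge X, 6', i.e. the predicate loses its strictness while the constant is
// bumped by one; ConstantIsOk rejects constants already at the signed/unsigned
// extreme, where the +1/-1 adjustment would wrap.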
8688 bool HasMismatchedZeros = false;
8694 Value *OutputZeroVal = nullptr;
8697 OutputZeroVal = TrueVal;
8700 OutputZeroVal = FalseVal;
8702 if (OutputZeroVal) {
8704 HasMismatchedZeros = true;
8705 CmpLHS = OutputZeroVal;
8708 HasMismatchedZeros = true;
8709 CmpRHS = OutputZeroVal;
8726 if (!HasMismatchedZeros)
8737 bool Ordered = false;
8748 if (LHSSafe && RHSSafe) {
8779 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8790 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8796 auto MaybeSExtCmpLHS =
8800 if (match(TrueVal, MaybeSExtCmpLHS)) {
8822 else if (match(FalseVal, MaybeSExtCmpLHS)) {
8862 case Instruction::ZExt:
8866 case Instruction::SExt:
8870 case Instruction::Trunc:
8873 CmpConst->getType() == SrcTy) {
8895 CastedTo = CmpConst;
8897 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8901 case Instruction::FPTrunc:
8904 case Instruction::FPExt:
8907 case Instruction::FPToUI:
8910 case Instruction::FPToSI:
8913 case Instruction::UIToFP:
8916 case Instruction::SIToFP:
8929 if (CastedBack && CastedBack != C)
8957 *CastOp = Cast1->getOpcode();
8958 Type *SrcTy = Cast1->getSrcTy();
8961 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
8962 return Cast2->getOperand(0);
8970 Value *CastedTo = nullptr;
8971 if (*CastOp == Instruction::Trunc) {
8985 "V2 and Cast1 should be the same type.");
9004 Value *TrueVal = SI->getTrueValue();
9005 Value *FalseVal = SI->getFalseValue();
9008 CmpI, TrueVal, FalseVal, LHS, RHS,
9027 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
9031 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9033 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9040 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9042 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9047 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9066 return Intrinsic::umin;
9068 return Intrinsic::umax;
9070 return Intrinsic::smin;
9072 return Intrinsic::smax;
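// The returns above appear to map SelectPatternFlavor values to their min/max
// intrinsic counterparts (umin, umax, smin, smax); the switch that follows flips
// each min/max intrinsic to its dual.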
9088 case Intrinsic::smax:
return Intrinsic::smin;
9089 case Intrinsic::smin:
return Intrinsic::smax;
9090 case Intrinsic::umax:
return Intrinsic::umin;
9091 case Intrinsic::umin:
return Intrinsic::umax;
9094 case Intrinsic::maximum:
return Intrinsic::minimum;
9095 case Intrinsic::minimum:
return Intrinsic::maximum;
9096 case Intrinsic::maxnum:
return Intrinsic::minnum;
9097 case Intrinsic::minnum:
return Intrinsic::maxnum;
9112std::pair<Intrinsic::ID, bool>
9117 bool AllCmpSingleUse = true;
9120 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
9126 SelectPattern.Flavor != CurrentPattern.Flavor)
9128 SelectPattern = CurrentPattern;
9133 switch (SelectPattern.Flavor) {
9135 return {Intrinsic::smin, AllCmpSingleUse};
9137 return {Intrinsic::umin, AllCmpSingleUse};
9139 return {Intrinsic::smax, AllCmpSingleUse};
9141 return {Intrinsic::umax, AllCmpSingleUse};
9143 return {Intrinsic::maxnum, AllCmpSingleUse};
9145 return {Intrinsic::minnum, AllCmpSingleUse};
9153 template <typename InstTy>
9163 for (unsigned I = 0; I != 2; ++I) {
9168 if (LHS != PN && RHS != PN)
9204 if (I->arg_size() != 2 || I->getType() != I->getArgOperand(0)->getType() ||
9205 I->getType() != I->getArgOperand(1)->getType())
9233 return !C->isNegative();
9245 const APInt *CLHS, *CRHS;
9248 return CLHS->sle(*CRHS);
9286 const APInt *CLHS, *CRHS;
9289 return CLHS->ule(*CRHS);
9298static std::optional<bool>
9303 return std::nullopt;
9310 return std::nullopt;
9317 return std::nullopt;
9324 return std::nullopt;
9331 return std::nullopt;
9338static std::optional<bool>
9344 if (CR.icmp(Pred, RCR))
9351 return std::nullopt;
9364 return std::nullopt;
9370static std::optional<bool>
9401 const APInt *Unused;
9420 return std::nullopt;
9424 if (L0 == R0 && L1 == R1)
9457 ((A == R0 && B == R1) || (A == R1 && B == R0) ||
9475 return std::nullopt;
9481static std::optional<bool>
9511 if (L0 == R0 && L1 == R1) {
9512 if ((LPred & RPred) == LPred)
9514 if ((LPred & ~RPred) == LPred)
9522 if (std::optional<ConstantFPRange> DomCR =
9524 if (std::optional<ConstantFPRange> ImpliedCR =
9526 if (ImpliedCR->contains(*DomCR))
9529 if (std::optional<ConstantFPRange> ImpliedCR =
9532 if (ImpliedCR->contains(*DomCR))
9538 return std::nullopt;
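// FCmp predicates are four-bit condition masks (one bit each for 'equal',
// 'greater', 'less' and 'unordered'), so with identical operands
// (LPred & RPred) == LPred means every outcome that makes LHS true also makes
// RHS true, and (LPred & ~RPred) == LPred means the two true-sets are disjoint,
// i.e. RHS must be false.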
9545static std::optional<bool>
9550 assert((LHS->getOpcode() == Instruction::And ||
9551 LHS->getOpcode() == Instruction::Or ||
9552 LHS->getOpcode() == Instruction::Select) &&
9553 "Expected LHS to be 'and', 'or', or 'select'.");
9560 const Value *ALHS, *ARHS;
9565 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9568 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9570 return std::nullopt;
9572 return std::nullopt;
9581 return std::nullopt;
9586 return std::nullopt;
9588 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9589 "Expected integer type only!");
9593 LHSIsTrue = !LHSIsTrue;
9599 LHSCmp->getOperand(0), LHSCmp->getOperand(1),
9600 RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue);
9604 ConstantInt::get(V->getType(), 0), RHSPred,
9605 RHSOp0, RHSOp1, DL, LHSIsTrue);
9608 "Expected floating point type only!");
9611 LHSCmp->getOperand(1), RHSPred, RHSOp0, RHSOp1,
9619 if ((LHSI->getOpcode() == Instruction::And ||
9620 LHSI->getOpcode() == Instruction::Or ||
9621 LHSI->getOpcode() == Instruction::Select))
9625 return std::nullopt;
9630 bool LHSIsTrue, unsigned Depth) {
9636 bool InvertRHS = false;
9645 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9646 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9647 return InvertRHS ? !*Implied : *Implied;
9648 return std::nullopt;
9652 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
9653 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9654 return InvertRHS ? !*Implied : *Implied;
9655 return std::nullopt;
9661 ConstantInt::get(V->getType(), 0), DL,
9663 return InvertRHS ? !*Implied : *Implied;
9664 return std::nullopt;
9668 return std::nullopt;
9672 const Value *RHS1, *RHS2;
9674 if (std::optional<bool> Imp =
9678 if (std::optional<bool> Imp =
9684 if (std::optional<bool> Imp =
9688 if (std::optional<bool> Imp =
9694 return std::nullopt;
9699static std::pair<Value *, bool>
9701 if (!ContextI || !ContextI->getParent())
9702 return {nullptr, false};
9709 return {nullptr, false};
9715 return {nullptr, false};
9718 if (TrueBB == FalseBB)
9719 return {nullptr, false};
9721 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9722 "Predecessor block does not point to successor?");
9725 return {PredCond, TrueBB == ContextBB};
9731 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9735 return std::nullopt;
9747 return std::nullopt;
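// setLimitsForBinOp (whose parameter list continues below) derives a
// conservative [Lower, Upper) window for a binary operator with one constant
// operand, using the nsw/nuw/exact flags from the InstrInfoQuery;
// PreferSignedRange picks the signed window when both wrap flags are present.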
9752 bool PreferSignedRange) {
9753 unsigned Width = Lower.getBitWidth();
9756 case Instruction::Sub:
9766 if (PreferSignedRange && HasNSW && HasNUW)
9772 } else if (HasNSW) {
9773 if (C->isNegative()) {
9786 case Instruction::Add:
9795 if (PreferSignedRange && HasNSW && HasNUW)
9801 } else if (HasNSW) {
9802 if (C->isNegative()) {
9815 case Instruction::And:
9826 case Instruction::Or:
9832 case Instruction::AShr:
9838 unsigned ShiftAmount = Width - 1;
9839 if (!C->isZero() && IIQ.isExact(&BO))
9840 ShiftAmount = C->countr_zero();
9841 if (C->isNegative()) {
9844 Upper = C->ashr(ShiftAmount) + 1;
9847 Lower = C->ashr(ShiftAmount);
9853 case Instruction::LShr:
9859 unsigned ShiftAmount = Width - 1;
9860 if (!C->isZero() && IIQ.isExact(&BO))
9861 ShiftAmount = C->countr_zero();
9862 Lower = C->lshr(ShiftAmount);
9867 case Instruction::Shl:
9874 if (C->isNegative()) {
9876 unsigned ShiftAmount = C->countl_one() - 1;
9877 Lower = C->shl(ShiftAmount);
9881 unsigned ShiftAmount = C->countl_zero() - 1;
9883 Upper = C->shl(ShiftAmount) + 1;
9902 case Instruction::SDiv:
9906 if (C->isAllOnes()) {
9911 } else if (C->countl_zero() < Width - 1) {
9922 if (C->isMinSignedValue()) {
9934 case Instruction::UDiv:
9944 case Instruction::SRem:
9950 if (C->isNegative()) {
9961 case Instruction::URem:
9976 bool UseInstrInfo) {
9977 unsigned Width = II.getType()->getScalarSizeInBits();
9979 switch (II.getIntrinsicID()) {
9980 case Intrinsic::ctlz:
9981 case Intrinsic::cttz: {
9983 if (!UseInstrInfo || !match(II.getArgOperand(1), m_One()))
9988 case Intrinsic::ctpop:
9991 APInt(Width, Width) + 1);
9992 case Intrinsic::uadd_sat:
9998 case Intrinsic::sadd_sat:
10001 if (C->isNegative())
10012 case Intrinsic::usub_sat:
10022 case Intrinsic::ssub_sat:
10024 if (C->isNegative())
10034 if (C->isNegative())
10045 case Intrinsic::umin:
10046 case Intrinsic::umax:
10047 case Intrinsic::smin:
10048 case Intrinsic::smax:
10053 switch (II.getIntrinsicID()) {
10054 case Intrinsic::umin:
10056 case Intrinsic::umax:
10058 case Intrinsic::smin:
10061 case Intrinsic::smax:
10068 case Intrinsic::abs:
10077 case Intrinsic::vscale:
10078 if (!II.getParent() || !II.getFunction())
10085 return ConstantRange::getFull(Width);
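// Example of the ranges produced here: for ctpop on an i32 the result lies in
// [0, 33), i.e. between 0 and 32 set bits inclusive, matching the
// 'APInt(Width, Width) + 1' upper bound above.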
10090 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
10094 return ConstantRange::getFull(BitWidth);
10117 return ConstantRange::getFull(BitWidth);
10119 switch (R.Flavor) {
10131 return ConstantRange::getFull(BitWidth);
10138 unsigned BitWidth = I->getType()->getScalarSizeInBits();
10139 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10157 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
10160 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10163 return C->toConstantRange();
10165 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10178 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10180 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10190 if (std::optional<ConstantRange> Range = A->getRange())
10198 if (std::optional<ConstantRange> Range = CB->getRange())
10209 "Got assumption for the wrong function!");
10210 assert(I->getIntrinsicID() == Intrinsic::assume &&
10211 "must be an assume intrinsic");
10215 Value *Arg = I->getArgOperand(0);
10218 if (!Cmp || Cmp->getOperand(0) != V)
10223 UseInstrInfo, AC, I, DT, Depth + 1);
10245 InsertAffected(Op);
10252 auto AddAffected = [&InsertAffected](Value *V) {
10256 auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
10267 while (!Worklist.empty()) {
10269 if (!Visited.insert(V).second)
10315 AddCmpOperands(A, B);
10352 AddCmpOperands(A, B);
10380 if (BO->getOpcode() == Instruction::Add ||
10381 BO->getOpcode() == Instruction::Or) {
10383 const APInt *C1, *C2;
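Doxygen summaries of the file-local helpers referenced in the listing above: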
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext, unsigned Depth=0)
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, SimplifyQuery &Q, unsigned Depth)
Try to detect a recurrence that the value of the induction variable is always a power of two (or zero...
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS)
Return true if "icmp Pred LHS RHS" is always true.
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V1 == (binop V2, X), where X is known non-zero.
static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q, unsigned Depth)
Test whether a GEP's result is known to be non-null.
static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &Q, unsigned Depth)
static void breakSelfRecursivePHI(const Use *U, const PHINode *PHI, Value *&ValOut, Instruction *&CtxIOut, const PHINode **PhiOut=nullptr)
static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, unsigned Depth)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static constexpr unsigned MaxInstrsToCheckForFree
Maximum number of instructions to check between assume and context instruction.
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, const KnownBits &KnownVal, unsigned Depth)
static std::optional< bool > isImpliedCondFCmps(FCmpInst::Predicate LPred, const Value *L0, const Value *L1, FCmpInst::Predicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2, const SimplifyQuery &Q, unsigned Depth)
static bool includesPoison(UndefPoisonKind Kind)
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static std::optional< bool > isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1, CmpPredicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool includesUndef(UndefPoisonKind Kind)
static std::optional< bool > isImpliedCondCommonOperandWithCR(CmpPredicate LPred, const ConstantRange &LCR, CmpPredicate RPred, const ConstantRange &RCR)
Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth)
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchTwoInputRecurrence(const PHINode *PN, InstTy *&Inst, Value *&Init, Value *&OtherOp)
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ?
static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II, KnownBits &Known)
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static Value * lookThroughCastConst(CmpInst *CmpI, Type *SrcTy, Constant *C, Instruction::CastOps *CastOp)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return the number of times the sign bit of the register is replicated into the other bits.
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchOpWithOpEqZero(Value *Op0, Value *Op1)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ?
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
static KnownBits computeKnownBitsForHorizontalOperation(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth, const function_ref< KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc)
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the operands of the function co...
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, const SimplifyQuery &SQ, bool Invert, unsigned Depth)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero, const Value *Cond, bool CondIsTrue)
Return true if we can infer that V is known to be a power of 2 from dominating condition Cond (e....
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II, bool UseInstrInfo)
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth)
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static bool isFPPredicate(Predicate P)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isIntPredicate(Predicate P)
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
bool hasSameSign() const
Query samesign information, for optimizations.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI bool isAllNegative() const
Return true if all values in this range are negative.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
void setNoNaNs(bool B=true)
const BasicBlock & getEntryBlock() const
bool hasNoSync() const
Determine if the call can synchroize with other threads.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
PointerType * getType() const
Global values are always pointers.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getSwappedCmpPredicate() const
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr and ashr).
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
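As a brief illustration of how the matchers above compose (a sketch only: the helper name, the bound values X and C, and the include are illustrative assumptions, not symbols from this file):

#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Sketch: recognize "(X << C) | <anything>" in either operand order.
// m_OneUse restricts the shl to a single user; m_APInt binds a constant
// (or splat-constant) shift amount to C.
static bool matchesShlOrAnything(Value *V) {
  Value *X;
  const APInt *C;
  return match(V, m_c_Or(m_OneUse(m_Shl(m_Value(X), m_APInt(C))), m_Value()));
}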
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced...
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)
Returns true, if no instruction between Assume and CtxI may free memory and the function is marked as...
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
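A minimal sketch of a scalar query against computeKnownFPClass; the APInt(1, 1) demanded-elements mask for a scalar value, the SimplifyQuery construction, the header names, and the helper name are assumptions:

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: ask whether V may be a NaN or an infinity.
static bool mayBeNaNOrInf(const Value *V, const DataLayout &DL) {
  SimplifyQuery SQ(DL);
  KnownFPClass Known = computeKnownFPClass(V, APInt(1, 1), fcNan | fcInf, SQ);
  return !(Known.isKnownNeverNaN() && Known.isKnownNeverInfinity());
}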
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to the requested alignment.
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
LLVM_ABI std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
bool isa_and_nonnull(const Y &Val)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is zero.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
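For example, a "multiple of 4" check can be phrased as a masked-zero query (a sketch; the helper name and the SimplifyQuery parameter are assumptions):

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: V is a known multiple of 4 iff its two low bits are known zero.
static bool isKnownMultipleOf4(const Value *V, const SimplifyQuery &SQ) {
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  return MaskedValueIsZero(V, APInt::getLowBitsSet(BitWidth, 2), SQ);
}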
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO's result is used only along the paths control dependen...
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
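A sketch of matching an add-recurrence PHI with matchSimpleRecurrence (the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: detect "iv = phi [Start, preheader], [iv + Step, latch]" cycles.
static bool isAddRecurrencePhi(const PHINode *PN) {
  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(PN, BO, Start, Step))
    return false;
  return BO->getOpcode() == Instruction::Add;
}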
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
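A small worked example of the split; the concrete mask, the demanded lanes, and the header are illustrative assumptions:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/VectorUtils.h"

using namespace llvm;

// Sketch: shuffle mask <0, 4, 1, 5> with only output lanes 0 and 2 demanded.
static void demandedEltsExample() {
  int Mask[] = {0, 4, 1, 5};
  APInt DemandedElts(4, 0b0101); // output lanes 0 and 2
  APInt DemandedLHS, DemandedRHS;
  if (getShuffleDemandedElts(/*SrcWidth=*/4, Mask, DemandedElts, DemandedLHS,
                             DemandedRHS)) {
    // DemandedLHS == 0b0011: lanes 0 and 1 of operand 0 are needed.
    // DemandedRHS == 0b0000: operand 1 is not needed at all.
  }
}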
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e.
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
LLVM_ABI SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
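A minimal sketch of a direct query (the helper name is illustrative); note that the KnownBits object must be constructed with the value's scalar bit width before being passed in:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Sketch: is the low bit of V known to be zero (i.e. V known even)?
static bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.Zero[0];
}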
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
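A sketch of classifying a select as a min/max idiom (the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: does Sel implement umax(LHS, RHS)?
static bool isUnsignedMaxIdiom(Value *Sel) {
  Value *LHS, *RHS;
  SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
  return SPR.Flavor == SPF_UMAX;
}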
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
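A sketch of a typical non-zero query; building the SimplifyQuery from just the DataLayout and an optional context instruction is an assumption here, as is the helper name:

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: can we prove the divisor is non-zero at the division's location?
static bool divisorKnownNonZero(const Value *Divisor, const DataLayout &DL,
                                const Instruction *CtxI) {
  SimplifyQuery Q(DL, CtxI);
  return isKnownNonZero(Divisor, Q);
}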
constexpr int PoisonMaskElem
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are NaN).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
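For instance, sign-bit replication tells you when a signed truncation is lossless (a sketch; the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: a trunc to NumBits preserves the signed value iff all bits above
// the low NumBits are copies of the sign bit.
static bool truncPreservesSignedValue(const Value *V, unsigned NumBits,
                                      const DataLayout &DL) {
  unsigned TyBits = V->getType()->getScalarSizeInBits();
  return ComputeNumSignBits(V, DL) >= TyBits - NumBits + 1;
}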
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point value can never contain a NaN or infinity.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
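For example, this is the query behind rewriting an unsigned remainder as a mask (a sketch; the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Sketch: "X urem Y" can become "X & (Y - 1)" when Y is a non-zero power
// of two; OrZero=false rejects a possibly-zero Y.
static bool uremFoldsToMask(const Value *Y, const DataLayout &DL) {
  return isKnownToBeAPowerOfTwo(Y, DL, /*OrZero=*/false);
}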
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
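A sketch of feeding a load's !range metadata into an existing KnownBits value (the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Sketch: refine Known with the load's !range metadata, if present.
static void refineFromRangeMetadata(const LoadInst &LI, KnownBits &Known) {
  if (MDNode *Ranges = LI.getMetadata(LLVMContext::MD_range))
    computeKnownBitsFromRangeMetadata(*Ranges, Known);
}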
LLVM_ABI Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e.
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is either NaN or never less than -0....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
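A sketch of the tri-state implication query (the helper name is illustrative); std::nullopt means nothing could be proven either way:

#include "llvm/Analysis/ValueTracking.h"

#include <optional>

using namespace llvm;

// Sketch: given that condition A is known true, is condition B also true?
static bool knownImpliedTrue(const Value *A, const Value *B,
                             const DataLayout &DL) {
  std::optional<bool> Implied =
      isImpliedCondition(A, B, DL, /*LHSIsTrue=*/true);
  return Implied.value_or(false);
}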
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
LLVM_ABI bool canIgnoreSignBitOfNaN(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is NaN.
LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static LLVM_ABI KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
LLVM_ABI KnownBits blsi() const
Compute known bits for X & -X, which keeps only the lowest set bit of X.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
LLVM_ABI KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
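A small worked example of combining two partially-known values with the KnownBits operations above (the numbers and the helper name are illustrative, not taken from this file):

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Sketch: two 8-bit values whose high nibbles are known zero.
static void knownBitsAddExample() {
  KnownBits A(8), B(8);
  A.Zero = APInt(8, 0xF0); // A is in [0x00, 0x0F]
  B.Zero = APInt(8, 0xF0); // B is in [0x00, 0x0F]
  KnownBits Sum = KnownBits::add(A, B);
  // The sum is at most 0x1E, so its top three bits are known zero:
  // Sum.countMinLeadingZeros() == 3, even though no bit is known one.
}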
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
LLVM_ABI void propagateCanonicalizingSrc(const KnownFPClass &Src, DenormalMode Mode)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a negative zero.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represents one piece of information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or a max pattern.
SimplifyQuery getWithoutCondContext() const
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC