110#define DEBUG_TYPE "instcombine"
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration,
"Number of functions with one iteration");
120STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
121STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
123 "Number of functions with four or more iterations");
127STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
133 "Controls which instructions are visited");
142 "instcombine-max-sink-users",
cl::init(32),
143 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
147 cl::desc(
"Maximum array size considered when doing a combine"));
163std::optional<Instruction *>
166 if (
II.getCalledFunction()->isTargetIntrinsic()) {
167 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*
this,
II);
174 bool &KnownBitsComputed) {
176 if (
II.getCalledFunction()->isTargetIntrinsic()) {
177 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
178 *
this,
II, DemandedMask, Known, KnownBitsComputed);
189 if (
II.getCalledFunction()->isTargetIntrinsic()) {
190 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
191 *
this,
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
201 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
211 Builder.SetInsertPoint(Inst);
215 if (Inst && !
GEP->hasAllConstantIndices() &&
216 !
GEP->getSourceElementType()->isIntegerTy(8)) {
218 *Inst, Builder.CreateGEP(Builder.getInt8Ty(),
GEP->getPointerOperand(),
236 Value *Sum =
nullptr;
237 Value *OneUseSum =
nullptr;
238 Value *OneUseBase =
nullptr;
245 IRBuilderBase::InsertPointGuard Guard(
Builder);
247 if (RewriteGEPs && Inst)
251 if (
Offset->getType() != IdxTy)
254 if (
GEP->hasOneUse()) {
259 OneUseBase =
GEP->getPointerOperand();
268 if (RewriteGEPs && Inst &&
269 !(
GEP->getSourceElementType()->isIntegerTy(8) &&
274 OneUseBase ? OneUseBase :
GEP->getPointerOperand(),
Offset,
"",
281 OneUseSum = OneUseBase =
nullptr;
285 Sum =
Add(Sum, OneUseSum);
296bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
315bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
316 unsigned ToWidth)
const {
317 bool FromLegal = FromWidth == 1 ||
DL.isLegalInteger(FromWidth);
318 bool ToLegal = ToWidth == 1 ||
DL.isLegalInteger(ToWidth);
322 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
327 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
332 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
343bool InstCombinerImpl::shouldChangeType(
Type *From,
Type *To)
const {
351 return shouldChangeType(FromWidth, ToWidth);
361 if (!OBO || !OBO->hasNoSignedWrap())
364 const APInt *BVal, *CVal;
369 bool Overflow =
false;
370 switch (
I.getOpcode()) {
371 case Instruction::Add:
372 (void)BVal->
sadd_ov(*CVal, Overflow);
374 case Instruction::Sub:
375 (void)BVal->
ssub_ov(*CVal, Overflow);
377 case Instruction::Mul:
378 (void)BVal->
smul_ov(*CVal, Overflow);
389 return OBO && OBO->hasNoUnsignedWrap();
394 return OBO && OBO->hasNoSignedWrap();
403 I.clearSubclassOptionalData();
408 I.clearSubclassOptionalData();
409 I.setFastMathFlags(FMF);
419 if (!Cast || !Cast->hasOneUse())
423 auto CastOpcode = Cast->getOpcode();
424 if (CastOpcode != Instruction::ZExt)
433 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
459 Cast->dropPoisonGeneratingFlags();
465Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
467 if (IntToPtr &&
DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
468 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
470 Type *CastTy = IntToPtr->getDestTy();
473 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
474 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
475 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
476 return PtrToInt->getOperand(0);
513 if (
I.isCommutative()) {
514 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
524 if (
I.isAssociative()) {
547 I.setHasNoUnsignedWrap(
true);
550 I.setHasNoSignedWrap(
true);
579 if (
I.isAssociative() &&
I.isCommutative()) {
656 I.setHasNoUnsignedWrap(
true);
674 if (LOp == Instruction::And)
675 return ROp == Instruction::Or || ROp == Instruction::Xor;
678 if (LOp == Instruction::Or)
679 return ROp == Instruction::And;
683 if (LOp == Instruction::Mul)
684 return ROp == Instruction::Add || ROp == Instruction::Sub;
721 assert(
Op &&
"Expected a binary operator");
722 LHS =
Op->getOperand(0);
723 RHS =
Op->getOperand(1);
724 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
729 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
730 assert(
RHS &&
"Constant folding of immediate constants failed");
731 return Instruction::Mul;
736 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
739 return Instruction::AShr;
742 return Op->getOpcode();
751 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
754 Value *RetVal =
nullptr;
765 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
774 if (!V && (
LHS->hasOneUse() ||
RHS->hasOneUse()))
775 V = Builder.CreateBinOp(TopLevelOpcode,
B,
D,
RHS->getName());
777 RetVal = Builder.CreateBinOp(InnerOpcode,
A, V);
785 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
794 if (!V && (
LHS->hasOneUse() ||
RHS->hasOneUse()))
795 V = Builder.CreateBinOp(TopLevelOpcode,
A,
C,
LHS->getName());
797 RetVal = Builder.CreateBinOp(InnerOpcode, V,
B);
812 HasNSW =
I.hasNoSignedWrap();
813 HasNUW =
I.hasNoUnsignedWrap();
816 HasNSW &= LOBO->hasNoSignedWrap();
817 HasNUW &= LOBO->hasNoUnsignedWrap();
821 HasNSW &= ROBO->hasNoSignedWrap();
822 HasNUW &= ROBO->hasNoUnsignedWrap();
825 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
853 unsigned Opc =
I->getOpcode();
854 unsigned ConstIdx = 1;
861 case Instruction::Sub:
864 case Instruction::ICmp:
871 case Instruction::Or:
875 case Instruction::Add:
881 if (!
match(
I->getOperand(1 - ConstIdx),
891 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
897 if (!Cmp || !Cmp->isZeroValue())
902 bool Consumes =
false;
906 assert(NotOp !=
nullptr &&
907 "Desync between isFreeToInvert and getFreelyInverted");
909 Value *CtpopOfNotOp =
Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
916 case Instruction::Sub:
919 case Instruction::Or:
920 case Instruction::Add:
923 case Instruction::ICmp:
959 auto IsValidBinOpc = [](
unsigned Opc) {
963 case Instruction::And:
964 case Instruction::Or:
965 case Instruction::Xor:
966 case Instruction::Add:
975 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
977 assert(ShOpc != Instruction::AShr);
978 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
979 ShOpc == Instruction::Shl;
982 auto GetInvShift = [](
unsigned ShOpc) {
983 assert(ShOpc != Instruction::AShr);
984 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
987 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
991 if (BinOpc1 == Instruction::And)
996 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
1002 if (BinOpc2 == Instruction::And)
1013 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
1015 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
1016 if (!
match(
I.getOperand(ShOpnum),
1019 if (!
match(
I.getOperand(1 - ShOpnum),
1032 unsigned ShOpc = IY->getOpcode();
1033 if (ShOpc != IX->getOpcode())
1041 unsigned BinOpc = BO2->getOpcode();
1043 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
1046 if (ShOpc == Instruction::AShr) {
1060 if (BinOpc ==
I.getOpcode() &&
1061 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
1076 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1083 Value *NewBinOp1 =
Builder.CreateBinOp(
I.getOpcode(),
Y, NewBinOp2);
1090 return MatchBinOp(1);
1107 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1108 Value *
A, *CondVal, *TrueVal, *FalseVal;
1111 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1113 A->getType()->getScalarSizeInBits() == 1 &&
1120 if (MatchSelectAndCast(LHS, RHS))
1122 else if (MatchSelectAndCast(RHS, LHS))
1127 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1128 bool IsCastOpRHS = (CastOp == RHS);
1134 }
else if (IsZExt) {
1135 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1141 return IsCastOpRHS ?
Builder.CreateBinOp(
Opc, V,
C)
1148 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1150 NewFoldedConst(
true, FalseVal));
1154 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1156 NewFoldedConst(
false, FalseVal));
1163 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1177 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1206 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1223 auto SQDistributive =
SQ.getWithInstruction(&
I).getWithoutUndef();
1231 C =
Builder.CreateBinOp(InnerOpcode, L, R);
1240 C =
Builder.CreateBinOp(TopLevelOpcode,
B,
C);
1249 C =
Builder.CreateBinOp(TopLevelOpcode,
A,
C);
1262 auto SQDistributive =
SQ.getWithInstruction(&
I).getWithoutUndef();
1270 A =
Builder.CreateBinOp(InnerOpcode, L, R);
1279 A =
Builder.CreateBinOp(TopLevelOpcode,
A,
C);
1288 A =
Builder.CreateBinOp(TopLevelOpcode,
A,
B);
1297static std::optional<std::pair<Value *, Value *>>
1299 if (
LHS->getParent() !=
RHS->getParent())
1300 return std::nullopt;
1302 if (
LHS->getNumIncomingValues() < 2)
1303 return std::nullopt;
1306 return std::nullopt;
1308 Value *L0 =
LHS->getIncomingValue(0);
1309 Value *R0 =
RHS->getIncomingValue(0);
1311 for (
unsigned I = 1,
E =
LHS->getNumIncomingValues();
I !=
E; ++
I) {
1315 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1318 return std::nullopt;
1321 return std::optional(std::pair(L0, R0));
1324std::optional<std::pair<Value *, Value *>>
1329 return std::nullopt;
1331 case Instruction::PHI:
1333 case Instruction::Select: {
1339 return std::pair(TrueVal, FalseVal);
1340 return std::nullopt;
1342 case Instruction::Call: {
1346 if (LHSMinMax && RHSMinMax &&
1353 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1354 return std::nullopt;
1357 return std::nullopt;
1367 if (!LHSIsSelect && !RHSIsSelect)
1377 FMF =
I.getFastMathFlags();
1378 Builder.setFastMathFlags(FMF);
1384 Value *
Cond, *True =
nullptr, *False =
nullptr;
1392 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1406 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1412 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1414 True =
Builder.CreateBinOp(Opcode,
B, E);
1415 else if (True && !False)
1416 False =
Builder.CreateBinOp(Opcode,
C,
F);
1418 }
else if (LHSIsSelect && LHS->hasOneUse()) {
1423 if (
Value *NewSel = foldAddNegate(
B,
C, RHS))
1425 }
else if (RHSIsSelect && RHS->hasOneUse()) {
1430 if (
Value *NewSel = foldAddNegate(E,
F, LHS))
1434 if (!True || !False)
1447 if (U == IgnoredUser)
1450 case Instruction::Select: {
1453 SI->swapProfMetadata();
1456 case Instruction::Br: {
1463 case Instruction::Xor:
1470 "canFreelyInvertAllUsersOf() ?");
1480 for (
unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1482 if (DbgVal->getVariableLocationOp(Idx) ==
I)
1483 DbgVal->setExpression(
1490Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1500 if (
C->getType()->getElementType()->isIntegerTy())
1504 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1520 if (CV->getType()->isVectorTy() &&
1521 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1534Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1535 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1539 Type *IntTy = IntOps[0]->getType();
1544 unsigned MaxRepresentableBits =
1549 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1553 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1554 if (OpsKnown[OpNo].hasKnownBits() &&
1555 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1560 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1564 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1568 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1579 if (MaxRepresentableBits < IntSz) {
1589 NumUsedLeadingBits[OpNo] =
1590 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1598 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1601 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1606 if (Op1FpC !=
nullptr) {
1608 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1613 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1615 if (Op1IntC ==
nullptr)
1618 : Instruction::UIToFP,
1619 Op1IntC, FPTy,
DL) != Op1FpC)
1623 IntOps[1] = Op1IntC;
1627 if (IntTy != IntOps[1]->
getType())
1630 if (Op1FpC ==
nullptr) {
1631 if (!IsValidPromotion(1))
1634 if (!IsValidPromotion(0))
1640 bool NeedsOverflowCheck =
true;
1643 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1644 unsigned OverflowMaxCurBits =
1645 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1646 bool OutputSigned = OpsFromSigned;
1648 case Instruction::FAdd:
1649 IntOpc = Instruction::Add;
1650 OverflowMaxOutputBits += OverflowMaxCurBits;
1652 case Instruction::FSub:
1653 IntOpc = Instruction::Sub;
1654 OverflowMaxOutputBits += OverflowMaxCurBits;
1656 case Instruction::FMul:
1657 IntOpc = Instruction::Mul;
1658 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1664 if (OverflowMaxOutputBits < IntSz) {
1665 NeedsOverflowCheck =
false;
1668 if (IntOpc == Instruction::Sub)
1669 OutputSigned =
true;
1675 if (NeedsOverflowCheck &&
1676 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1679 Value *IntBinOp =
Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1681 IntBO->setHasNoSignedWrap(OutputSigned);
1682 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1685 return new SIToFPInst(IntBinOp, FPTy);
1686 return new UIToFPInst(IntBinOp, FPTy);
1695 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1715 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1716 IntOps, Op1FpC, OpsKnown))
1718 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1734 !
X->getType()->isIntOrIntVectorTy(1))
1742 return createSelectInst(
X, TVal, FVal);
1751 V = IsTrueArm ?
SI->getTrueValue() :
SI->getFalseValue();
1752 }
else if (
match(
SI->getCondition(),
1777 bool FoldWithMultiUse) {
1779 if (!
SI->hasOneUse() && !FoldWithMultiUse)
1782 Value *TV =
SI->getTrueValue();
1783 Value *FV =
SI->getFalseValue();
1786 if (
SI->getType()->isIntOrIntVectorTy(1))
1792 for (
Value *IntrinOp :
Op.operands())
1794 for (
Value *PhiOp : PN->operands())
1806 if (CI->hasOneUse()) {
1807 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1808 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1809 !CI->isCommutative())
1818 if (!NewTV && !NewFV)
1838 Ops.push_back(InValue);
1870 bool AllowMultipleUses) {
1872 if (NumPHIValues == 0)
1879 bool IdenticalUsers =
false;
1880 if (!AllowMultipleUses && !OneUse) {
1884 if (UI != &
I && !
I.isIdenticalTo(UI))
1888 IdenticalUsers =
true;
1918 bool SeenNonSimplifiedInVal =
false;
1919 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1930 auto WillFold = [&]() {
1935 const APInt *Ignored;
1956 if (!OneUse && !IdenticalUsers)
1959 if (SeenNonSimplifiedInVal)
1961 SeenNonSimplifiedInVal =
true;
1991 for (
auto OpIndex : OpsToMoveUseToIncomingBB) {
2002 U = U->DoPHITranslation(PN->
getParent(), OpBB);
2005 Clones.
insert({OpBB, Clone});
2010 NewPhiValues[
OpIndex] = Clone;
2019 for (
unsigned i = 0; i != NumPHIValues; ++i)
2022 if (IdenticalUsers) {
2053 BO0->getOpcode() !=
Opc || BO1->getOpcode() !=
Opc ||
2054 !BO0->isAssociative() || !BO1->isAssociative() ||
2055 BO0->getParent() != BO1->getParent())
2059 "Expected commutative instructions!");
2063 Value *Start0, *Step0, *Start1, *Step1;
2070 "Expected PHIs with two incoming values!");
2077 if (!Init0 || !Init1 || !C0 || !C1)
2092 if (
Opc == Instruction::FAdd ||
Opc == Instruction::FMul) {
2096 NewBO->setFastMathFlags(Intersect);
2100 Flags.AllKnownNonZero =
false;
2101 Flags.mergeFlags(*BO0);
2102 Flags.mergeFlags(*BO1);
2103 Flags.mergeFlags(BO);
2104 Flags.applyFlags(*NewBO);
2106 NewBO->takeName(&BO);
2116 "Invalid incoming block!");
2117 NewPN->addIncoming(
Init, BB);
2118 }
else if (V == BO0) {
2123 "Invalid incoming block!");
2124 NewPN->addIncoming(NewBO, BB);
2130 <<
"\n with " << *PN1 <<
"\n " << *BO1
2157 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2158 Phi0->getNumOperands() != Phi1->getNumOperands())
2162 if (BO.
getParent() != Phi0->getParent() ||
2179 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
2180 auto &Phi0Use = std::get<0>(
T);
2181 auto &Phi1Use = std::get<1>(
T);
2182 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2184 Value *Phi0UseV = Phi0Use.get();
2185 Value *Phi1UseV = Phi1Use.get();
2188 else if (Phi1UseV ==
C)
2195 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
2196 CanFoldIncomingValuePair)) {
2199 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
2200 "The number of collected incoming values should equal the number "
2201 "of the original PHINode operands!");
2202 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
2203 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
2208 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2215 ConstBB = Phi0->getIncomingBlock(0);
2216 OtherBB = Phi0->getIncomingBlock(1);
2218 ConstBB = Phi0->getIncomingBlock(1);
2219 OtherBB = Phi0->getIncomingBlock(0);
2230 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2231 !
DT.isReachableFromEntry(OtherBB))
2237 for (
auto BBIter = BO.
getParent()->begin(); &*BBIter != &BO; ++BBIter)
2248 Builder.SetInsertPoint(PredBlockBranch);
2250 Phi0->getIncomingValueForBlock(OtherBB),
2251 Phi1->getIncomingValueForBlock(OtherBB));
2253 NotFoldedNewBO->copyIRFlags(&BO);
2280 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2309 for (
unsigned I = 0;
I < NumElts; ++
I) {
2311 if (ShMask[
I] >= 0) {
2312 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2323 NewVecC[ShMask[
I]] = CElt;
2343 Value *L0, *L1, *R0, *R1;
2347 LHS->hasOneUse() && RHS->hasOneUse() &&
2370 M, Intrinsic::vector_reverse, V->getType());
2381 (LHS->hasOneUse() || RHS->hasOneUse() ||
2382 (LHS == RHS && LHS->hasNUses(2))))
2383 return createBinOpReverse(V1, V2);
2387 return createBinOpReverse(V1, RHS);
2391 return createBinOpReverse(LHS, V2);
2402 M, Intrinsic::experimental_vp_reverse, V->getType());
2412 (LHS->hasOneUse() || RHS->hasOneUse() ||
2413 (LHS == RHS && LHS->hasNUses(2))))
2414 return createBinOpVPReverse(V1, V2, EVL);
2418 return createBinOpVPReverse(V1, RHS, EVL);
2424 return createBinOpVPReverse(LHS, V2, EVL);
2444 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2446 return createBinOpShuffle(V1, V2, Mask);
2461 if (LShuf->isSelect() &&
2463 RShuf->isSelect() &&
2485 "Shuffle should not change scalar type");
2497 Value *NewLHS = ConstOp1 ? V1 : NewC;
2498 Value *NewRHS = ConstOp1 ? NewC : V1;
2499 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2534 Value *NewSplat =
Builder.CreateShuffleVector(NewBO, NewMask);
2540 R->copyFastMathFlags(&Inst);
2544 NewInstBO->copyIRFlags(R);
2574 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2600 NewBinOp->setHasNoSignedWrap();
2602 NewBinOp->setHasNoUnsignedWrap();
2618 if (!
GEP.hasAllConstantIndices())
2634 Type *Ty =
GEP.getSourceElementType();
2635 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC,
"", NW);
2636 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC,
"", NW);
2646 if (
GEP.getNumIndices() != 1)
2656 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2667 if (NewOffset.
isZero() ||
2668 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2670 if (
GEP.hasNoUnsignedWrap() &&
2690 if (!
GEP.hasAllConstantIndices())
2701 if (InnerGEP->hasAllConstantIndices())
2704 if (!InnerGEP->hasOneUse())
2713 if (Skipped.
empty())
2718 if (!InnerGEP->hasOneUse())
2723 if (InnerGEP->getType() != Ty)
2729 !InnerGEP->accumulateConstantOffset(
DL,
Offset))
2734 SkippedGEP->setNoWrapFlags(NW);
2756 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2760 bool EndsWithSequential =
false;
2763 EndsWithSequential =
I.isSequential();
2764 if (!EndsWithSequential)
2769 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2787 Indices.
append(Src->op_begin() + 1, Src->op_end() - 1);
2792 unsigned NumNonZeroIndices =
count_if(Indices, [](
Value *Idx) {
2794 return !
C || !
C->isNullValue();
2796 if (NumNonZeroIndices > 1)
2801 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2807 bool &DoesConsume,
unsigned Depth) {
2808 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2826 if (!WillInvertAllUses)
2833 return Builder->CreateCmp(
I->getInversePredicate(),
I->getOperand(0),
2842 DoesConsume,
Depth))
2845 DoesConsume,
Depth))
2854 DoesConsume,
Depth))
2857 DoesConsume,
Depth))
2866 DoesConsume,
Depth))
2875 DoesConsume,
Depth))
2887 bool LocalDoesConsume = DoesConsume;
2889 LocalDoesConsume,
Depth))
2892 LocalDoesConsume,
Depth)) {
2893 DoesConsume = LocalDoesConsume;
2896 DoesConsume,
Depth);
2897 assert(NotB !=
nullptr &&
2898 "Unable to build inverted value for known freely invertable op");
2900 return Builder->CreateBinaryIntrinsic(
2909 bool LocalDoesConsume = DoesConsume;
2911 for (
Use &U : PN->operands()) {
2912 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2916 if (NewIncomingVal ==
nullptr)
2919 if (NewIncomingVal == V)
2922 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2925 DoesConsume = LocalDoesConsume;
2930 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2931 for (
auto [Val, Pred] : IncomingValues)
2940 DoesConsume,
Depth))
2941 return Builder ?
Builder->CreateSExt(AV, V->getType()) : NonNull;
2947 DoesConsume,
Depth))
2948 return Builder ?
Builder->CreateTrunc(AV, V->getType()) : NonNull;
2956 bool IsLogical,
Value *
A,
2958 bool LocalDoesConsume = DoesConsume;
2960 LocalDoesConsume,
Depth))
2963 LocalDoesConsume,
Depth)) {
2965 LocalDoesConsume,
Depth);
2966 DoesConsume = LocalDoesConsume;
2968 return Builder ?
Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2969 return Builder ?
Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2976 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2980 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2984 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2988 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
2997 Type *GEPEltType =
GEP.getSourceElementType();
3008 if (
GEP.getNumIndices() == 1 &&
3017 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3020 return match(V, m_APInt(C)) && !C->isZero();
3044 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3045 Op1->getSourceElementType() != Op2->getSourceElementType())
3053 Type *CurTy =
nullptr;
3055 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
3056 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3059 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3068 assert(CurTy &&
"No current type?");
3088 CurTy = Op1->getSourceElementType();
3096 NW &= Op2->getNoWrapFlags();
3106 NewGEP->setNoWrapFlags(NW);
3118 Builder.SetInsertPoint(PN);
3119 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3127 NewGEP->setOperand(DI, NewPN);
3130 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
3137 Type *GEPType =
GEP.getType();
3138 Type *GEPEltType =
GEP.getSourceElementType();
3141 SQ.getWithInstruction(&
GEP)))
3148 auto VWidth = GEPFVTy->getNumElements();
3149 APInt PoisonElts(VWidth, 0);
3161 bool MadeChange =
false;
3165 Type *NewScalarIndexTy =
3166 DL.getIndexType(
GEP.getPointerOperandType()->getScalarType());
3175 Type *IndexTy = (*I)->getType();
3176 Type *NewIndexType =
3185 if (EltTy->
isSized() &&
DL.getTypeAllocSize(EltTy).isZero())
3191 if (IndexTy != NewIndexType) {
3197 if (
GEP.hasNoUnsignedWrap() &&
GEP.hasNoUnsignedSignedWrap())
3198 *
I =
Builder.CreateZExt(*
I, NewIndexType,
"",
true);
3200 *
I =
Builder.CreateSExt(*
I, NewIndexType);
3202 *
I =
Builder.CreateTrunc(*
I, NewIndexType,
"",
GEP.hasNoUnsignedWrap(),
3203 GEP.hasNoUnsignedSignedWrap());
3212 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
3217 GEP.getNoWrapFlags()));
3229 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3237 if (FirstIdx && FirstIdx->isNullValue() &&
3238 !FirstIdx->getType()->isVectorTy()) {
3243 GEP.getPointerOperand(),
3245 GEP.getNoWrapFlags()));
3252 return Op->getType()->isVectorTy() && getSplatValue(Op);
3255 for (
auto &
Op :
GEP.operands()) {
3256 if (
Op->getType()->isVectorTy())
3266 GEP.getNoWrapFlags());
3269 Res =
Builder.CreateVectorSplat(EC, Res);
3274 bool SeenNonZeroIndex =
false;
3275 for (
auto [IdxNum, Idx] :
enumerate(Indices)) {
3277 if (
C &&
C->isNullValue())
3280 if (!SeenNonZeroIndex) {
3281 SeenNonZeroIndex =
true;
3288 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3289 GEP.getName() +
".split",
GEP.getNoWrapFlags());
3296 BackIndices,
GEP.getNoWrapFlags());
3309 if (
GEP.getNumIndices() == 1) {
3310 unsigned AS =
GEP.getPointerAddressSpace();
3311 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3312 DL.getIndexSizeInBits(AS)) {
3313 uint64_t TyAllocSize =
DL.getTypeAllocSize(GEPEltType).getFixedValue();
3315 if (TyAllocSize == 1) {
3324 GEPType ==
Y->getType()) {
3325 bool HasSameUnderlyingObject =
3328 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
3329 bool ShouldReplace = HasSameUnderlyingObject ||
3333 return ShouldReplace;
3337 }
else if (
auto *ExactIns =
3341 if (ExactIns->isExact()) {
3349 GEP.getPointerOperand(), V,
3350 GEP.getNoWrapFlags());
3353 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3359 std::optional<APInt> NewC;
3379 if (NewC.has_value()) {
3382 ConstantInt::get(V->getType(), *NewC));
3385 GEP.getPointerOperand(), NewOp,
3386 GEP.getNoWrapFlags());
3396 if (!
GEP.isInBounds()) {
3399 APInt BasePtrOffset(IdxWidth, 0);
3400 Value *UnderlyingPtrOp =
3402 bool CanBeNull, CanBeFreed;
3404 DL, CanBeNull, CanBeFreed);
3405 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3406 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3408 APInt AllocSize(IdxWidth, DerefBytes);
3409 if (BasePtrOffset.
ule(AllocSize)) {
3411 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3418 if (
GEP.hasNoUnsignedSignedWrap() && !
GEP.hasNoUnsignedWrap() &&
3420 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3428 if (
GEP.getNumIndices() == 1) {
3431 auto GetPreservedNoWrapFlags = [&](
bool AddIsNUW) {
3434 if (
GEP.hasNoUnsignedWrap() && AddIsNUW)
3435 return GEP.getNoWrapFlags();
3451 Builder.CreateGEP(
GEP.getSourceElementType(),
GEP.getPointerOperand(),
3454 Builder.CreateGEP(
GEP.getSourceElementType(),
3455 NewPtr, Idx2,
"", NWFlags));
3466 bool NUW =
match(
GEP.getOperand(1),
3469 auto *NewPtr =
Builder.CreateGEP(
3470 GEP.getSourceElementType(),
GEP.getPointerOperand(),
3471 Builder.CreateSExt(Idx1,
GEP.getOperand(1)->getType()),
"", NWFlags);
3474 Builder.CreateGEP(
GEP.getSourceElementType(), NewPtr,
3475 Builder.CreateSExt(
C,
GEP.getOperand(1)->getType()),
3515 return Dest && Dest->Ptr == UsedV;
3518static std::optional<ModRefInfo>
3530 switch (
I->getOpcode()) {
3533 return std::nullopt;
3535 case Instruction::AddrSpaceCast:
3536 case Instruction::BitCast:
3537 case Instruction::GetElementPtr:
3542 case Instruction::ICmp: {
3548 return std::nullopt;
3549 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3551 return std::nullopt;
3556 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3560 const APInt *Alignment;
3562 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3568 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3569 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3570 !AlignmentAndSizeKnownValid(CB))
3571 return std::nullopt;
3576 case Instruction::Call:
3579 switch (
II->getIntrinsicID()) {
3581 return std::nullopt;
3583 case Intrinsic::memmove:
3584 case Intrinsic::memcpy:
3585 case Intrinsic::memset: {
3587 if (
MI->isVolatile())
3588 return std::nullopt;
3594 return std::nullopt;
3598 case Intrinsic::assume:
3599 case Intrinsic::invariant_start:
3600 case Intrinsic::invariant_end:
3601 case Intrinsic::lifetime_start:
3602 case Intrinsic::lifetime_end:
3603 case Intrinsic::objectsize:
3606 case Intrinsic::launder_invariant_group:
3607 case Intrinsic::strip_invariant_group:
3634 return std::nullopt;
3636 case Instruction::Store: {
3638 if (
SI->isVolatile() ||
SI->getPointerOperand() != PI)
3639 return std::nullopt;
3641 return std::nullopt;
3647 case Instruction::Load: {
3650 return std::nullopt;
3652 return std::nullopt;
3660 }
while (!Worklist.
empty());
3684 std::unique_ptr<DIBuilder> DIB;
3692 bool KnowInitUndef =
false;
3693 bool KnowInitZero =
false;
3698 KnowInitUndef =
true;
3699 else if (
Init->isNullValue())
3700 KnowInitZero =
true;
3704 auto &
F = *
MI.getFunction();
3705 if (
F.hasFnAttribute(Attribute::SanitizeMemory) ||
3706 F.hasFnAttribute(Attribute::SanitizeAddress))
3707 KnowInitUndef =
false;
3721 if (
II->getIntrinsicID() == Intrinsic::objectsize) {
3724 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3725 for (
Instruction *Inserted : InsertedInstructions)
3733 if (KnowInitZero &&
isRefSet(*Removable)) {
3736 auto *M =
Builder.CreateMemSet(
3739 MTI->getLength(), MTI->getDestAlign());
3740 M->copyMetadata(*MTI);
3754 C->isFalseWhenEqual()));
3756 for (
auto *DVR : DVRs)
3757 if (DVR->isAddressOfVariable())
3764 assert(KnowInitZero || KnowInitUndef);
3779 F,
II->getNormalDest(),
II->getUnwindDest(), {},
"",
II->getParent());
3780 NewII->setDebugLoc(
II->getDebugLoc());
3808 for (
auto *DVR : DVRs)
3809 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3810 DVR->eraseFromParent();
3856 if (FreeInstrBB->
size() != 2) {
3858 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3861 if (!Cast || !Cast->isNoopCast(
DL))
3882 "Broken CFG: missing edge from predecessor to successor");
3887 if (&Instr == FreeInstrBBTerminator)
3892 "Only the branch instruction should remain");
3903 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3904 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3905 if (Dereferenceable.
isValid()) {
3907 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3908 Attribute::Dereferenceable);
3909 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3948 if (
TLI.getLibFunc(FI, Func) &&
TLI.has(Func) && Func == LibFunc_free)
3964 bool HasDereferenceable =
3965 F->getAttributes().getRetDereferenceableBytes() > 0;
3966 if (
F->hasRetAttribute(Attribute::NonNull) ||
3967 (HasDereferenceable &&
3969 if (
Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3974 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
3977 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3978 if (ReturnClass ==
fcNone)
4001 if (Prev->isEHPad())
4033 if (BBI != FirstInstr)
4035 }
while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4049 if (!
DeadEdges.insert({From, To}).second)
4054 for (
Use &U : PN.incoming_values())
4071 std::next(
I->getReverseIterator())))) {
4072 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4076 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4079 Inst.dropDbgRecords();
4101 return DeadEdges.contains({Pred, BB}) ||
DT.dominates(BB, Pred);
4114 if (Succ == LiveSucc)
4188 if (
DT.dominates(Edge0, U)) {
4194 if (
DT.dominates(Edge1, U)) {
4201 DC.registerBranch(&BI);
4211 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4216 BasicBlock *CstBB =
SI.findCaseValue(
C)->getCaseSuccessor();
4217 if (CstBB !=
SI.getDefaultDest())
4230 for (
auto Case :
SI.cases())
4231 if (!CR.
contains(Case.getCaseValue()->getValue()))
4243 for (
auto Case :
SI.cases()) {
4246 "Result of expression should be constant");
4255 for (
auto Case :
SI.cases()) {
4258 "Result of expression should be constant");
4267 all_of(
SI.cases(), [&](
const auto &Case) {
4268 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4274 Value *NewCond = Op0;
4281 for (
auto Case :
SI.cases()) {
4282 const APInt &CaseVal = Case.getCaseValue()->getValue();
4284 : CaseVal.
lshr(ShiftAmt);
4285 Case.setValue(ConstantInt::get(
SI.getContext(), ShiftedCase));
4297 if (
all_of(
SI.cases(), [&](
const auto &Case) {
4298 const APInt &CaseVal = Case.getCaseValue()->getValue();
4299 return IsZExt ? CaseVal.isIntN(NewWidth)
4300 : CaseVal.isSignedIntN(NewWidth);
4302 for (
auto &Case :
SI.cases()) {
4303 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4304 Case.setValue(ConstantInt::get(
SI.getContext(), TruncatedCase));
4326 for (
const auto &
C :
SI.cases()) {
4328 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
4330 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
4333 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4339 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
4340 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
4345 for (
auto Case :
SI.cases()) {
4346 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4347 Case.setValue(ConstantInt::get(
SI.getContext(), TruncatedCase));
4358 SI.findCaseValue(CI)->getCaseSuccessor());
4372 const APInt *
C =
nullptr;
4374 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4375 OvID == Intrinsic::umul_with_overflow)) {
4380 if (
C->isPowerOf2()) {
4381 return BinaryOperator::CreateShl(
4383 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
4391 if (!WO->hasOneUse())
4405 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
4408 if (OvID == Intrinsic::usub_with_overflow)
4413 if (OvID == Intrinsic::smul_with_overflow &&
4414 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4415 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4418 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4419 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4422 return new ICmpInst(
4424 ConstantInt::get(WO->getLHS()->getType(),
4435 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
4440 auto *OpTy = WO->getRHS()->getType();
4441 auto *NewLHS = WO->getLHS();
4443 NewLHS =
Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy,
Offset));
4445 ConstantInt::get(OpTy, NewRHSC));
4462 const APFloat *ConstVal =
nullptr;
4463 Value *VarOp =
nullptr;
4464 bool ConstIsTrue =
false;
4471 ConstIsTrue =
false;
4476 Builder.SetInsertPoint(&EV);
4482 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0,
"mantissa");
4487 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4489 Value *NewSel = Builder.CreateSelectFMF(
4490 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4491 ConstIsTrue ? NewEV : ConstantMantissa,
SelectInst,
"select.frexp");
4501 SQ.getWithInstruction(&EV)))
4515 const unsigned *exti, *exte, *insi, *inse;
4516 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
4517 exte = EV.
idx_end(), inse =
IV->idx_end();
4518 exti != exte && insi != inse;
4532 if (exti == exte && insi == inse)
4547 Value *NewEV =
Builder.CreateExtractValue(
IV->getAggregateOperand(),
4565 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4571 STy && STy->isScalableTy())
4579 if (L->isSimple() && L->hasOneUse()) {
4584 for (
unsigned Idx : EV.
indices())
4591 L->getPointerOperand(), Indices);
4625 switch (Personality) {
4669 bool MakeNewInstruction =
false;
4675 bool isLastClause = i + 1 == e;
4683 if (AlreadyCaught.
insert(TypeInfo).second) {
4688 MakeNewInstruction =
true;
4695 MakeNewInstruction =
true;
4696 CleanupFlag =
false;
4715 if (!NumTypeInfos) {
4718 MakeNewInstruction =
true;
4719 CleanupFlag =
false;
4723 bool MakeNewFilter =
false;
4727 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4733 MakeNewInstruction =
true;
4740 if (NumTypeInfos > 1)
4741 MakeNewFilter =
true;
4745 NewFilterElts.
reserve(NumTypeInfos);
4750 bool SawCatchAll =
false;
4751 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4779 if (SeenInFilter.
insert(TypeInfo).second)
4785 MakeNewInstruction =
true;
4790 if (NewFilterElts.
size() < NumTypeInfos)
4791 MakeNewFilter =
true;
4793 if (MakeNewFilter) {
4795 NewFilterElts.
size());
4797 MakeNewInstruction =
true;
4806 if (MakeNewFilter && !NewFilterElts.
size()) {
4807 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4808 CleanupFlag =
false;
4819 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4822 for (j = i; j != e; ++j)
4829 for (
unsigned k = i; k + 1 < j; ++k)
4833 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4835 MakeNewInstruction =
true;
4854 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4864 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4865 Value *LFilter = NewClauses[j];
4876 NewClauses.
erase(J);
4877 MakeNewInstruction =
true;
4881 unsigned LElts = LTy->getNumElements();
4891 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4893 NewClauses.
erase(J);
4894 MakeNewInstruction =
true;
4903 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4904 for (
unsigned l = 0; l != LElts; ++l)
4907 NewClauses.
erase(J);
4908 MakeNewInstruction =
true;
4919 bool AllFound =
true;
4920 for (
unsigned f = 0; f != FElts; ++f) {
4923 for (
unsigned l = 0; l != LElts; ++l) {
4925 if (LTypeInfo == FTypeInfo) {
4935 NewClauses.
erase(J);
4936 MakeNewInstruction =
true;
4944 if (MakeNewInstruction) {
4952 if (NewClauses.empty())
4961 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4983 auto CanPushFreeze = [](
Value *V) {
5004 Value *V = U->get();
5005 if (!CanPushFreeze(V)) {
5011 Builder.SetInsertPoint(UserI);
5012 Value *Frozen =
Builder.CreateFreeze(V, V->getName() +
".fr");
5018 if (!Visited.
insert(
I).second)
5029 I->dropPoisonGeneratingAnnotations();
5030 this->Worklist.add(
I);
5033 return OrigUse->get();
5043 Use *StartU =
nullptr;
5061 Value *StartV = StartU->get();
5073 if (!Visited.
insert(V).second)
5076 if (Visited.
size() > 32)
5093 I->dropPoisonGeneratingAnnotations();
5095 if (StartNeedsFreeze) {
5123 MoveBefore = *MoveBeforeOpt;
5127 MoveBefore.setHeadBit(
false);
5130 if (&FI != &*MoveBefore) {
5131 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
5135 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
5136 bool Dominates =
DT.dominates(&FI, U);
5146 for (
auto *U : V->users()) {
5156 Value *Op0 =
I.getOperand(0);
5186 auto getUndefReplacement = [&](
Type *Ty) {
5187 auto pickCommonConstantFromPHI = [](
PHINode &PN) ->
Value * {
5191 for (
Value *V : PN.incoming_values()) {
5202 if (BestValue && BestValue !=
C)
5211 Value *BestValue =
nullptr;
5212 for (
auto *U :
I.users()) {
5213 Value *V = NullValue;
5222 if (
Value *MaybeV = pickCommonConstantFromPHI(*
PHI))
5228 else if (BestValue != V)
5229 BestValue = NullValue;
5231 assert(BestValue &&
"Must have at least one use");
5232 assert(BestValue != &
I &&
"Cannot replace with itself");
5246 Type *Ty =
C->getType();
5250 unsigned NumElts = VTy->getNumElements();
5252 for (
unsigned i = 0; i != NumElts; ++i) {
5253 Constant *EltC =
C->getAggregateElement(i);
5264 !
C->containsConstantExpression()) {
5265 if (
Constant *Repl = getFreezeVectorReplacement(
C))
5299 for (
const User *U :
I.users()) {
5300 if (Visited.
insert(U).second)
5305 while (!AllocaUsers.
empty()) {
5328 if (
isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
5345 if (CI->isConvergent())
5351 if (
I->mayWriteToMemory()) {
5358 if (
I->mayReadFromMemory() &&
5359 !
I->hasMetadata(LLVMContext::MD_invariant_load)) {
5366 E =
I->getParent()->end();
5368 if (Scan->mayWriteToMemory())
5372 I->dropDroppableUses([&](
const Use *U) {
5374 if (
I &&
I->getParent() != DestBlock) {
5384 I->moveBefore(*DestBlock, InsertPos);
5394 if (!DbgVariableRecords.
empty())
5396 DbgVariableRecords);
5419 for (
auto &DVR : DbgVariableRecords)
5420 if (DVR->getParent() != DestBlock)
5421 DbgVariableRecordsToSalvage.
push_back(DVR);
5427 if (DVR->getParent() == SrcBlock)
5428 DbgVariableRecordsToSink.
push_back(DVR);
5435 return B->getInstruction()->comesBefore(
A->getInstruction());
5442 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5444 if (DbgVariableRecordsToSink.
size() > 1) {
5450 DVR->getDebugLoc()->getInlinedAt());
5451 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5457 for (
auto It : CountMap) {
5458 if (It.second > 1) {
5459 FilterOutMap[It.first] =
nullptr;
5460 DupSet.
insert(It.first.first);
5471 DVR.getDebugLoc()->getInlinedAt());
5473 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
5474 if (FilterIt == FilterOutMap.
end())
5476 if (FilterIt->second !=
nullptr)
5478 FilterIt->second = &DVR;
5493 DVR->getDebugLoc()->getInlinedAt());
5497 if (!FilterOutMap.
empty()) {
5498 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5499 auto It = FilterOutMap.
find(IVP);
5502 if (It != FilterOutMap.
end() && It->second != DVR)
5506 if (!SunkVariables.
insert(DbgUserVariable).second)
5509 if (DVR->isDbgAssign())
5517 if (DVRClones.
empty())
5531 assert(InsertPos.getHeadBit());
5533 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5557 if (
I ==
nullptr)
continue;
5572 auto getOptionalSinkBlockForInst =
5573 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
5575 return std::nullopt;
5579 unsigned NumUsers = 0;
5581 for (
Use &U :
I->uses()) {
5586 return std::nullopt;
5592 UserBB = PN->getIncomingBlock(U);
5596 if (UserParent && UserParent != UserBB)
5597 return std::nullopt;
5598 UserParent = UserBB;
5602 if (NumUsers == 0) {
5605 if (UserParent == BB || !
DT.isReachableFromEntry(UserParent))
5606 return std::nullopt;
5618 return std::nullopt;
5620 assert(
DT.dominates(BB, UserParent) &&
"Dominance relation broken?");
5628 return std::nullopt;
5633 auto OptBB = getOptionalSinkBlockForInst(
I);
5635 auto *UserParent = *OptBB;
5643 for (
Use &U :
I->operands())
5651 Builder.CollectMetadataToCopy(
5652 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5665 <<
" New = " << *Result <<
'\n');
5670 Result->setDebugLoc(Result->getDebugLoc().orElse(
I->getDebugLoc()));
5672 Result->copyMetadata(*
I, LLVMContext::MD_annotation);
5674 I->replaceAllUsesWith(Result);
5677 Result->takeName(
I);
5692 Result->insertInto(InstParent, InsertPos);
5695 Worklist.pushUsersToWorkList(*Result);
5701 <<
" New = " << *
I <<
'\n');
5733 if (!
I->hasMetadataOtherThanDebugLoc())
5736 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5738 if (!MDScopeList || !Container.insert(MDScopeList).second)
5740 for (
const auto &
MDOperand : MDScopeList->operands())
5742 Container.insert(MDScope);
5745 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5746 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5755 "llvm.experimental.noalias.scope.decl in use ?");
5758 "llvm.experimental.noalias.scope should refer to a single scope");
5761 return !UsedAliasScopesAndLists.contains(MD) ||
5762 !UsedNoAliasScopesAndLists.contains(MD);
5786 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5787 for (
PHINode &PN : Succ->phis())
5788 for (
Use &U : PN.incoming_values())
5797 return DeadEdges.contains({Pred, BB}) ||
DT.dominates(BB, Pred);
5799 HandleOnlyLiveSuccessor(BB,
nullptr);
5806 if (!Inst.use_empty() &&
5807 (Inst.getNumOperands() == 0 ||
isa<Constant>(Inst.getOperand(0))))
5811 Inst.replaceAllUsesWith(
C);
5814 Inst.eraseFromParent();
5820 for (
Use &U : Inst.operands()) {
5825 Constant *&FoldRes = FoldedConstants[
C];
5831 <<
"\n Old = " << *
C
5832 <<
"\n New = " << *FoldRes <<
'\n');
5841 if (!Inst.isDebugOrPseudoInst()) {
5842 InstrsForInstructionWorklist.
push_back(&Inst);
5843 SeenAliasScopes.
analyse(&Inst);
5853 HandleOnlyLiveSuccessor(BB,
nullptr);
5857 bool CondVal =
Cond->getZExtValue();
5858 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5864 HandleOnlyLiveSuccessor(BB,
nullptr);
5868 HandleOnlyLiveSuccessor(BB,
5869 SI->findCaseValue(
Cond)->getCaseSuccessor());
5879 if (LiveBlocks.
count(&BB))
5882 unsigned NumDeadInstInBB;
5886 NumDeadInst += NumDeadInstInBB;
5903 Inst->eraseFromParent();
5932 auto &
DL =
F.getDataLayout();
5934 !
F.hasFnAttribute(
"instcombine-no-verify-fixpoint");
5950 bool MadeIRChange =
false;
5955 unsigned Iteration = 0;
5959 <<
" on " <<
F.getName()
5960 <<
" reached; stopping without verifying fixpoint\n");
5965 ++NumWorklistIterations;
5966 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5967 <<
F.getName() <<
"\n");
5969 InstCombinerImpl IC(Worklist, Builder,
F,
AA, AC, TLI,
TTI, DT, ORE, BFI,
5970 BPI, PSI,
DL, RPOT);
5973 MadeChangeInThisIteration |= IC.
run();
5974 if (!MadeChangeInThisIteration)
5977 MadeIRChange =
true;
5980 "Instruction Combining on " +
Twine(
F.getName()) +
5983 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5984 "'instcombine-no-verify-fixpoint' to suppress this error.");
5990 else if (Iteration == 2)
5992 else if (Iteration == 3)
5993 ++NumThreeIterations;
5995 ++NumFourOrMoreIterations;
5997 return MadeIRChange;
6005 OS, MapClassName2PassName);
6007 OS <<
"max-iterations=" << Options.MaxIterations <<
";";
6008 OS << (Options.VerifyFixpoint ?
"" :
"no-") <<
"verify-fixpoint";
6012char InstCombinePass::ID = 0;
6018 if (LRT.shouldSkip(&ID))
6031 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6036 BFI, BPI, PSI, Options)) {
6038 LRT.update(&ID,
false);
6044 LRT.update(&ID,
true);
6086 if (
auto *WrapperPass =
6088 BPI = &WrapperPass->getBPI();
6101 "Combine redundant instructions",
false,
false)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
const Use & getOperandUse(unsigned i) const
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
const ParentTy * getParent() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g. malloc/realloc), return the identifier for its family of functions.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ Ref
The access may reference the value stored in memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
@ NoModRef
The access neither references nor modifies the value stored in memory.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but does not use information from the instruction's non-constant operands (for use when those operands are about to be replaced).
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const