#define DEBUG_TYPE "tti"

    cl::desc("Recognize reduction patterns."));

    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

    cl::desc("Use this to override the target's minimum page size."));

    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));
    std::unique_ptr<const TargetTransformInfoImplBase> Impl)

      ScalarizationCost(ScalarizationCost), LibInfo(LibInfo) {
    FMF = FPMO->getFastMathFlags();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());

    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());

    : RetTy(Ty), IID(Id) {
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());

    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost),
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
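
// Illustrative sketch (editorial addition, not part of the original file):
// a client holding a TargetTransformInfo can price an intrinsic call by
// building an IntrinsicCostAttributes from the intrinsic ID plus result and
// parameter types, much as the constructors above do for real call sites.
// The helper name `costOfSqrt` is hypothetical; the API used is declared in
// llvm/Analysis/TargetTransformInfo.h.
static llvm::InstructionCost costOfSqrt(const llvm::TargetTransformInfo &TTI,
                                        llvm::Type *FltTy) {
  llvm::IntrinsicCostAttributes ICA(llvm::Intrinsic::sqrt, FltTy, {FltTy});
  return TTI.getIntrinsicInstrCost(
      ICA, llvm::TargetTransformInfo::TCK_RecipThroughput);
}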
  L->getExitingBlocks(ExitingBlocks);
  if (!L->isLoopLatch(BB)) {
  if (ConstEC->getValue()->isZero())
  bool NotAlways = false;
  if (!L->contains(Pred))
  if (!BI->isConditional())
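
// Illustrative sketch (editorial addition, not part of the original file):
// roughly how a hardware-loop style transform combines the checks above --
// HardwareLoopInfo::canAnalyze and isHardwareLoopCandidate -- with
// TTI::isHardwareLoopProfitable. The helper name is hypothetical.
static bool mayUseHardwareLoop(llvm::Loop *L, llvm::ScalarEvolution &SE,
                               llvm::LoopInfo &LI, llvm::DominatorTree &DT,
                               llvm::AssumptionCache &AC,
                               llvm::TargetLibraryInfo *TLI,
                               const llvm::TargetTransformInfo &TTI) {
  llvm::HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(LI))
    return false;
  return TTI.isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo) &&
         HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT);
}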
    : TTIImpl(std::make_unique<NoTTIImpl>(DL)) {}

    : TTIImpl(std::move(Arg.TTIImpl)) {}

  TTIImpl = std::move(RHS.TTIImpl);
  return TTIImpl->getInliningThresholdMultiplier();
  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
  return TTIImpl->getInliningLastCallToStaticBonus();
  return TTIImpl->adjustInliningThreshold(CB);
  return TTIImpl->getCallerAllocaCost(CB, AI);
  return TTIImpl->getInlinerVectorBonusPercent();
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
         "TTI should not produce negative costs!");
      : TTIImpl->getPredictableBranchThreshold();
  return TTIImpl->getBranchMispredictPenalty();
  return TTIImpl->hasBranchDivergence(F);
  if (Call->hasFnAttr(Attribute::NoDivergenceSource))
  return TTIImpl->isSourceOfDivergence(V);
  return TTIImpl->isAlwaysUniform(V);
    unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
    unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
  return TTIImpl->getFlatAddressSpace();
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
    unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
  return TTIImpl->getAssumedAddrSpace(V);
  return TTIImpl->isSingleThreaded();
std::pair<const Value *, unsigned>
  return TTIImpl->getPredicatedAddrSpace(V);
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  return TTIImpl->isLoweredToCall(F);
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  return TTIImpl->getEpilogueVectorizationMinVF();
  return TTIImpl->preferPredicateOverEpilogue(TFI);
    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
std::optional<Instruction *>
  return TTIImpl->instCombineIntrinsic(IC, II);
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
    SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
  return TTIImpl->getPeelingPreferences(L, SE, PP);
  return TTIImpl->isLegalAddImmediate(Imm);
  return TTIImpl->isLegalAddScalableImmediate(Imm);
  return TTIImpl->isLegalICmpImmediate(Imm);
    bool HasBaseReg, int64_t Scale,
    int64_t ScalableOffset) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I, ScalableOffset);
  return TTIImpl->isLSRCostLess(C1, C2);
  return TTIImpl->isNumRegsMajorCostOfLSR();
  return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
  return TTIImpl->isProfitableLSRChainElement(I);
  return TTIImpl->canMacroFuseCmp();
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  return TTIImpl->getPreferredAddressingMode(L, SE);
  return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace);
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace);
    Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
    unsigned AddrSpace) const {
  return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,
    Type *DataType) const {
  return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
  return TTIImpl->enableOrderedReductions();
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
    unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
  return TTIImpl->prefersVectorizedAddressing();
    int64_t Scale, unsigned AddrSpace) const {
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->LSRWithInstrQueries();
  return TTIImpl->isTruncateFree(Ty1, Ty2);
  return TTIImpl->isProfitableToHoist(I);
  return TTIImpl->isTypeLegal(Ty);
  return TTIImpl->getRegUsageForType(Ty);
  return TTIImpl->shouldBuildLookupTables();
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
  return TTIImpl->shouldBuildRelLookupTables();
  return TTIImpl->useColdCCForColdCall(F);
  return TTIImpl->isTargetIntrinsicTriviallyScalarizable(ID);
  return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
  return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
  return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
  return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind);
  return TTIImpl->supportsEfficientVectorElementLoadStore();
  return TTIImpl->supportsTailCalls();
  return TTIImpl->supportsTailCallFor(CB);
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
  return TTIImpl->enableSelectOptimize();
  return TTIImpl->shouldTreatInstructionLikeSelect(I);
  return TTIImpl->enableInterleavedAccessVectorization();
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
  return TTIImpl->haveFastSqrt(Ty);
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
  return TTIImpl->getNumberOfRegisters(ClassID);
    bool IsStore) const {
  return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);
  return TTIImpl->getRegisterClassForType(Vector, Ty);
  return TTIImpl->getRegisterClassName(ClassID);
  return TTIImpl->getRegisterBitWidth(K);
  return TTIImpl->getMinVectorRegisterBitWidth();
  return TTIImpl->getMaxVScale();
  return TTIImpl->getVScaleForTuning();
  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
    bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
    unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
    Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
      : TTIImpl->getCacheLineSize();
std::optional<unsigned>
  return TTIImpl->getCacheSize(Level);
std::optional<unsigned>
  return TTIImpl->getCacheAssociativity(Level);
      : TTIImpl->getMinPageSize();
  return TTIImpl->getPrefetchDistance();
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
  return TTIImpl->getMaxPrefetchIterationsAhead();
  return TTIImpl->enableWritePrefetching();
  return TTIImpl->shouldPrefetchAddressSpace(AS);
    unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
  return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
                                          AccumType, VF, OpAExtend, OpBExtend,
  return TTIImpl->getMaxInterleaveFactor(VF);
    if (CI->getValue().isPowerOf2())
    else if (CI->getValue().isNegatedPowerOf2())
    if (ShuffleInst->isZeroEltSplat())
    if (CI->getValue().isPowerOf2())
    else if (CI->getValue().isNegatedPowerOf2())
    bool AllPow2 = true, AllNegPow2 = true;
    for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {
      AllPow2 &= CI->getValue().isPowerOf2();
      AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
      if (AllPow2 || AllNegPow2)
    AllPow2 = AllNegPow2 = false;
  return {OpInfo, OpProps};
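
// Illustrative sketch (editorial addition, not part of the original file):
// the OperandValueInfo computed above is what callers pass to
// getArithmeticInstrCost so targets can price, e.g., a multiply by a
// power-of-two constant more cheaply. The helper name `mulCost` is
// hypothetical.
static llvm::InstructionCost mulCost(const llvm::TargetTransformInfo &TTI,
                                     llvm::Type *Ty, const llvm::Value *LHS,
                                     const llvm::Value *RHS) {
  auto LHSInfo = llvm::TargetTransformInfo::getOperandInfo(LHS);
  auto RHSInfo = llvm::TargetTransformInfo::getOperandInfo(RHS);
  return TTI.getArithmeticInstrCost(
      llvm::Instruction::Mul, Ty,
      llvm::TargetTransformInfo::TCK_RecipThroughput, LHSInfo, RHSInfo);
}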
  if (TLibInfo && Opcode == Instruction::FRem) {
        TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
         "Expected the Mask to match the return size if given");
         "Expected the same scalar types");
      Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    if (I->getOpcode() == LdStOp)
    if (II->getIntrinsicID() == MaskedOp)
    if (II->getIntrinsicID() == GatScatOp)
  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
      Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Index) const {
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Index) const {
      TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((Opcode == Instruction::InsertValue ||
          Opcode == Instruction::ExtractValue) &&
         "Expecting Opcode to be insertvalue/extractvalue.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
         "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
      Opcode, DataTy, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    bool UseMaskForCond, bool UseMaskForGaps) const {
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getNumberOfParts(Tp);
      TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
    bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
  return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
  return TTIImpl->areInlineCompatible(Caller, Callee);
    unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
  return TTIImpl->isLegalToVectorizeLoad(LI);
  return TTIImpl->isLegalToVectorizeStore(SI);
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
    unsigned ChainSizeInBytes,
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
    unsigned ChainSizeInBytes,
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
    bool IsEpilogue) const {
  return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);
  return TTIImpl->preferInLoopReduction(Kind, Ty);
  return TTIImpl->preferAlternateOpcodeVectorization();
  return TTIImpl->preferPredicatedReductionSelect();
  return TTIImpl->preferEpilogueVectorization();
  return TTIImpl->shouldConsiderVectorizationRegPressure();
  return TTIImpl->getVPLegalizationStrategy(VPI);
  return TTIImpl->hasArmWideBranch(Thumb);
  return TTIImpl->getFeatureMask(F);
  return TTIImpl->isMultiversionedFunction(F);
  return TTIImpl->getMaxNumArgs();
  return TTIImpl->shouldExpandReduction(II);
  return TTIImpl->getPreferredExpandedReductionShuffle(II);
  return TTIImpl->getGISelRematGlobalCost();
  return TTIImpl->getMinTripCountTailFoldingThreshold();
  return TTIImpl->supportsScalableVectors();
  return TTIImpl->enableScalableVectorization();
  return TTIImpl->hasActiveVectorLength();
  return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);
  return TTIImpl->isVectorShiftByScalarCheap(Ty);
  return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);
  return TTIImpl->collectKernelLaunchBounds(F, LB);
  return TTIImpl->allowVectorElementIndexingUsingGEP();
    : TTICallback(std::move(TTICallback)) {}

  assert(!F.isIntrinsic() && "Should not request TTI for intrinsics");
  return TTICallback(F);

  return Result(F.getDataLayout());

INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)

  TTI = TIRA.run(F, DummyFAM);
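
// Illustrative sketch (editorial addition, not part of the original file):
// under the new pass manager, the TargetTransformInfo produced by
// TargetIRAnalysis is obtained from the FunctionAnalysisManager rather than
// through the wrapper pass above. `QueryTTIExamplePass` is a hypothetical
// pass name.
struct QueryTTIExamplePass : llvm::PassInfoMixin<QueryTTIExamplePass> {
  llvm::PreservedAnalyses run(llvm::Function &F,
                              llvm::FunctionAnalysisManager &FAM) {
    llvm::TargetTransformInfo &TTI =
        FAM.getResult<llvm::TargetIRAnalysis>(F);
    if (TTI.enableInterleavedAccessVectorization()) {
      // Target-specific decisions would be made here.
    }
    return llvm::PreservedAnalyses::all();
  }
};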