Thanks to visit codestin.com
Credit goes to llvm.org

LLVM 22.0.0git
AArch64TargetTransformInfo.cpp File Reference

Go to the source code of this file.

Classes

struct  SVEIntrinsicInfo

Macros

#define DEBUG_TYPE   "aarch64tti"

Functions

static bool isSMEABIRoutineCall (const CallInst &CI, const AArch64TargetLowering &TLI)
static bool hasPossibleIncompatibleOps (const Function *F, const AArch64TargetLowering &TLI)
 Returns true if the function has explicit operations that can only be lowered using incompatible instructions for the selected mode.
static bool isUnpackedVectorVT (EVT VecVT)
static InstructionCost getHistogramCost (const AArch64Subtarget *ST, const IntrinsicCostAttributes &ICA)
static std::optional< Instruction * > processPhiNode (InstCombiner &IC, IntrinsicInst &II)
 The function will remove redundant reinterprets casting in the presence of the control flow.
static SVEIntrinsicInfo constructSVEIntrinsicInfo (IntrinsicInst &II)
static bool isAllActivePredicate (Value *Pred)
static Value * stripInactiveLanes (Value *V, const Value *Pg)
static std::optional< Instruction * > simplifySVEIntrinsicBinOp (InstCombiner &IC, IntrinsicInst &II, const SVEIntrinsicInfo &IInfo)
static std::optional< Instruction * > simplifySVEIntrinsic (InstCombiner &IC, IntrinsicInst &II, const SVEIntrinsicInfo &IInfo)
static std::optional< Instruction * > tryCombineFromSVBoolBinOp (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineConvertFromSVBool (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVESel (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEDup (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEDupX (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVECmpNE (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVELast (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVECondLast (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineRDFFR (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVECntElts (InstCombiner &IC, IntrinsicInst &II, unsigned NumElts)
static std::optional< Instruction * > instCombineSMECntsd (InstCombiner &IC, IntrinsicInst &II, const AArch64Subtarget *ST)
static std::optional< Instruction * > instCombineSVEPTest (InstCombiner &IC, IntrinsicInst &II)
template<Intrinsic::ID MulOpc, Intrinsic::ID FuseOpc>
static std::optional< Instruction * > instCombineSVEVectorFuseMulAddSub (InstCombiner &IC, IntrinsicInst &II, bool MergeIntoAddendOp)
static std::optional< Instruction * > instCombineSVELD1 (InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL)
static std::optional< Instruction * > instCombineSVEST1 (InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL)
static Instruction::BinaryOps intrinsicIDToBinOpCode (unsigned Intrinsic)
static std::optional< Instruction * > instCombineSVEVectorBinOp (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorAdd (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFAdd (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFAddU (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFSub (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFSubU (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorSub (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEUnpack (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVETBL (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEUzp1 (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEZip (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineLD1GatherIndex (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineST1ScatterIndex (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVESDIV (InstCombiner &IC, IntrinsicInst &II)
bool SimplifyValuePattern (SmallVector< Value * > &Vec, bool AllowPoison)
static std::optional< Instruction * > instCombineSVEDupqLane (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineMaxMinNM (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVESrshl (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEInsr (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineDMB (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineWhilelo (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombinePTrue (InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEUxt (InstCombiner &IC, IntrinsicInst &II, unsigned NumBits)
static std::optional< Instruction * > instCombineInStreamingMode (InstCombiner &IC, IntrinsicInst &II)
static unsigned getSVEGatherScatterOverhead (unsigned Opcode, const AArch64Subtarget *ST)
static void getFalkorUnrollingPreferences (Loop *L, ScalarEvolution &SE, TargetTransformInfo::UnrollingPreferences &UP)
static bool isLoopSizeWithinBudget (Loop *L, const AArch64TTIImpl &TTI, InstructionCost Budget, unsigned *FinalSize)
static bool shouldUnrollMultiExitLoop (Loop *L, ScalarEvolution &SE, const AArch64TTIImpl &TTI)
static void getAppleRuntimeUnrollPreferences (Loop *L, ScalarEvolution &SE, TargetTransformInfo::UnrollingPreferences &UP, const AArch64TTIImpl &TTI)
 For Apple CPUs, we want to runtime-unroll loops to make better use of the OOO engine's wide instruction window and various predictors.
static bool containsDecreasingPointers (Loop *TheLoop, PredicatedScalarEvolution *PSE)
static bool isSplatShuffle (Value *V)
static bool areExtractShuffleVectors (Value *Op1, Value *Op2, bool AllowSplat=false)
 Check if both Op1 and Op2 are shufflevector extracts of either the lower or upper half of the vector elements.
static bool areExtractExts (Value *Ext1, Value *Ext2)
 Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
static bool isOperandOfVmullHighP64 (Value *Op)
 Check if Op could be used with vmull_high_p64 intrinsic.
static bool areOperandsOfVmullHighP64 (Value *Op1, Value *Op2)
 Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
static bool shouldSinkVectorOfPtrs (Value *Ptrs, SmallVectorImpl< Use * > &Ops)
static bool shouldSinkVScale (Value *Op, SmallVectorImpl< Use * > &Ops)
 We want to sink following cases: (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A, vscale; (add|sub|gep) A, ((mul|shl) zext(vscale), imm);.

Variables

static cl::opt< bool > EnableFalkorHWPFUnrollFix ("enable-falkor-hwpf-unroll-fix", cl::init(true), cl::Hidden)
static cl::opt< bool > SVEPreferFixedOverScalableIfEqualCost ("sve-prefer-fixed-over-scalable-if-equal", cl::Hidden)
static cl::opt< unsigned > SVEGatherOverhead ("sve-gather-overhead", cl::init(10), cl::Hidden)
static cl::opt< unsigned > SVEScatterOverhead ("sve-scatter-overhead", cl::init(10), cl::Hidden)
static cl::opt< unsigned > SVETailFoldInsnThreshold ("sve-tail-folding-insn-threshold", cl::init(15), cl::Hidden)
static cl::opt< unsigned > NeonNonConstStrideOverhead ("neon-nonconst-stride-overhead", cl::init(10), cl::Hidden)
static cl::opt< unsigned > CallPenaltyChangeSM ("call-penalty-sm-change", cl::init(5), cl::Hidden, cl::desc("Penalty of calling a function that requires a change to PSTATE.SM"))
static cl::opt< unsigned > InlineCallPenaltyChangeSM ("inline-call-penalty-sm-change", cl::init(10), cl::Hidden, cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"))
static cl::opt< bool > EnableOrLikeSelectOpt ("enable-aarch64-or-like-select", cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLSRCostOpt ("enable-aarch64-lsr-cost-opt", cl::init(true), cl::Hidden)
static cl::opt< unsigned > BaseHistCntCost ("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden, cl::desc("The cost of a histcnt instruction"))
static cl::opt< unsigned > DMBLookaheadThreshold ("dmb-lookahead-threshold", cl::init(10), cl::Hidden, cl::desc("The number of instructions to search for a redundant dmb"))
TailFoldingOption TailFoldingOptionLoc
static cl::opt< TailFoldingOption, true, cl::parser< std::string > > SVETailFolding ("sve-tail-folding", cl::desc("Control the use of vectorisation using tail-folding for SVE where the" " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:" "\ndisabled (Initial) No loop types will vectorize using " "tail-folding" "\ndefault (Initial) Uses the default tail-folding settings for " "the target CPU" "\nall (Initial) All legal loop types will vectorize using " "tail-folding" "\nsimple (Initial) Use tail-folding for simple loops (not " "reductions or recurrences)" "\nreductions Use tail-folding for loops containing reductions" "\nnoreductions Inverse of above" "\nrecurrences Use tail-folding for loops containing fixed order " "recurrences" "\nnorecurrences Inverse of above" "\nreverse Use tail-folding for loops requiring reversed " "predicates" "\nnoreverse Inverse of above"), cl::location(TailFoldingOptionLoc))
static cl::opt< bool > EnableFixedwidthAutovecInStreamingMode ("enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden)
static cl::opt< bool > EnableScalableAutovecInStreamingMode ("enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden)

Macro Definition Documentation

◆ DEBUG_TYPE

#define DEBUG_TYPE   "aarch64tti"

Definition at line 35 of file AArch64TargetTransformInfo.cpp.

Function Documentation

◆ areExtractExts()

bool areExtractExts ( Value * Ext1,
Value * Ext2 )
static

Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.

Definition at line 6270 of file AArch64TargetTransformInfo.cpp.

References llvm::cast(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_ZExtOrSExt(), and llvm::PatternMatch::match().

Referenced by llvm::AArch64TTIImpl::isProfitableToSinkOperands(), and llvm::ARMTTIImpl::isProfitableToSinkOperands().

◆ areExtractShuffleVectors()

◆ areOperandsOfVmullHighP64()

bool areOperandsOfVmullHighP64 ( Value * Op1,
Value * Op2 )
static

Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.

Definition at line 6297 of file AArch64TargetTransformInfo.cpp.

References isOperandOfVmullHighP64().

Referenced by llvm::AArch64TTIImpl::isProfitableToSinkOperands().

◆ constructSVEIntrinsicInfo()

◆ containsDecreasingPointers()

◆ getAppleRuntimeUnrollPreferences()

void getAppleRuntimeUnrollPreferences ( Loop * L,
ScalarEvolution & SE,
TargetTransformInfo::UnrollingPreferences & UP,
const AArch64TTIImpl & TTI )
static

For Apple CPUs, we want to runtime-unroll loops to make better use of the OOO engine's wide instruction window and various predictors.

Definition at line 4963 of file AArch64TargetTransformInfo.cpp.

References llvm::TargetTransformInfo::UnrollingPreferences::AddAdditionalAccumulators, llvm::any_of(), llvm::cast(), llvm::SmallPtrSetImpl< PtrType >::contains(), llvm::TargetTransformInfo::UnrollingPreferences::DefaultUnrollRuntimeCount, llvm::Depth, llvm::dyn_cast(), llvm::findStringMetadataForLoop(), llvm::ScalarEvolution::getBackedgeTakenCount(), llvm::getLoadStorePointerOperand(), llvm::ScalarEvolution::getSCEV(), llvm::ScalarEvolution::getSmallConstantMaxTripCount(), llvm::ScalarEvolution::getSmallConstantTripMultiple(), llvm::ScalarEvolution::getSymbolicMaxBackedgeTakenCount(), I, llvm::SmallPtrSetImpl< PtrType >::insert(), llvm::is_contained(), llvm::isa(), llvm::ScalarEvolution::isLoopInvariant(), isLoopSizeWithinBudget(), llvm::PatternMatch::m_Br(), llvm::PatternMatch::m_ICmp(), llvm::PatternMatch::m_Instruction(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::match(), llvm::TargetTransformInfo::UnrollingPreferences::MaxCount, llvm::none_of(), llvm::TargetTransformInfo::UnrollingPreferences::Partial, llvm::predecessors(), Ptr, llvm::SmallVectorTemplateBase< T, bool >::push_back(), llvm::TargetTransformInfo::UnrollingPreferences::Runtime, llvm::TargetTransformInfo::UnrollingPreferences::SCEVExpansionBudget, Size, and llvm::SmallVectorTemplateCommon< T, typename >::size().

Referenced by llvm::AArch64TTIImpl::getUnrollingPreferences().

◆ getFalkorUnrollingPreferences()

◆ getHistogramCost()

◆ getSVEGatherScatterOverhead()

unsigned getSVEGatherScatterOverhead ( unsigned Opcode,
const AArch64Subtarget * ST )
static

◆ hasPossibleIncompatibleOps()

bool hasPossibleIncompatibleOps ( const Function * F,
const AArch64TargetLowering & TLI )
static

Returns true if the function has explicit operations that can only be lowered using incompatible instructions for the selected mode.

This also returns true if the function F may use or modify ZA state.

Definition at line 233 of file AArch64TargetTransformInfo.cpp.

References llvm::cast(), F, I, llvm::isa(), and isSMEABIRoutineCall().

Referenced by llvm::AArch64TTIImpl::areInlineCompatible().

◆ instCombineConvertFromSVBool()

◆ instCombineDMB()

◆ instCombineInStreamingMode()

◆ instCombineLD1GatherIndex()

◆ instCombineMaxMinNM()

std::optional< Instruction * > instCombineMaxMinNM ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombinePTrue()

◆ instCombineRDFFR()

◆ instCombineSMECntsd()

◆ instCombineST1ScatterIndex()

◆ instCombineSVECmpNE()

◆ instCombineSVECntElts()

◆ instCombineSVECondLast()

◆ instCombineSVEDup()

◆ instCombineSVEDupqLane()

◆ instCombineSVEDupX()

◆ instCombineSVEInsr()

std::optional< Instruction * > instCombineSVEInsr ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVELast()

◆ instCombineSVELD1()

◆ instCombineSVEPTest()

◆ instCombineSVESDIV()

◆ instCombineSVESel()

◆ instCombineSVESrshl()

◆ instCombineSVEST1()

◆ instCombineSVETBL()

◆ instCombineSVEUnpack()

◆ instCombineSVEUxt()

◆ instCombineSVEUzp1()

◆ instCombineSVEVectorAdd()

std::optional< Instruction * > instCombineSVEVectorAdd ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEVectorBinOp()

◆ instCombineSVEVectorFAdd()

std::optional< Instruction * > instCombineSVEVectorFAdd ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEVectorFAddU()

std::optional< Instruction * > instCombineSVEVectorFAddU ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEVectorFSub()

std::optional< Instruction * > instCombineSVEVectorFSub ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEVectorFSubU()

std::optional< Instruction * > instCombineSVEVectorFSubU ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEVectorFuseMulAddSub()

◆ instCombineSVEVectorSub()

std::optional< Instruction * > instCombineSVEVectorSub ( InstCombiner & IC,
IntrinsicInst & II )
static

◆ instCombineSVEZip()

◆ instCombineWhilelo()

◆ intrinsicIDToBinOpCode()

Instruction::BinaryOps intrinsicIDToBinOpCode ( unsigned Intrinsic)
static

Definition at line 2268 of file AArch64TargetTransformInfo.cpp.

Referenced by instCombineSVEVectorBinOp().

◆ isAllActivePredicate()

◆ isLoopSizeWithinBudget()

◆ isOperandOfVmullHighP64()

◆ isSMEABIRoutineCall()

bool isSMEABIRoutineCall ( const CallInst & CI,
const AArch64TargetLowering & TLI )
static

◆ isSplatShuffle()

bool isSplatShuffle ( Value * V)
static

◆ isUnpackedVectorVT()

◆ processPhiNode()

◆ shouldSinkVectorOfPtrs()

◆ shouldSinkVScale()

bool shouldSinkVScale ( Value * Op,
SmallVectorImpl< Use * > & Ops )
static

We want to sink following cases: (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A, vscale; (add|sub|gep) A, ((mul|shl) zext(vscale), imm);.

Definition at line 6329 of file AArch64TargetTransformInfo.cpp.

References AbstractManglingParser< Derived, Alloc >::Ops, llvm::cast(), llvm::PatternMatch::m_ConstantInt(), llvm::PatternMatch::m_Mul(), llvm::PatternMatch::m_Shl(), llvm::PatternMatch::m_VScale(), llvm::PatternMatch::m_ZExt(), and llvm::PatternMatch::match().

Referenced by llvm::AArch64TTIImpl::isProfitableToSinkOperands().

◆ shouldUnrollMultiExitLoop()

◆ simplifySVEIntrinsic()

◆ simplifySVEIntrinsicBinOp()

◆ SimplifyValuePattern()

◆ stripInactiveLanes()

Value * stripInactiveLanes ( Value * V,
const Value * Pg )
static

◆ tryCombineFromSVBoolBinOp()

Variable Documentation

◆ BaseHistCntCost

cl::opt< unsigned > BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden, cl::desc("The cost of a histcnt instruction")) ( "aarch64-base-histcnt-cost" ,
cl::init(8) ,
cl::Hidden ,
cl::desc("The cost of a histcnt instruction")  )
static

Referenced by getHistogramCost().

◆ CallPenaltyChangeSM

cl::opt< unsigned > CallPenaltyChangeSM("call-penalty-sm-change", cl::init(5), cl::Hidden, cl::desc( "Penalty of calling a function that requires a change to PSTATE.SM")) ( "call-penalty-sm-change" ,
cl::init(5) ,
cl::Hidden ,
cl::desc( "Penalty of calling a function that requires a change to PSTATE.SM")  )
static

◆ DMBLookaheadThreshold

cl::opt< unsigned > DMBLookaheadThreshold("dmb-lookahead-threshold", cl::init(10), cl::Hidden, cl::desc("The number of instructions to search for a redundant dmb")) ( "dmb-lookahead-threshold" ,
cl::init(10) ,
cl::Hidden ,
cl::desc("The number of instructions to search for a redundant dmb")  )
static

Referenced by instCombineDMB().

◆ EnableFalkorHWPFUnrollFix

cl::opt< bool > EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix", cl::init(true), cl::Hidden) ( "enable-falkor-hwpf-unroll-fix" ,
cl::init(true) ,
cl::Hidden  )
static

◆ EnableFixedwidthAutovecInStreamingMode

cl::opt< bool > EnableFixedwidthAutovecInStreamingMode("enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden) ( "enable-fixedwidth-autovec-in-streaming-mode" ,
cl::init(false) ,
cl::Hidden  )
static

◆ EnableLSRCostOpt

cl::opt< bool > EnableLSRCostOpt("enable-aarch64-lsr-cost-opt", cl::init(true), cl::Hidden) ( "enable-aarch64-lsr-cost-opt" ,
cl::init(true) ,
cl::Hidden  )
static

◆ EnableOrLikeSelectOpt

cl::opt< bool > EnableOrLikeSelectOpt("enable-aarch64-or-like-select", cl::init(true), cl::Hidden) ( "enable-aarch64-or-like-select" ,
cl::init(true) ,
cl::Hidden  )
static

◆ EnableScalableAutovecInStreamingMode

cl::opt< bool > EnableScalableAutovecInStreamingMode("enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden) ( "enable-scalable-autovec-in-streaming-mode" ,
cl::init(false) ,
cl::Hidden  )
static

◆ InlineCallPenaltyChangeSM

cl::opt< unsigned > InlineCallPenaltyChangeSM("inline-call-penalty-sm-change", cl::init(10), cl::Hidden, cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM")) ( "inline-call-penalty-sm-change" ,
cl::init(10) ,
cl::Hidden ,
cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM")  )
static

◆ NeonNonConstStrideOverhead

cl::opt< unsigned > NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10), cl::Hidden) ( "neon-nonconst-stride-overhead" ,
cl::init(10) ,
cl::Hidden  )
static

◆ SVEGatherOverhead

cl::opt< unsigned > SVEGatherOverhead("sve-gather-overhead", cl::init(10), cl::Hidden) ( "sve-gather-overhead" ,
cl::init(10) ,
cl::Hidden  )
static

◆ SVEPreferFixedOverScalableIfEqualCost

cl::opt< bool > SVEPreferFixedOverScalableIfEqualCost("sve-prefer-fixed-over-scalable-if-equal", cl::Hidden) ( "sve-prefer-fixed-over-scalable-if-equal" ,
cl::Hidden  )
static

◆ SVEScatterOverhead

cl::opt< unsigned > SVEScatterOverhead("sve-scatter-overhead", cl::init(10), cl::Hidden) ( "sve-scatter-overhead" ,
cl::init(10) ,
cl::Hidden  )
static

◆ SVETailFolding

cl::opt< TailFoldingOption, true, cl::parser< std::string > > SVETailFolding("sve-tail-folding", cl::desc( "Control the use of vectorisation using tail-folding for SVE where the" " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:" "\ndisabled (Initial) No loop types will vectorize using " "tail-folding" "\ndefault (Initial) Uses the default tail-folding settings for " "the target CPU" "\nall (Initial) All legal loop types will vectorize using " "tail-folding" "\nsimple (Initial) Use tail-folding for simple loops (not " "reductions or recurrences)" "\nreductions Use tail-folding for loops containing reductions" "\nnoreductions Inverse of above" "\nrecurrences Use tail-folding for loops containing fixed order " "recurrences" "\nnorecurrences Inverse of above" "\nreverse Use tail-folding for loops requiring reversed " "predicates" "\nnoreverse Inverse of above"), cl::location(TailFoldingOptionLoc)) ( "sve-tail-folding" ,
cl::desc( "Control the use of vectorisation using tail-folding for SVE where the" " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:" "\ndisabled (Initial) No loop types will vectorize using " "tail-folding" "\ndefault (Initial) Uses the default tail-folding settings for " "the target CPU" "\nall (Initial) All legal loop types will vectorize using " "tail-folding" "\nsimple (Initial) Use tail-folding for simple loops (not " "reductions or recurrences)" "\nreductions Use tail-folding for loops containing reductions" "\nnoreductions Inverse of above" "\nrecurrences Use tail-folding for loops containing fixed order " "recurrences" "\nnorecurrences Inverse of above" "\nreverse Use tail-folding for loops requiring reversed " "predicates" "\nnoreverse Inverse of above") ,
cl::location(TailFoldingOptionLoc)  )
static

◆ SVETailFoldInsnThreshold

cl::opt< unsigned > SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold", cl::init(15), cl::Hidden) ( "sve-tail-folding-insn-threshold" ,
cl::init(15) ,
cl::Hidden  )
static

◆ TailFoldingOptionLoc

TailFoldingOption TailFoldingOptionLoc