
LLVM 22.0.0git
ARMTargetTransformInfo.h
//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {

class APInt;
class ARMTargetLowering;
class Instruction;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;

namespace TailPredication {
enum Mode {
  Disabled = 0,
  EnabledNoReductions,
  Enabled,
  ForceEnabledNoReductions,
  ForceEnabled
};
}

// For controlling conversion of memcpy into Tail Predicated loop.
namespace TPLoop {
enum MemTransfer { ForceDisabled = 0, ForceEnabled, Allow };
}

class ARMTTIImpl final : public BasicTTIImplBase<ARMTTIImpl> {
  using BaseT = BasicTTIImplBase<ARMTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  // Currently the following features are excluded from InlineFeaturesAllowed:
  // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32.
  // Depending on whether they are set or unset, different
  // instructions/registers are available. For example, inlining a callee with
  // -thumb-mode into a caller with +thumb-mode may cause the assembler to
  // fail if the callee uses ARM-only instructions, e.g. in inline asm
  // (see the sketch after the initializer below).
  const FeatureBitset InlineFeaturesAllowed = {
      ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
      ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
      ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
      ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
      ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
      ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
      ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
      ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
      ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
      ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
      ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
      ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
      ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
      ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
      ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
      ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
      ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
      ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
      ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
      ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
      ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
      ARM::FeatureAClass, ARM::FeatureStrictAlign, ARM::FeatureLongCalls,
      ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt,
      ARM::FeatureNoNegativeImmediates
  };
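  // Sketch of how this allow-list is used (the actual logic lives in
  // ARMTargetTransformInfo.cpp and may differ in detail): features outside the
  // list must match exactly between caller and callee, while allow-listed
  // features of the callee must be a subset of the caller's, roughly:
  //   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
  //                     (CalleeBits & ~InlineFeaturesAllowed);
  //   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
  //                      (CalleeBits & InlineFeaturesAllowed);
  //   return MatchExact && MatchSubset;
  // where CallerBits/CalleeBits are the per-function subtarget FeatureBitsets.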

  const ARMSubtarget *getST() const { return ST; }
  const ARMTargetLowering *getTLI() const { return TLI; }

public:
  explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;

  bool enableInterleavedAccessVectorization() const override { return true; }

  TTI::AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override;

  /// Floating-point computation using ARMv8 AArch32 Advanced
  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
  /// and Arm MVE are IEEE-754 compliant.
  bool isFPVectorizationPotentiallyUnsafe() const override {
    return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
  }

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  /// \name Scalar TTI Implementations
  /// @{

  InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                        const APInt &Imm,
                                        Type *Ty) const override;

  using BaseT::getIntImmCost;
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const override;

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const override {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      if (ST->hasMVEIntegerOps())
        return 8;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }

  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(32);
    case TargetTransformInfo::RGK_FixedWidthVector:
      if (ST->hasNEON())
        return TypeSize::getFixed(128);
      if (ST->hasMVEIntegerOps())
        return TypeSize::getFixed(128);
      return TypeSize::getFixed(0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(0);
    }
    llvm_unreachable("Unsupported register kind");
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) const override {
    return ST->getMaxInterleaveFactor();
  }

  bool isProfitableLSRChainElement(Instruction *I) const override;

  bool isLegalMaskedLoad(Type *DataTy, Align Alignment,
                         unsigned AddressSpace) const override;

  bool isLegalMaskedStore(Type *DataTy, Align Alignment,
                          unsigned AddressSpace) const override {
    return isLegalMaskedLoad(DataTy, Alignment, AddressSpace);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override {
    // For MVE, we have a custom lowering pass that will already have custom
    // legalised any gathers that we can lower to MVE intrinsics, and want to
    // expand all the rest. The pass runs before the masked intrinsic lowering
    // pass.
    return true;
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }

  bool isLegalMaskedGather(Type *Ty, Align Alignment) const override;

  bool isLegalMaskedScatter(Type *Ty, Align Alignment) const override {
    return isLegalMaskedGather(Ty, Alignment);
  }

  InstructionCost getMemcpyCost(const Instruction *I) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return ST->getMaxInlineSizeThreshold();
  }

  int getNumMemOps(const IntrinsicInst *I) const;

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override;

  bool preferInLoopReduction(RecurKind Kind, Type *Ty) const override;

  bool preferPredicatedReductionSelect() const override;

  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return false;
  }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                   const Instruction *I = nullptr) const override;

  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override;

  InstructionCost
  getAddressComputationCost(Type *Val, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;

  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;

  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *ValTy, std::optional<FastMathFlags> FMF,
                           TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
                         VectorType *ValTy,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  /// getScalingFactorCost - Return the cost of the scaling used in the
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value is an invalid cost.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override;
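  // Caller-side sketch (illustrative, not code from this file): the result can
  // be tested for support before comparing addressing modes, e.g.
  //   InstructionCost Cost = TTI.getScalingFactorCost(Ty, BaseGV, BaseOffset,
  //                                                   HasBaseReg, Scale, AS);
  //   if (Cost.isValid())
  //     ...; // this addressing mode is supported; a smaller cost is better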

  bool maybeLoweredToCall(Instruction &I) const;
  bool isLoweredToCall(const Function *F) const override;
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const override;
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override;
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override;

  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) const override;
  bool shouldBuildLookupTablesForConstant(Constant *C) const override {
    // In the ROPI and RWPI relocation models we can't have pointers to global
    // variables or functions in constant data, so don't convert switches to
    // lookup tables if any of the values would need relocation.
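    // For example, a switch lowered to a table of pointers to functions or to
    // global strings would produce exactly such relocatable constant data.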
    if (ST->isROPI() || ST->isRWPI())
      return !C->needsDynamicRelocation();

    return true;
  }

  bool hasArmWideBranch(bool Thumb) const override;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  unsigned getNumBytesToPadGlobalArray(unsigned Size,
                                       Type *ArrayType) const override;

  /// @}
};

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
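/// For example, with 32-bit elements and BlockSize 64 (a VREV64.32 on v4i32)
/// the matching shuffle mask is <1, 0, 3, 2>, and with 8-bit elements and
/// BlockSize 32 (a VREV32.8 on v8i8) it is <3, 2, 1, 0, 7, 6, 5, 4>.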
inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz != 8 && EltSz != 16 && EltSz != 32)
    return false;

  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0, e = M.size(); i < e; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

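// Usage sketch (illustrative; assumes the caller already has a
// ShuffleVectorInst *SVI, a TargetLowering pointer TLI and the DataLayout DL):
//   ArrayRef<int> Mask = SVI->getShuffleMask();
//   EVT VT = TLI->getValueType(DL, SVI->getType());
//   if (isVREVMask(Mask, VT, 64))
//     ...; // the shuffle can be selected as a VREV64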
} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H