
LLVM 22.0.0git
SIISelLowering.h
//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI DAG Lowering interface definition
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h"
#include "SIDefines.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class GCNSubtarget;
class SIMachineFunctionInfo;
class SIRegisterInfo;

namespace AMDGPU {
struct ImageDimIntrinsicInfo;
}

class SITargetLowering final : public AMDGPUTargetLowering {
private:
  const GCNSubtarget *Subtarget;

public:
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const override;

private:
  SDValue lowerKernArgParameterPtr(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Chain, uint64_t Offset) const;
  SDValue getImplicitArgPtr(SelectionDAG &DAG, const SDLoc &SL) const;
  SDValue getLDSKernelId(SelectionDAG &DAG, const SDLoc &SL) const;
  SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                   const SDLoc &SL, SDValue Chain,
                                   uint64_t Offset, Align Alignment,
                                   bool Signed,
                                   const ISD::InputArg *Arg = nullptr) const;
  SDValue loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT, const SDLoc &DL,
                                     Align Alignment,
                                     ImplicitParameter Param) const;

  SDValue convertABITypeToValueType(SelectionDAG &DAG, SDValue Val,
                                    CCValAssign &VA, const SDLoc &SL) const;

  SDValue lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                              const SDLoc &SL, SDValue Chain,
                              const ISD::InputArg &Arg) const;
  SDValue lowerWorkGroupId(
      SelectionDAG &DAG, const SIMachineFunctionInfo &MFI, EVT VT,
      AMDGPUFunctionArgInfo::PreloadedValue WorkGroupIdPV,
      AMDGPUFunctionArgInfo::PreloadedValue ClusterIdPV,
      AMDGPUFunctionArgInfo::PreloadedValue ClusterWorkGroupIdPV) const;
  SDValue getPreloadedValue(SelectionDAG &DAG,
                            const SIMachineFunctionInfo &MFI,
                            EVT VT,
                            AMDGPUFunctionArgInfo::PreloadedValue) const;

  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                             SelectionDAG &DAG) const override;
  SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
                                 MVT VT, unsigned Offset) const;
  SDValue lowerImage(SDValue Op, const AMDGPU::ImageDimIntrinsicInfo *Intr,
                     SelectionDAG &DAG, bool WithChain) const;
  SDValue lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, SDValue Offset,
                       SDValue CachePolicy, SelectionDAG &DAG) const;

  SDValue lowerRawBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                     unsigned NewOpcode) const;
  SDValue lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                        unsigned NewOpcode) const;

  SDValue lowerWaveID(SelectionDAG &DAG, SDValue Op) const;
  SDValue lowerConstHwRegRead(SelectionDAG &DAG, SDValue Op,
                              AMDGPU::Hwreg::Id HwReg, unsigned LowBit,
                              unsigned Width) const;
  SDValue lowerWorkitemID(SelectionDAG &DAG, SDValue Op, unsigned Dim,
                          const ArgDescriptor &ArgDesc) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  // The raw.tbuffer and struct.tbuffer intrinsics have two offset args: offset
  // (the offset that is included in bounds checking and swizzling, to be split
  // between the instruction's voffset and immoffset fields) and soffset (the
  // offset that is excluded from bounds checking and swizzling, to go in the
  // instruction's soffset field). This function takes the first kind of
  // offset and figures out how to split it between voffset and immoffset.
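  // For example (illustrative only; the legal immediate offset range depends
  // on the subtarget), an incoming offset of (v + 4100) might be split into a
  // voffset of (v + 4096) and an immoffset of 4.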
  std::pair<SDValue, SDValue> splitBufferOffsets(SDValue Offset,
                                                 SelectionDAG &DAG) const;

  SDValue widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFastUnsafeFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFastUnsafeFDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFREXP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF16(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF32(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue adjustLoadValueType(unsigned Opcode, MemSDNode *M,
                              SelectionDAG &DAG, ArrayRef<SDValue> Ops,
                              bool IsIntrinsic = false) const;

  SDValue lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, SelectionDAG &DAG,
                             ArrayRef<SDValue> Ops) const;

  // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
  // dwordx4 if on SI.
  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops, EVT MemVT,
                              MachineMemOperand *MMO, SelectionDAG &DAG) const;

  SDValue handleD16VData(SDValue VData, SelectionDAG &DAG,
                         bool ImageStore = false) const;

  /// Converts \p Op, which must be of floating point type, to the
  /// floating point type \p VT, by either extending or truncating it.
  SDValue getFPExtOrFPRound(SelectionDAG &DAG,
                            SDValue Op,
                            const SDLoc &DL,
                            EVT VT) const;

  SDValue convertArgType(
      SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
      bool Signed, const ISD::InputArg *Arg = nullptr) const;

  /// Custom lowering for ISD::FP_ROUND for MVT::f16.
  SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitFP_ROUNDVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINIMUMNUM_FMAXIMUMNUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINIMUM_FMAXIMUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
  SDValue promoteUniformOpToI32(SDValue Op, DAGCombinerInfo &DCI) const;
  SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXMULO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;

  SDValue getSegmentAperture(unsigned AS, const SDLoc &DL,
                             SelectionDAG &DAG) const;

  SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapEndpgm(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapHsaQueuePtr(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapHsa(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const;

  SDNode *adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;

  SDValue performUCharToFloatCombine(SDNode *N,
                                     DAGCombinerInfo &DCI) const;
  SDValue performFCopySignCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue performSHLPtrCombine(SDNode *N,
                               unsigned AS,
                               EVT MemVT,
                               DAGCombinerInfo &DCI) const;

  SDValue performMemSDNodeCombine(MemSDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOp(DAGCombinerInfo &DCI, const SDLoc &SL,
                                   unsigned Opc, SDValue LHS,
                                   const ConstantSDNode *CRHS) const;

  SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performXorCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performZeroExtendCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSignExtendInRegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue getCanonicalConstantFP(SelectionDAG &DAG, const SDLoc &SL, EVT VT,
                                 const APFloat &C) const;
  SDValue performFCanonicalizeCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                  SDValue Op0, SDValue Op1) const;
  SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Src, SDValue MinVal, SDValue MaxVal,
                                   bool Signed) const;
  SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMed3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtPkRTZCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performInsertVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFPRoundCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue reassociateScalarOps(SDNode *N, SelectionDAG &DAG) const;
  unsigned getFusedOpcode(const SelectionDAG &DAG,
                          const SDNode *N0, const SDNode *N1) const;
  SDValue tryFoldToMad64_32(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue foldAddSub64WithZeroLowBitsTo32(SDNode *N,
                                          DAGCombinerInfo &DCI) const;

  SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performPtrAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFDivCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMACombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtF32UByteNCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;

  unsigned isCFIntrinsic(const SDNode *Intr) const;

public:
  /// \returns True if fixup needs to be emitted for given global value \p GV,
  /// false otherwise.
  bool shouldEmitFixup(const GlobalValue *GV) const;

  /// \returns True if GOT relocation needs to be emitted for given global value
  /// \p GV, false otherwise.
  bool shouldEmitGOTReloc(const GlobalValue *GV) const;

  /// \returns True if PC-relative relocation needs to be emitted for given
  /// global value \p GV, false otherwise.
  bool shouldEmitPCReloc(const GlobalValue *GV) const;

  /// \returns true if this should use a literal constant for an LDS address,
  /// and not emit a relocation for an LDS global.
  bool shouldUseLDSConstAddress(const GlobalValue *GV) const;

  /// Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be
  /// expanded into a set of cmp/select instructions.
  static bool shouldExpandVectorDynExt(unsigned EltSize, unsigned NumElem,
                                       bool IsDivergentIdx,
                                       const GCNSubtarget *Subtarget);

  bool shouldExpandVectorDynExt(SDNode *N) const;

  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

  bool canTransformPtrArithOutOfBounds(const Function &F,
                                       EVT PtrVT) const override;

private:
  // Analyze a combined offset from an amdgcn_s_buffer_load intrinsic and store
  // the three offsets (voffset, soffset and instoffset) into the SDValue[3]
  // array pointed to by Offsets.
  void setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG,
                        SDValue *Offsets, Align Alignment = Align(4)) const;

  // Convert the i128 that an addrspace(8) pointer is natively represented as
  // into the v4i32 that all the buffer intrinsics expect to receive. We can't
  // add register classes for i128 on pain of the promotion logic going haywire,
  // so this slightly ugly hack is what we've got. If passed a non-pointer
  // argument (as would be seen in older buffer intrinsics), does nothing.
  SDValue bufferRsrcPtrToVector(SDValue MaybePointer, SelectionDAG &DAG) const;

  // Wrap a 64-bit pointer into a v4i32 (which is how all SelectionDAG code
  // represents ptr addrspace(8)) using the flags specified in the intrinsic.
  SDValue lowerPointerAsRsrcIntrin(SDNode *Op, SelectionDAG &DAG) const;

  // Handle 8 bit and 16 bit buffer loads.
  SDValue handleByteShortBufferLoads(SelectionDAG &DAG, EVT LoadVT, SDLoc DL,
                                     ArrayRef<SDValue> Ops,
                                     MachineMemOperand *MMO,
                                     bool IsTFE = false) const;

  // Handle 8 bit and 16 bit buffer stores.
  SDValue handleByteShortBufferStores(SelectionDAG &DAG, EVT VDataType,
                                      SDLoc DL, SDValue Ops[],
                                      MemSDNode *M) const;

public:
  SITargetLowering(const TargetMachine &tm, const GCNSubtarget &STI);

  const GCNSubtarget *getSubtarget() const;

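  /// Returns a 0-terminated array of rounding control registers that can be
  /// attached into strict FP call.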
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT,
                       EVT SrcVT) const override;

  bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy,
                       LLT SrcTy) const override;

  bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const override;

  // While address space 7 should never make it to codegen, it still needs to
  // have an MVT to prevent some analyses that query this function from
  // breaking. We use the custom MVT::amdgpuBufferFatPointer and
  // MVT::amdgpuBufferStridedPointer for this, though we use v8i32 for the
  // memory type (which is probably unused).
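  // getPointerTy maps address space 7 to MVT::amdgpuBufferFatPointer because
  // that is its in-memory representation, while getPointerMemTy reports the
  // in-memory representation of a p7 as {p8, i32}, aka v8i32 once padding is
  // added.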
  MVT getPointerTy(const DataLayout &DL, unsigned AS) const override;
  MVT getPointerMemTy(const DataLayout &DL, unsigned AS) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                          MachineFunction &MF,
                          unsigned IntrinsicID) const override;

  void CollectTargetIntrinsicOperands(const CallInst &I,
                                      SmallVectorImpl<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

  bool getAddrModeArguments(const IntrinsicInst *I,
                            SmallVectorImpl<Value *> &Ops,
                            Type *&AccessTy) const override;

  bool isLegalFlatAddressingMode(const AddrMode &AM, unsigned AddrSpace) const;
  bool isLegalGlobalAddressingMode(const AddrMode &AM) const;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  bool canMergeStoresTo(unsigned AS, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool allowsMisalignedMemoryAccessesImpl(
      unsigned Size, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const;

  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const override {
    if (IsFast)
      *IsFast = 0;
    return allowsMisalignedMemoryAccessesImpl(Ty.getSizeInBits(), AddrSpace,
                                              Alignment, Flags, IsFast);
  }

  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AS, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;

  static bool isNonGlobalAddrSpace(unsigned AS);

  bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;

  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;
  bool isExtractVecEltCheap(EVT VT, unsigned Index) const override;

  bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  unsigned combineRepeatedFPDivisors() const override {
    // Combine multiple FDIVs with the same divisor into multiple FMULs by the
    // reciprocal.
    return 2;
  }

  bool supportSplitCSR(MachineFunction *MF) const override;
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  void passSpecialInputs(
      CallLoweringInfo &CLI,
      CCState &CCInfo,
      const SIMachineFunctionInfo &Info,
      SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
      SmallVectorImpl<SDValue> &MemOpChains,
      SDValue Chain) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool mayBeEmittedAsTailCall(const CallInst *) const override;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerGET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerROTR(SDValue Op, SelectionDAG &DAG) const;

  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  MachineBasicBlock *splitKillBlock(MachineInstr &MI,
                                    MachineBasicBlock *BB) const;

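  /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.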
  void bundleInstWithWaitcnt(MachineInstr &MI) const;
  MachineBasicBlock *emitGWSMemViolTestLoop(MachineInstr &MI,
                                            MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  bool enableAggressiveFMAFusion(EVT VT) const override;
  bool enableAggressiveFMAFusion(LLT Ty) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
  LLT getPreferredShiftAmountTy(LLT Ty) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  const LLT Ty) const override;
  bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const override;
  bool isFMADLegal(const MachineInstr &MI, const LLT Ty) const override;

  SDValue splitUnaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitBinaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitTernaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
  void AddMemOpInit(MachineInstr &MI) const;
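  /// Assign the register class depending on the number of bits set in the
  /// writemask.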
  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;

  MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
                                SDValue Ptr) const;
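  /// Return a resource descriptor with the 'Add TID' bit enabled.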
  MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
                           uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  ConstraintType getConstraintType(StringRef Constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
  bool getAsmOperandConstVal(SDValue Op, uint64_t &Val) const;
  bool checkAsmConstraintVal(SDValue Op, StringRef Constraint,
                             uint64_t Val) const;
  bool checkAsmConstraintValA(SDValue Op,
                              uint64_t Val,
                              unsigned MaxSize = 64) const;
  SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
                   SDValue V) const;

  void finalizeLowering(MachineFunction &MF) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;
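  /// Determine which of the bits of FrameIndex \p FrameIdx are known to be 0.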
  void computeKnownBitsForFrameIndex(int FrameIdx,
                                     KnownBits &Known,
                                     const MachineFunction &MF) const override;
  void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R,
                                      KnownBits &Known,
                                      const APInt &DemandedElts,
                                      const MachineRegisterInfo &MRI,
                                      unsigned Depth = 0) const override;

  Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
                                        Register R,
                                        const MachineRegisterInfo &MRI,
                                        unsigned Depth = 0) const override;
  bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI,
                                  UniformityInfo *UA) const override;

  bool hasMemSDNodeUser(SDNode *N) const;

  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                           Register N1) const override;

  bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                       unsigned MaxDepth = 5) const;
  bool isCanonicalized(Register Reg, const MachineFunction &MF,
                       unsigned MaxDepth = 5) const;
  bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;
  bool denormalsEnabledForType(LLT Ty, const MachineFunction &MF) const;

  bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                 const TargetRegisterInfo *TRI,
                                 const TargetInstrInfo *TII,
                                 MCRegister &PhysReg, int &Cost) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts,
                                    const SelectionDAG &DAG, bool SNaN = false,
                                    unsigned Depth = 0) const override;

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  void emitExpandAtomicAddrSpacePredicate(Instruction *AI) const;
  void emitExpandAtomicRMW(AtomicRMWInst *AI) const override;
  void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const override;
  void emitExpandAtomicLoad(LoadInst *LI) const override;
  void emitExpandAtomicStore(StoreInst *SI) const override;

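  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as a fetch_add of 0) can be turned into a fence followed by an
  /// atomic load.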
  LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;

  const TargetRegisterClass *getRegClassFor(MVT VT,
                                            bool isDivergent) const override;
  bool requiresUniformRegister(MachineFunction &MF,
                               const Value *V) const override;
  Align getPrefLoopAlignment(MachineLoop *ML) const override;

  void allocateHSAUserSGPRs(CCState &CCInfo,
                            MachineFunction &MF,
                            const SIRegisterInfo &TRI,
                            SIMachineFunctionInfo &Info) const;

  void allocatePreloadKernArgSGPRs(CCState &CCInfo,
                                   SmallVectorImpl<CCValAssign> &ArgLocs,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   MachineFunction &MF,
                                   const SIRegisterInfo &TRI,
                                   SIMachineFunctionInfo &Info) const;

  void allocateLDSKernelId(CCState &CCInfo, MachineFunction &MF,
                           const SIRegisterInfo &TRI,
                           SIMachineFunctionInfo &Info) const;

  void allocateSystemSGPRs(CCState &CCInfo,
                           MachineFunction &MF,
                           SIMachineFunctionInfo &Info,
                           CallingConv::ID CallConv,
                           bool IsShader) const;

  void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) const;
  void allocateSpecialInputSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) const;

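  /// Allocate implicit function VGPR arguments at the end of allocated user
  /// arguments.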
  void allocateSpecialInputVGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) const;
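  /// Allocate implicit function VGPR arguments in fixed registers.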
  void allocateSpecialInputVGPRsFixed(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
};

// Returns true if argument is a boolean value which is not serialized into
// memory or argument and does not require v_cndmask_b32 to be deserialized.
bool isBoolSGPR(SDValue V);

} // End namespace llvm

#endif