Thanks for visiting codestin.com.
Credit goes to llvm.org.

LLVM 22.0.0git
SPIRVEmitIntrinsics.cpp
Go to the documentation of this file.
//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//
14#include "SPIRV.h"
15#include "SPIRVBuiltins.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVTargetMachine.h"
18#include "SPIRVUtils.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/InstVisitor.h"
23#include "llvm/IR/IntrinsicsSPIRV.h"
27
28#include <cassert>
29#include <queue>
30#include <unordered_set>
31
// This pass performs the following transformation on LLVM IR level required
// for the following translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregates-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.
50using namespace llvm;
51
52namespace llvm::SPIRV {
53#define GET_BuiltinGroup_DECL
54#include "SPIRVGenTables.inc"
55} // namespace llvm::SPIRV
56
57namespace {
58
59class SPIRVEmitIntrinsics
60 : public ModulePass,
61 public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
62 SPIRVTargetMachine *TM = nullptr;
63 SPIRVGlobalRegistry *GR = nullptr;
64 Function *CurrF = nullptr;
65 bool TrackConstants = true;
66 bool HaveFunPtrs = false;
67 DenseMap<Instruction *, Constant *> AggrConsts;
68 DenseMap<Instruction *, Type *> AggrConstTypes;
69 DenseSet<Instruction *> AggrStores;
70 std::unordered_set<Value *> Named;
71
72 // map of function declarations to <pointer arg index => element type>
73 DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;
74
75 // a register of Instructions that don't have a complete type definition
76 bool CanTodoType = true;
77 unsigned TodoTypeSz = 0;
78 DenseMap<Value *, bool> TodoType;
79 void insertTodoType(Value *Op) {
80 // TODO: add isa<CallInst>(Op) to no-insert
81 if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
82 auto It = TodoType.try_emplace(Op, true);
83 if (It.second)
84 ++TodoTypeSz;
85 }
86 }
87 void eraseTodoType(Value *Op) {
88 auto It = TodoType.find(Op);
89 if (It != TodoType.end() && It->second) {
90 It->second = false;
91 --TodoTypeSz;
92 }
93 }
94 bool isTodoType(Value *Op) {
96 return false;
97 auto It = TodoType.find(Op);
98 return It != TodoType.end() && It->second;
99 }
100 // a register of Instructions that were visited by deduceOperandElementType()
101 // to validate operand types with an instruction
102 std::unordered_set<Instruction *> TypeValidated;
103
104 // well known result types of builtins
105 enum WellKnownTypes { Event };
106
107 // deduce element type of untyped pointers
108 Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
109 Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
110 Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
111 bool UnknownElemTypeI8,
112 bool IgnoreKnownType = false);
113 Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
114 bool UnknownElemTypeI8);
115 Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
116 std::unordered_set<Value *> &Visited,
117 bool UnknownElemTypeI8);
118 Type *deduceElementTypeByUsersDeep(Value *Op,
119 std::unordered_set<Value *> &Visited,
120 bool UnknownElemTypeI8);
121 void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
122 bool UnknownElemTypeI8);
123
124 // deduce nested types of composites
125 Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
126 Type *deduceNestedTypeHelper(User *U, Type *Ty,
127 std::unordered_set<Value *> &Visited,
128 bool UnknownElemTypeI8);
129
130 // deduce Types of operands of the Instruction if possible
131 void deduceOperandElementType(Instruction *I,
132 SmallPtrSet<Instruction *, 4> *IncompleteRets,
133 const SmallPtrSet<Value *, 4> *AskOps = nullptr,
134 bool IsPostprocessing = false);
135
136 void preprocessCompositeConstants(IRBuilder<> &B);
137 void preprocessUndefs(IRBuilder<> &B);
138
139 Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
140 bool IsPostprocessing);
141
142 void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
143 void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
144 bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
145 bool UnknownElemTypeI8);
146 void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
147 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
148 IRBuilder<> &B);
149 void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
150 Type *ExpectedElementType,
151 unsigned OperandToReplace,
152 IRBuilder<> &B);
153 void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
154 bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
156 void insertConstantsForFPFastMathDefault(Module &M);
157 void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
158 void processParamTypes(Function *F, IRBuilder<> &B);
159 void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
160 Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
161 Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
162 std::unordered_set<Function *> &FVisited);
163
164 bool deduceOperandElementTypeCalledFunction(
165 CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
166 Type *&KnownElemTy, bool &Incomplete);
167 void deduceOperandElementTypeFunctionPointer(
168 CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
169 Type *&KnownElemTy, bool IsPostprocessing);
170 bool deduceOperandElementTypeFunctionRet(
171 Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
172 const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
173 Type *&KnownElemTy, Value *Op, Function *F);
174
175 CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
176 void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
177 DenseMap<Function *, CallInst *> Ptrcasts);
178 void propagateElemType(Value *Op, Type *ElemTy,
179 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
180 void
181 propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
182 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
183 void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
184 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
185 std::unordered_set<Value *> &Visited,
186 DenseMap<Function *, CallInst *> Ptrcasts);
187
188 void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
189 void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
190 Instruction *Dest, bool DeleteOld = true);
191
192 void applyDemangledPtrArgTypes(IRBuilder<> &B);
193
194 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);
195
196 bool runOnFunction(Function &F);
197 bool postprocessTypes(Module &M);
198 bool processFunctionPointers(Module &M);
199 void parseFunDeclarations(Module &M);
200
201 void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);
202
203 // Tries to walk the type accessed by the given GEP instruction.
204 // For each nested type access, one of the 2 callbacks is called:
205 // - OnLiteralIndexing when the index is a known constant value.
206 // Parameters:
207 // PointedType: the pointed type resulting of this indexing.
208 // If the parent type is an array, this is the index in the array.
209 // If the parent type is a struct, this is the field index.
210 // Index: index of the element in the parent type.
211 // - OnDynamnicIndexing when the index is a non-constant value.
212 // This callback is only called when indexing into an array.
213 // Parameters:
214 // ElementType: the type of the elements stored in the parent array.
215 // Offset: the Value* containing the byte offset into the array.
216 // Return true if an error occured during the walk, false otherwise.
217 bool walkLogicalAccessChain(
218 GetElementPtrInst &GEP,
219 const std::function<void(Type *PointedType, uint64_t Index)>
220 &OnLiteralIndexing,
221 const std::function<void(Type *ElementType, Value *Offset)>
222 &OnDynamicIndexing);
223
224 // Returns the type accessed using the given GEP instruction by relying
225 // on the GEP type.
226 // FIXME: GEP types are not supposed to be used to retrieve the pointed
227 // type. This must be fixed.
228 Type *getGEPType(GetElementPtrInst *GEP);
229
230 // Returns the type accessed using the given GEP instruction by walking
231 // the source type using the GEP indices.
232 // FIXME: without help from the frontend, this method cannot reliably retrieve
233 // the stored type, nor can robustly determine the depth of the type
234 // we are accessing.
235 Type *getGEPTypeLogical(GetElementPtrInst *GEP);
236
237 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);
238
239public:
240 static char ID;
241 SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
242 : ModulePass(ID), TM(TM) {}
243 Instruction *visitInstruction(Instruction &I) { return &I; }
244 Instruction *visitSwitchInst(SwitchInst &I);
245 Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
246 Instruction *visitBitCastInst(BitCastInst &I);
247 Instruction *visitInsertElementInst(InsertElementInst &I);
248 Instruction *visitExtractElementInst(ExtractElementInst &I);
249 Instruction *visitInsertValueInst(InsertValueInst &I);
250 Instruction *visitExtractValueInst(ExtractValueInst &I);
251 Instruction *visitLoadInst(LoadInst &I);
252 Instruction *visitStoreInst(StoreInst &I);
253 Instruction *visitAllocaInst(AllocaInst &I);
254 Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
255 Instruction *visitUnreachableInst(UnreachableInst &I);
256 Instruction *visitCallInst(CallInst &I);
257
258 StringRef getPassName() const override { return "SPIRV emit intrinsics"; }
259
260 bool runOnModule(Module &M) override;
261
262 void getAnalysisUsage(AnalysisUsage &AU) const override {
263 ModulePass::getAnalysisUsage(AU);
264 }
265};
266
267bool isConvergenceIntrinsic(const Instruction *I) {
268 const auto *II = dyn_cast<IntrinsicInst>(I);
269 if (!II)
270 return false;
271
272 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
273 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
274 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
275}
276
277bool expectIgnoredInIRTranslation(const Instruction *I) {
278 const auto *II = dyn_cast<IntrinsicInst>(I);
279 if (!II)
280 return false;
281 switch (II->getIntrinsicID()) {
282 case Intrinsic::invariant_start:
283 case Intrinsic::spv_resource_handlefrombinding:
284 case Intrinsic::spv_resource_getpointer:
285 return true;
286 default:
287 return false;
288 }
289}
290
291// Returns the source pointer from `I` ignoring intermediate ptrcast.
292Value *getPointerRoot(Value *I) {
293 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
294 if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
295 Value *V = II->getArgOperand(0);
296 return getPointerRoot(V);
297 }
298 }
299 return I;
300}
301
302} // namespace
303
304char SPIRVEmitIntrinsics::ID = 0;
305
306INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
307 false, false)
308
309static inline bool isAssignTypeInstr(const Instruction *I) {
310 return isa<IntrinsicInst>(I) &&
311 cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
312}
313
318
319static bool isAggrConstForceInt32(const Value *V) {
320 return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
322 (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
323}
324
326 if (isa<PHINode>(I))
327 B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
328 else
329 B.SetInsertPoint(I);
330}
331
333 B.SetCurrentDebugLocation(I->getDebugLoc());
334 if (I->getType()->isVoidTy())
335 B.SetInsertPoint(I->getNextNode());
336 else
337 B.SetInsertPoint(*I->getInsertionPointAfterDef());
338}
339
341 if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
342 switch (Intr->getIntrinsicID()) {
343 case Intrinsic::invariant_start:
344 case Intrinsic::invariant_end:
345 return false;
346 }
347 }
348 return true;
349}
350
351static inline void reportFatalOnTokenType(const Instruction *I) {
352 if (I->getType()->isTokenTy())
353 report_fatal_error("A token is encountered but SPIR-V without extensions "
354 "does not support token type",
355 false);
356}
357
359 if (!I->hasName() || I->getType()->isAggregateType() ||
360 expectIgnoredInIRTranslation(I))
361 return;
364 LLVMContext &Ctx = I->getContext();
365 std::vector<Value *> Args = {
367 Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
368 B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
369}
370
371void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
372 bool DeleteOld) {
373 GR->replaceAllUsesWith(Src, Dest, DeleteOld);
374 // Update uncomplete type records if any
375 if (isTodoType(Src)) {
376 if (DeleteOld)
377 eraseTodoType(Src);
378 insertTodoType(Dest);
379 }
380}
381
382void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
383 Instruction *Src,
384 Instruction *Dest,
385 bool DeleteOld) {
386 replaceAllUsesWith(Src, Dest, DeleteOld);
387 std::string Name = Src->hasName() ? Src->getName().str() : "";
388 Src->eraseFromParent();
389 if (!Name.empty()) {
390 Dest->setName(Name);
391 if (Named.insert(Dest).second)
392 emitAssignName(Dest, B);
393 }
394}
395
397 return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
398 isPointerTy(SI->getValueOperand()->getType()) &&
399 isa<Argument>(SI->getValueOperand());
400}
401
402// Maybe restore original function return type.
404 Type *Ty) {
406 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
408 return Ty;
409 if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
410 return OriginalTy;
411 return Ty;
412}
413
414// Reconstruct type with nested element types according to deduced type info.
415// Return nullptr if no detailed type info is available.
416Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
417 bool IsPostprocessing) {
418 Type *Ty = Op->getType();
419 if (auto *OpI = dyn_cast<Instruction>(Op))
420 Ty = restoreMutatedType(GR, OpI, Ty);
421 if (!isUntypedPointerTy(Ty))
422 return Ty;
423 // try to find the pointee type
424 if (Type *NestedTy = GR->findDeducedElementType(Op))
426 // not a pointer according to the type info (e.g., Event object)
427 CallInst *CI = GR->findAssignPtrTypeInstr(Op);
428 if (CI) {
429 MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
430 return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
431 }
432 if (UnknownElemTypeI8) {
433 if (!IsPostprocessing)
434 insertTodoType(Op);
435 return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
437 }
438 return nullptr;
439}
440
441CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
442 Type *ElemTy) {
443 IRBuilder<> B(Op->getContext());
444 if (auto *OpI = dyn_cast<Instruction>(Op)) {
445 // spv_ptrcast's argument Op denotes an instruction that generates
446 // a value, and we may use getInsertionPointAfterDef()
448 } else if (auto *OpA = dyn_cast<Argument>(Op)) {
449 B.SetInsertPointPastAllocas(OpA->getParent());
450 B.SetCurrentDebugLocation(DebugLoc());
451 } else {
452 B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
453 }
454 Type *OpTy = Op->getType();
455 SmallVector<Type *, 2> Types = {OpTy, OpTy};
456 SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
457 B.getInt32(getPointerAddressSpace(OpTy))};
458 CallInst *PtrCasted =
459 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
460 GR->buildAssignPtr(B, ElemTy, PtrCasted);
461 return PtrCasted;
462}
463
464void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
465 Value *Op, Type *ElemTy, Instruction *I,
466 DenseMap<Function *, CallInst *> Ptrcasts) {
467 Function *F = I->getParent()->getParent();
468 CallInst *PtrCastedI = nullptr;
469 auto It = Ptrcasts.find(F);
470 if (It == Ptrcasts.end()) {
471 PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
472 Ptrcasts[F] = PtrCastedI;
473 } else {
474 PtrCastedI = It->second;
475 }
476 I->replaceUsesOfWith(Op, PtrCastedI);
477}
478
479void SPIRVEmitIntrinsics::propagateElemType(
480 Value *Op, Type *ElemTy,
481 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
482 DenseMap<Function *, CallInst *> Ptrcasts;
483 SmallVector<User *> Users(Op->users());
484 for (auto *U : Users) {
485 if (!isa<Instruction>(U) || isSpvIntrinsic(U))
486 continue;
487 if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
488 continue;
490 // If the instruction was validated already, we need to keep it valid by
491 // keeping current Op type.
492 if (isa<GetElementPtrInst>(UI) ||
493 TypeValidated.find(UI) != TypeValidated.end())
494 replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
495 }
496}
497
498void SPIRVEmitIntrinsics::propagateElemTypeRec(
499 Value *Op, Type *PtrElemTy, Type *CastElemTy,
500 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
501 std::unordered_set<Value *> Visited;
502 DenseMap<Function *, CallInst *> Ptrcasts;
503 propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
504 std::move(Ptrcasts));
505}
506
507void SPIRVEmitIntrinsics::propagateElemTypeRec(
508 Value *Op, Type *PtrElemTy, Type *CastElemTy,
509 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
510 std::unordered_set<Value *> &Visited,
511 DenseMap<Function *, CallInst *> Ptrcasts) {
512 if (!Visited.insert(Op).second)
513 return;
514 SmallVector<User *> Users(Op->users());
515 for (auto *U : Users) {
516 if (!isa<Instruction>(U) || isSpvIntrinsic(U))
517 continue;
518 if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
519 continue;
521 // If the instruction was validated already, we need to keep it valid by
522 // keeping current Op type.
523 if (isa<GetElementPtrInst>(UI) ||
524 TypeValidated.find(UI) != TypeValidated.end())
525 replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
526 }
527}
528
529// Set element pointer type to the given value of ValueTy and tries to
530// specify this type further (recursively) by Operand value, if needed.
531
532Type *
533SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
534 bool UnknownElemTypeI8) {
535 std::unordered_set<Value *> Visited;
536 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
537 UnknownElemTypeI8);
538}
539
540Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
541 Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
542 bool UnknownElemTypeI8) {
543 Type *Ty = ValueTy;
544 if (Operand) {
545 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
546 if (Type *NestedTy =
547 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
548 Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
549 } else {
550 Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
551 UnknownElemTypeI8);
552 }
553 }
554 return Ty;
555}
556
557// Traverse User instructions to deduce an element pointer type of the operand.
558Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
559 Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
560 if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
562 return nullptr;
563
564 if (auto ElemTy = getPointeeType(Op->getType()))
565 return ElemTy;
566
567 // maybe we already know operand's element type
568 if (Type *KnownTy = GR->findDeducedElementType(Op))
569 return KnownTy;
570
571 for (User *OpU : Op->users()) {
572 if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
573 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
574 return Ty;
575 }
576 }
577 return nullptr;
578}
579
580// Implements what we know in advance about intrinsics and builtin calls
581// TODO: consider feasibility of this particular case to be generalized by
582// encoding knowledge about intrinsics and builtin calls by corresponding
583// specification rules
585 Function *CalledF, unsigned OpIdx) {
586 if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
587 DemangledName.starts_with("printf(")) &&
588 OpIdx == 0)
589 return IntegerType::getInt8Ty(CalledF->getContext());
590 return nullptr;
591}
592
593// Deduce and return a successfully deduced Type of the Instruction,
594// or nullptr otherwise.
595Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
596 bool UnknownElemTypeI8) {
597 std::unordered_set<Value *> Visited;
598 return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
599}
600
601void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
602 bool UnknownElemTypeI8) {
603 if (isUntypedPointerTy(RefTy)) {
604 if (!UnknownElemTypeI8)
605 return;
606 insertTodoType(Op);
607 }
608 Ty = RefTy;
609}
610
611bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
612 GetElementPtrInst &GEP,
613 const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
614 const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
615 // We only rewrite i8* GEP. Other should be left as-is.
616 // Valid i8* GEP must always have a single index.
617 assert(GEP.getSourceElementType() ==
618 IntegerType::getInt8Ty(CurrF->getContext()));
619 assert(GEP.getNumIndices() == 1);
620
621 auto &DL = CurrF->getDataLayout();
622 Value *Src = getPointerRoot(GEP.getPointerOperand());
623 Type *CurType = deduceElementType(Src, true);
624
625 Value *Operand = *GEP.idx_begin();
626 ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
627 if (!CI) {
628 ArrayType *AT = dyn_cast<ArrayType>(CurType);
629 // Operand is not constant. Either we have an array and accept it, or we
630 // give up.
631 if (AT)
632 OnDynamicIndexing(AT->getElementType(), Operand);
633 return AT == nullptr;
634 }
635
636 assert(CI);
637 uint64_t Offset = CI->getZExtValue();
638
639 do {
640 if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
641 uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
642 assert(Offset < AT->getNumElements() * EltTypeSize);
643 uint64_t Index = Offset / EltTypeSize;
644 Offset = Offset - (Index * EltTypeSize);
645 CurType = AT->getElementType();
646 OnLiteralIndexing(CurType, Index);
647 } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
648 uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
649 assert(Offset < StructSize);
650 (void)StructSize;
651 const auto &STL = DL.getStructLayout(ST);
652 unsigned Element = STL->getElementContainingOffset(Offset);
653 Offset -= STL->getElementOffset(Element);
654 CurType = ST->getElementType(Element);
655 OnLiteralIndexing(CurType, Element);
656 } else {
657 // Vector type indexing should not use GEP.
658 // So if we have an index left, something is wrong. Giving up.
659 return true;
660 }
661 } while (Offset > 0);
662
663 return false;
664}
665
667SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
668 auto &DL = CurrF->getDataLayout();
669 IRBuilder<> B(GEP.getParent());
670 B.SetInsertPoint(&GEP);
671
672 std::vector<Value *> Indices;
673 Indices.push_back(ConstantInt::get(
674 IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
675 walkLogicalAccessChain(
676 GEP,
677 [&Indices, &B](Type *EltType, uint64_t Index) {
678 Indices.push_back(
679 ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
680 },
681 [&Indices, &B, &DL](Type *EltType, Value *Offset) {
682 uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
683 Value *Index = B.CreateUDiv(
684 Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
685 /* Signed= */ false));
686 Indices.push_back(Index);
687 });
688
689 SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
691 Args.push_back(B.getInt1(GEP.isInBounds()));
692 Args.push_back(GEP.getOperand(0));
693 llvm::append_range(Args, Indices);
694 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
695 replaceAllUsesWithAndErase(B, &GEP, NewI);
696 return NewI;
697}
698
699Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
700
701 Type *CurType = GEP->getResultElementType();
702
703 bool Interrupted = walkLogicalAccessChain(
704 *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
705 [&CurType](Type *EltType, Value *Index) { CurType = EltType; });
706
707 return Interrupted ? GEP->getResultElementType() : CurType;
708}
709
710Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
711 if (Ref->getSourceElementType() ==
712 IntegerType::getInt8Ty(CurrF->getContext()) &&
714 return getGEPTypeLogical(Ref);
715 }
716
717 Type *Ty = nullptr;
718 // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
719 // useful here
720 if (isNestedPointer(Ref->getSourceElementType())) {
721 Ty = Ref->getSourceElementType();
722 for (Use &U : drop_begin(Ref->indices()))
723 Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
724 } else {
725 Ty = Ref->getResultElementType();
726 }
727 return Ty;
728}
729
730Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
731 Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
732 bool IgnoreKnownType) {
733 // allow to pass nullptr as an argument
734 if (!I)
735 return nullptr;
736
737 // maybe already known
738 if (!IgnoreKnownType)
739 if (Type *KnownTy = GR->findDeducedElementType(I))
740 return KnownTy;
741
742 // maybe a cycle
743 if (!Visited.insert(I).second)
744 return nullptr;
745
746 // fallback value in case when we fail to deduce a type
747 Type *Ty = nullptr;
748 // look for known basic patterns of type inference
749 if (auto *Ref = dyn_cast<AllocaInst>(I)) {
750 maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
751 } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
752 Ty = getGEPType(Ref);
753 } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
754 Value *Op = Ref->getPointerOperand();
755 Type *KnownTy = GR->findDeducedElementType(Op);
756 if (!KnownTy)
757 KnownTy = Op->getType();
758 if (Type *ElemTy = getPointeeType(KnownTy))
759 maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
760 } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
761 Ty = deduceElementTypeByValueDeep(
762 Ref->getValueType(),
763 Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
764 UnknownElemTypeI8);
765 } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
766 Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
767 UnknownElemTypeI8);
768 maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
769 } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
770 if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
771 isPointerTy(Src) && isPointerTy(Dest))
772 Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
773 UnknownElemTypeI8);
774 } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
775 Value *Op = Ref->getNewValOperand();
776 if (isPointerTy(Op->getType()))
777 Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
778 } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
779 Value *Op = Ref->getValOperand();
780 if (isPointerTy(Op->getType()))
781 Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
782 } else if (auto *Ref = dyn_cast<PHINode>(I)) {
783 Type *BestTy = nullptr;
784 unsigned MaxN = 1;
785 DenseMap<Type *, unsigned> PhiTys;
786 for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
787 Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
788 UnknownElemTypeI8);
789 if (!Ty)
790 continue;
791 auto It = PhiTys.try_emplace(Ty, 1);
792 if (!It.second) {
793 ++It.first->second;
794 if (It.first->second > MaxN) {
795 MaxN = It.first->second;
796 BestTy = Ty;
797 }
798 }
799 }
800 if (BestTy)
801 Ty = BestTy;
802 } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
803 for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
804 Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
805 if (Ty)
806 break;
807 }
808 } else if (auto *CI = dyn_cast<CallInst>(I)) {
809 static StringMap<unsigned> ResTypeByArg = {
810 {"to_global", 0},
811 {"to_local", 0},
812 {"to_private", 0},
813 {"__spirv_GenericCastToPtr_ToGlobal", 0},
814 {"__spirv_GenericCastToPtr_ToLocal", 0},
815 {"__spirv_GenericCastToPtr_ToPrivate", 0},
816 {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
817 {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
818 {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
819 // TODO: maybe improve performance by caching demangled names
820
822 if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
823 auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
824 if (HandleType->getTargetExtName() == "spirv.Image" ||
825 HandleType->getTargetExtName() == "spirv.SignedImage") {
826 for (User *U : II->users()) {
827 Ty = cast<Instruction>(U)->getAccessType();
828 if (Ty)
829 break;
830 }
831 } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
832 // This call is supposed to index into an array
833 Ty = HandleType->getTypeParameter(0);
834 if (Ty->isArrayTy())
835 Ty = Ty->getArrayElementType();
836 else {
837 TargetExtType *BufferTy = cast<TargetExtType>(Ty);
838 assert(BufferTy->getTargetExtName() == "spirv.Layout");
839 Ty = BufferTy->getTypeParameter(0);
840 assert(Ty && Ty->isStructTy());
841 uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
842 Ty = cast<StructType>(Ty)->getElementType(Index);
843 }
844 } else {
845 llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
846 }
847 } else if (II && II->getIntrinsicID() ==
848 Intrinsic::spv_generic_cast_to_ptr_explicit) {
849 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
850 UnknownElemTypeI8);
851 } else if (Function *CalledF = CI->getCalledFunction()) {
852 std::string DemangledName =
853 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
854 if (DemangledName.length() > 0)
855 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
856 auto AsArgIt = ResTypeByArg.find(DemangledName);
857 if (AsArgIt != ResTypeByArg.end())
858 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
859 Visited, UnknownElemTypeI8);
860 else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
861 Ty = KnownRetTy;
862 }
863 }
864
865 // remember the found relationship
866 if (Ty && !IgnoreKnownType) {
867 // specify nested types if needed, otherwise return unchanged
869 }
870
871 return Ty;
872}
873
874// Re-create a type of the value if it has untyped pointer fields, also nested.
875// Return the original value type if no corrections of untyped pointer
876// information is found or needed.
877Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
878 bool UnknownElemTypeI8) {
879 std::unordered_set<Value *> Visited;
880 return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
881}
882
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  // Walks a composite value and rebuilds its type with typed-pointer wrappers
  // substituted for untyped pointer members/elements, caching the result in
  // the global registry. Returns OrigTy when no correction is found.
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    // NOTE(review): `Tys` accumulates the (possibly corrected) member types;
    // its declaration is expected here but is not visible in this excerpt --
    // confirm against upstream.
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        // Pointer member: deduce its pointee and wrap it as a typed pointer.
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        // Non-pointer member: recurse, it may itself contain pointers.
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    // Arrays are homogeneous: inspecting the first operand is sufficient.
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        // NOTE(review): registering NewTy via addDeducedCompositeType is
        // expected here (as in the struct/array cases) but is not visible in
        // this excerpt -- confirm against upstream.
        return NewTy;
      }
    }
  }

  return OrigTy;
}
961
962Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
963 if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
964 return Ty;
965 if (!UnknownElemTypeI8)
966 return nullptr;
967 insertTodoType(I);
968 return IntegerType::getInt8Ty(I->getContext());
969}
970
                             Value *PointerOperand) {
  // Deduce the element type for an atomic whose pointer operand is (still)
  // untyped. Returns nullptr when the pointee is already properly typed or
  // nothing can be deduced.
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr; // already typed -- nothing to refine
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType(); // non-pointer result: its own type is the element type
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}
983
984// Try to deduce element type for a call base. Returns false if this is an
985// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false; // indirect invocation: the caller handles it separately
  // NOTE(review): the initializer of DemangledName (presumably a call to
  // getOclOrSpirvBuiltinDemangledName(CalledF->getName()), as used elsewhere
  // in this file) is not visible in this excerpt -- confirm against upstream.
  std::string DemangledName =
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      // Only the first two pointer arguments are inspected; a deduced type of
      // a later one overwrites an earlier one (src rewrites dest).
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        // Result-producing atomics: the call's own type (or the atomic's
        // deduced pointee, for pointer results) fixes the element type.
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        // Atomic store: the stored value (operand 3) fixes the pointee type.
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}
1056
1057// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  // Sentinel index max(): the "operand" to rewrite is the called operand.
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = CI->getFunctionType();
  bool IsNewFTy = false, IsIncomplete = false;
  // NOTE(review): the declaration of `ArgTys` (the rebuilt parameter-type
  // list) is expected here but is not visible in this excerpt -- confirm
  // against upstream.
  for (Value *Arg : CI->args()) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        // Untyped pointer argument: the rebuilt type is not final yet.
        IsIncomplete = true;
      }
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      // NOTE(review): the initializer of RetTy (a typed-pointer wrapper of
      // ElemTy) is not visible in this excerpt -- confirm against upstream.
      RetTy =
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  // Revisit this called operand later if any element type is still unknown.
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}
1099
bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  // Deduce the pointee type of F's pointer return value from a `ret Op`.
  // Returns false when F's return element type is already known (the caller
  // then propagates KnownElemTy into Op); returns true when this function
  // handled (or deferred) the instruction itself.
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    // The returned value's pointee is known: record it as F's return element
    // type and update all existing callers of F to match.
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function uncomplete returns.
    // This may happen just once per a function, the latch is a pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    // Pointee still unknown: queue this return for a later revisit.
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}
1142
1143// If the Instruction has Pointer operands with unresolved types, this function
1144// tries to deduce them. If the Instruction has Pointer operands with known
1145// types which differ from expected, this function tries to insert a bitcast to
1146// resolve the issue.
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  // NOTE(review): the declaration of `Ops` (a SmallVector of value /
  // operand-index pairs populated below) is expected here but is not visible
  // in this excerpt; several Ops.push_back calls below are likewise missing
  // their second-argument line (the operand index) -- confirm against
  // upstream.
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    // A typed PHI result constrains every incoming pointer value.
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    // Addrspacecast preserves the pointee type across address spaces.
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    // The GEP's source element type fixes its pointer operand's pointee.
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    // A non-pointer load result fixes the pointee of the address operand.
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    // The stored value's reconstructed type fixes the address's pointee.
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    // A typed select result constrains both selected pointer operands.
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    // Pointer comparison: one side's known pointee constrains the other,
    // preferring a complete type over an incomplete one.
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // Not enough info to deduce types, or everything is already valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue; // operand already agrees with the deduced type
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      // Retype the operand itself via spv_assign_ptr_type.
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        // Update the existing assignment and propagate the refined type.
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      // Cannot (or should not) retype the value itself: insert an explicit
      // spv_ptrcast at this use site instead.
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}
1328
1329void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1330 Instruction *New,
1331 IRBuilder<> &B) {
1332 while (!Old->user_empty()) {
1333 auto *U = Old->user_back();
1334 if (isAssignTypeInstr(U)) {
1335 B.SetInsertPoint(U);
1336 SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
1337 CallInst *AssignCI =
1338 B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
1339 GR->addAssignPtrTypeInstr(New, AssignCI);
1340 U->eraseFromParent();
1341 } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
1342 isa<CallInst>(U)) {
1343 U->replaceUsesOfWith(Old, New);
1344 } else {
1345 llvm_unreachable("illegal aggregate intrinsic user");
1346 }
1347 }
1348 New->copyMetadata(*Old);
1349 Old->eraseFromParent();
1350}
1351
void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  // Replace aggregate-typed undef operands with spv_undef intrinsic calls so
  // that aggregate undefs survive IRTranslation.
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        // NOTE(review): a builder insert-point setup call is expected here
        // (line not visible in this excerpt) -- confirm against upstream.
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      // Newly created intrinsics are pushed so their operands get visited too.
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      // Remember the original constant and its type for later translation.
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}
1379
void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  // Replace direct usages of aggregate constants with spv_const_composite
  // intrinsic calls so aggregates survive IRTranslation.
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        // Vectors keep their own type; other aggregates use i32 placeholders.
        AggrConst = cast<Constant>(COp);
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        // NOTE(review): the declaration of `Args` (the element list for the
        // intrinsic) is expected here but is not visible in this excerpt --
        // confirm against upstream.
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          // PHI operands must be materialized before the PHI's block, i.e.
          // right past the allocas of the function.
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        // The intrinsic replaced an operand of I, so I must be revisited.
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}
1434
                                      IRBuilder<> &B) {
  // Attach the decoration MDNode to instruction I by emitting a
  // spv_assign_decoration intrinsic carrying the node as metadata.
  LLVMContext &Ctx = I->getContext();
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}
1442
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  // Build an FPRoundingMode decoration node (decoration id + mode operand)
  // and attach it to I.
  LLVMContext &Ctx = I->getContext();
  // NOTE(review): the declaration of Int32Ty and the opening of the metadata
  // operand list are not visible in this excerpt -- confirm against upstream.
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
          ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}
1455
                                                IRBuilder<> &B) {
  // Build a SaturatedConversion decoration node and attach it to I.
  LLVMContext &Ctx = I->getContext();
  // NOTE(review): the declaration of Int32Ty is not visible in this
  // excerpt -- confirm against upstream.
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}
1465
  if (auto *CI = dyn_cast<CallInst>(I)) {
    // Saturating float-to-int conversion intrinsics get a SaturatedConversion
    // decoration; all other calls are left untouched.
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        unsigned const int IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          // NOTE(review): the decoration-emitting call for these cases is not
          // visible in this excerpt -- confirm against upstream.
          break;
        default:
          break;
        }
      }
    }
  }
}
1483
Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  // Only inline-asm calls are rewritten here; regular calls pass through
  // unchanged.
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  // Encode the asm signature and constraint string as metadata operands.
  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  // NOTE(review): the declaration of `Args` (initialized with the two
  // metadata operands below) is not visible in this excerpt -- confirm
  // against upstream.
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  // NOTE(review): the IRBuilder declaration is expected here but is not
  // visible in this excerpt -- confirm against upstream.
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}
1504
1505// Use a tip about rounding mode to create a decoration.
1506void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1507 IRBuilder<> &B) {
1508 std::optional<RoundingMode> RM = FPI->getRoundingMode();
1509 if (!RM.has_value())
1510 return;
1511 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1512 switch (RM.value()) {
1513 default:
1514 // ignore unknown rounding modes
1515 break;
1516 case RoundingMode::NearestTiesToEven:
1517 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1518 break;
1519 case RoundingMode::TowardNegative:
1520 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1521 break;
1522 case RoundingMode::TowardPositive:
1523 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1524 break;
1525 case RoundingMode::TowardZero:
1526 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1527 break;
1528 case RoundingMode::Dynamic:
1529 case RoundingMode::NearestTiesToAway:
1530 // TODO: check if supported
1531 break;
1532 }
1533 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1534 return;
1535 // Convert the tip about rounding mode into a decoration record.
1536 createRoundingModeDecoration(FPI, RoundingModeDeco, B);
1537}
1538
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  // Lower a switch into a spv_switch intrinsic (condition + case values +
  // block addresses), keeping the CFG valid with a placeholder indirectbr.
  BasicBlock *ParentBB = I.getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  // NOTE(review): declarations of `Args` and `BBCases` are expected here but
  // are not visible in this excerpt -- confirm against upstream.
  for (auto &Op : I.operands()) {
    if (Op.get()->getType()->isSized()) {
      // Condition and case values are passed through as-is.
      Args.push_back(Op);
    } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op.get())) {
      // Successor blocks are encoded as block addresses.
      BBCases.push_back(BB);
      Args.push_back(BlockAddress::get(BB->getParent(), BB));
    } else {
      report_fatal_error("Unexpected switch operand");
    }
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove switch to avoid its unneeded and undesirable unwrap into branches
  // and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert artificial and temporary instruction to preserve valid CFG,
  // it will be removed after IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}
1571
Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  // For i8-source GEPs, first try to rebuild a structured access chain.
  if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
      // NOTE(review): the remainder of this condition is not visible in this
      // excerpt -- confirm against upstream.
    Instruction *Result = buildLogicalAccessChainFromGEP(I);
    if (Result)
      return Result;
  }

  // Otherwise lower the GEP to the spv_gep intrinsic, preserving inbounds.
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  // NOTE(review): the declaration of `Args` is expected here but is not
  // visible in this excerpt -- confirm against upstream.
  Args.push_back(B.getInt1(I.isInBounds()));
  llvm::append_range(Args, I.operands());
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
1590
1591Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1592 IRBuilder<> B(I.getParent());
1593 B.SetInsertPoint(&I);
1594 Value *Source = I.getOperand(0);
1595
1596 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1597 // varying element types. In case of IR coming from older versions of LLVM
1598 // such bitcasts do not provide sufficient information, should be just skipped
1599 // here, and handled in insertPtrCastOrAssignTypeInstr.
1600 if (isPointerTy(I.getType())) {
1601 replaceAllUsesWith(&I, Source);
1602 I.eraseFromParent();
1603 return nullptr;
1604 }
1605
1606 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1607 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1608 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1609 replaceAllUsesWithAndErase(B, &I, NewI);
1610 return NewI;
1611}
1612
void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
    TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
  // Record (or reconcile) a builtin target-extension pointee type for the
  // pointer value V.
  Type *VTy = V->getType();

  // A couple of sanity checks.
  assert((isPointerTy(VTy)) && "Expect a pointer type!");
  if (Type *ElemTy = getPointeeType(VTy))
    if (ElemTy != AssignedType)
      report_fatal_error("Unexpected pointer element type!");

  CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
  if (!AssignCI) {
    // No prior assignment: record the builtin type directly.
    GR->buildAssignType(B, AssignedType, V);
    return;
  }

  // NOTE(review): the expression extracting the previously assigned type from
  // AssignCI's metadata operand is only partially visible in this excerpt --
  // confirm against upstream.
  Type *CurrentType =
          cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
          ->getType();
  if (CurrentType == AssignedType)
    return;

  // Builtin types cannot be redeclared or casted.
  if (CurrentType->isTargetExtTy())
    report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
                           "/" + AssignedType->getTargetExtName() +
                           " for value " + V->getName(),
                       false);

  // Our previous guess about the type seems to be wrong, let's update
  // inferred type according to a new, more precise type information.
  GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
}
1647
void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
    Instruction *I, Value *Pointer, Type *ExpectedElementType,
    unsigned OperandToReplace, IRBuilder<> &B) {
  // Ensure operand OperandToReplace of I is seen as a pointer to
  // ExpectedElementType: either by recording/upgrading the pointer's deduced
  // type, or by inserting an explicit spv_ptrcast before I.
  TypeValidated.insert(I);

  // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
  Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
  if (PointerElemTy == ExpectedElementType ||
      isEquivalentTypes(PointerElemTy, ExpectedElementType))
    return;

  // NOTE(review): a builder insert-point setup call is expected here but is
  // not visible in this excerpt -- confirm against upstream.
  Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
  MetadataAsValue *VMD = buildMD(ExpectedElementVal);
  unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
  bool FirstPtrCastOrAssignPtrType = true;

  // Do not emit new spv_ptrcast if equivalent one already exists or when
  // spv_assign_ptr_type already targets this pointer with the same element
  // type.
  if (Pointer->hasUseList()) {
    for (auto User : Pointer->users()) {
      auto *II = dyn_cast<IntrinsicInst>(User);
      if (!II ||
          (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
           II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
          II->getOperand(0) != Pointer)
        continue;

      // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
      // pointer.
      FirstPtrCastOrAssignPtrType = false;
      // NOTE(review): the right-hand side of the address-space comparison
      // below is not visible in this excerpt -- confirm against upstream.
      if (II->getOperand(1) != VMD ||
          dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
        continue;

      // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
      // same element type and address space.
      if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
        return;

      // This must be a spv_ptrcast, do not emit new if this one has the same BB
      // as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
      if (II->getParent() != I->getParent())
        continue;

      I->setOperand(OperandToReplace, II);
      return;
    }
  }

  if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
    if (FirstPtrCastOrAssignPtrType) {
      // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
      // emit spv_assign_ptr_type instead.
      GR->buildAssignPtr(B, ExpectedElementType, Pointer);
      return;
    } else if (isTodoType(Pointer)) {
      eraseTodoType(Pointer);
      if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
        // If this wouldn't be the first spv_ptrcast but existing type info is
        // uncomplete, update spv_assign_ptr_type arguments.
        if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
          Type *PrevElemTy = GR->findDeducedElementType(Pointer);
          assert(PrevElemTy);
          DenseSet<std::pair<Value *, Value *>> VisitedSubst{
              std::make_pair(I, Pointer)};
          GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
          propagateElemType(Pointer, PrevElemTy, VisitedSubst);
        } else {
          GR->buildAssignPtr(B, ExpectedElementType, Pointer);
        }
        return;
      }
    }
  }

  // Emit spv_ptrcast
  SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
  SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
  auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  I->setOperand(OperandToReplace, PtrCastI);
  // We need to set up a pointee type for the newly created spv_ptrcast.
  GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
}
1734
// Insert spv_ptrcast (or pointee-type assignment) intrinsics for the pointer
// operands of stores, loads, GEPs, and calls to non-intrinsic builtins, so
// the later IR translation knows which pointee type each pointer is used
// with.
void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
                                                         IRBuilder<> &B) {
  // Handle basic instructions:
  StoreInst *SI = dyn_cast<StoreInst>(I);
  if (IsKernelArgInt8(CurrF, SI)) {
    // Stores into i8 kernel arguments keep the i8 pointee type.
    replacePointerOperandWithPtrCast(
        I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
        0, B);
  }
  if (SI) {
    Value *Op = SI->getValueOperand();
    Value *Pointer = SI->getPointerOperand();
    Type *OpTy = Op->getType();
    // Prefer the type the stored value had before any in-pass type mutation.
    if (auto *OpI = dyn_cast<Instruction>(Op))
      OpTy = restoreMutatedType(GR, OpI, OpTy);
    if (OpTy == Op->getType())
      OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
    return;
  }
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Pointer = LI->getPointerOperand();
    Type *OpTy = LI->getType();
    if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
      if (Type *ElemTy = GR->findDeducedElementType(LI)) {
        OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
      } else {
        Type *NewOpTy = OpTy;
        OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
        // Deduction produced nothing new: postpone the decision.
        if (OpTy == NewOpTy)
          insertTodoType(Pointer);
      }
    }
    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
    return;
  }
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    Value *Pointer = GEPI->getPointerOperand();
    Type *OpTy = nullptr;

    // Knowing the accessed type is mandatory for logical SPIR-V. Sadly,
    // the GEP source element type should not be used for this purpose, and
    // the alternative type-scavenging method is not working.
    // Physical SPIR-V can work around this, but not logical, hence still
    // try to rely on the broken type scavenging for logical.
    bool IsRewrittenGEP =
        GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
    if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
      Value *Src = getPointerRoot(Pointer);
      OpTy = GR->findDeducedElementType(Src);
    }

    // In all cases, fall back to the GEP type if type scavenging failed.
    if (!OpTy)
      OpTy = GEPI->getSourceElementType();

    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
    if (isNestedPointer(OpTy))
      insertTodoType(Pointer);
    return;
  }

  // TODO: review and merge with existing logics:
  // Handle calls to builtins (non-intrinsics):
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      // NOTE(review): the tail of this condition is elided in this view
      // (extraction artifact) — verify against upstream llvm-project.
    return;

  // collect information about formal parameter types
  std::string DemangledName =
      // NOTE(review): the demangling call initializing DemangledName is
      // elided in this view — verify against upstream llvm-project.
  Function *CalledF = CI->getCalledFunction();
  SmallVector<Type *, 4> CalledArgTys;
  bool HaveTypes = false;
  // For each formal parameter record its pointee type if one can be found
  // (nullptr for non-pointer parameters or unresolved pointers).
  for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
    Argument *CalledArg = CalledF->getArg(OpIdx);
    Type *ArgType = CalledArg->getType();
    if (!isPointerTy(ArgType)) {
      CalledArgTys.push_back(nullptr);
    } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
      CalledArgTys.push_back(ArgTypeElem);
      HaveTypes = true;
    } else {
      Type *ElemTy = GR->findDeducedElementType(CalledArg);
      if (!ElemTy && hasPointeeTypeAttr(CalledArg))
        ElemTy = getPointeeTypeByAttr(CalledArg);
      if (!ElemTy) {
        ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
        if (ElemTy) {
          GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
        } else {
          // Last resort: deduce from the users of the formal argument.
          for (User *U : CalledArg->users()) {
            if (Instruction *Inst = dyn_cast<Instruction>(U)) {
              if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
                break;
            }
          }
        }
      }
      HaveTypes |= ElemTy != nullptr;
      CalledArgTys.push_back(ElemTy);
    }
  }

  if (DemangledName.empty() && !HaveTypes)
    return;

  for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
    Value *ArgOperand = CI->getArgOperand(OpIdx);
    if (!isPointerTy(ArgOperand->getType()))
      continue;

    // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
    if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
      // However, we may have assumptions about the formal argument's type and
      // may have a need to insert a ptr cast for the actual parameter of this
      // call.
      Argument *CalledArg = CalledF->getArg(OpIdx);
      if (!GR->findDeducedElementType(CalledArg))
        continue;
    }

    Type *ExpectedType =
        OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
    if (!ExpectedType && !DemangledName.empty())
      ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
          DemangledName, OpIdx, I->getContext());
    if (!ExpectedType || ExpectedType->isVoidTy())
      continue;

    if (ExpectedType->isTargetExtTy() &&
        // NOTE(review): the second half of this condition is elided in this
        // view — verify against upstream llvm-project.
      insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
                                   ArgOperand, B);
    else
      replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
  }
}
1874
1875Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1876 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1877 // type in LLT and IRTranslator will replace it by the scalar.
1878 if (isVector1(I.getType()))
1879 return &I;
1880
1881 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1882 I.getOperand(1)->getType(),
1883 I.getOperand(2)->getType()};
1884 IRBuilder<> B(I.getParent());
1885 B.SetInsertPoint(&I);
1886 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1887 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1888 replaceAllUsesWithAndErase(B, &I, NewI);
1889 return NewI;
1890}
1891
SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
  // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
  // type in LLT and IRTranslator will replace it by the scalar.
  if (isVector1(I.getVectorOperandType()))
    return &I;

  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  // Overload on result, vector, and index types so the original typing
  // survives into instruction selection.
  SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
                                  I.getIndexOperand()->getType()};
  SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
1908
// Replace insertvalue with the spv_insertv intrinsic, encoding the access
// indices as trailing i32 arguments.
Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
  // NOTE(review): the declaration of Args is elided in this view — verify
  // against upstream llvm-project.
  Value *AggregateOp = I.getAggregateOperand();
  // An undef aggregate is encoded as an undef i32 placeholder argument.
  if (isa<UndefValue>(AggregateOp))
    Args.push_back(UndefValue::get(B.getInt32Ty()));
  else
    Args.push_back(AggregateOp);
  Args.push_back(I.getInsertedValueOperand());
  for (auto &Op : I.indices())
    Args.push_back(B.getInt32(Op));
  Instruction *NewI =
      B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
  replaceMemInstrUses(&I, NewI, B);
  return NewI;
}
1927
1928Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1929 if (I.getAggregateOperand()->getType()->isAggregateType())
1930 return &I;
1931 IRBuilder<> B(I.getParent());
1932 B.SetInsertPoint(&I);
1933 SmallVector<Value *> Args(I.operands());
1934 for (auto &Op : I.indices())
1935 Args.push_back(B.getInt32(Op));
1936 auto *NewI =
1937 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1938 replaceAllUsesWithAndErase(B, &I, NewI);
1939 return NewI;
1940}
1941
// Replace aggregate-typed loads with the spv_load intrinsic, carrying memory
// operand flags and alignment as immediate arguments.
Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
  // Scalar/vector loads are handled by the IRTranslator directly.
  if (!I.getType()->isAggregateType())
    return &I;
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  TrackConstants = false;
  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  // NOTE(review): the declaration of Flags is elided in this view — verify
  // against upstream llvm-project.
      TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
  auto *NewI =
      B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
                        {I.getPointerOperand(), B.getInt16(Flags),
                         B.getInt8(I.getAlign().value())});
  replaceMemInstrUses(&I, NewI, B);
  return NewI;
}
1958
// Replace stores of aggregate values (collected earlier in AggrStores) with
// the spv_store intrinsic, carrying memory flags and alignment.
Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
  if (!AggrStores.contains(&I))
    return &I;
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  TrackConstants = false;
  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  // NOTE(review): the declaration of Flags is elided in this view — verify
  // against upstream llvm-project.
      TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
  auto *PtrOp = I.getPointerOperand();
  auto *NewI = B.CreateIntrinsic(
      Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
      {I.getValueOperand(), PtrOp, B.getInt16(Flags),
       B.getInt8(I.getAlign().value())});
  // Preserve metadata (e.g. aliasing info) on the replacement call.
  NewI->copyMetadata(I);
  I.eraseFromParent();
  return NewI;
}
1977
// Replace allocas with spv_alloca / spv_alloca_array intrinsics; array
// (variable-length) allocations require SPV_INTEL_variable_length_array.
Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
  Value *ArraySize = nullptr;
  if (I.isArrayAllocation()) {
    const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
    if (!STI->canUseExtension(
            SPIRV::Extension::SPV_INTEL_variable_length_array))
      // NOTE(review): the head of this error-reporting call is elided in
      // this view — verify against upstream llvm-project.
          "array allocation: this instruction requires the following "
          "SPIR-V extension: SPV_INTEL_variable_length_array",
          false);
    ArraySize = I.getArraySize();
  }
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  TrackConstants = false;
  Type *PtrTy = I.getType();
  auto *NewI =
      ArraySize
          ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
                              {PtrTy, ArraySize->getType()},
                              {ArraySize, B.getInt8(I.getAlign().value())})
          : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
                              {B.getInt8(I.getAlign().value())});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
2004
2005Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2006 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2007 IRBuilder<> B(I.getParent());
2008 B.SetInsertPoint(&I);
2009 SmallVector<Value *> Args(I.operands());
2010 Args.push_back(B.getInt32(
2011 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2012 Args.push_back(B.getInt32(
2013 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2014 Args.push_back(B.getInt32(
2015 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2016 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2017 {I.getPointerOperand()->getType()}, {Args});
2018 replaceMemInstrUses(&I, NewI, B);
2019 return NewI;
2020}
2021
2022Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2023 IRBuilder<> B(I.getParent());
2024 B.SetInsertPoint(&I);
2025 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2026 return &I;
2027}
2028
2029void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2030 IRBuilder<> &B) {
2031 // Skip special artifical variable llvm.global.annotations.
2032 if (GV.getName() == "llvm.global.annotations")
2033 return;
2034 Constant *Init = nullptr;
2035 if (hasInitializer(&GV)) {
2036 // Deduce element type and store results in Global Registry.
2037 // Result is ignored, because TypedPointerType is not supported
2038 // by llvm IR general logic.
2039 deduceElementTypeHelper(&GV, false);
2040 Init = GV.getInitializer();
2041 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2042 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2043 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2044 {GV.getType(), Ty}, {&GV, Const});
2045 InitInst->setArgOperand(1, Init);
2046 }
2047 if (!Init && GV.use_empty())
2048 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2049}
2050
// Return true, if we can't decide what is the pointee type now and will get
// back to the question later. Return false is spv_assign_ptr_type is not needed
// or can be inserted immediately.
bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
                                                   IRBuilder<> &B,
                                                   bool UnknownElemTypeI8) {
  // NOTE(review): one line is elided here in this view — verify against
  // upstream llvm-project.
  if (!isPointerTy(I->getType()) || !requireAssignType(I))
    return false;

  // NOTE(review): one line is elided here in this view.
  if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
    GR->buildAssignPtr(B, ElemTy, I);
    return false;
  }
  // Pointee type could not be deduced yet: caller will retry later.
  return true;
}
2068
// Emit spv_assign_type intrinsics for the instruction's result (unless a
// well-known builtin result type is handled specially) and for its
// non-instruction operands; also attach FP rounding/saturation decorations
// for recognized builtin calls.
void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
                                                IRBuilder<> &B) {
  // TODO: extend the list of functions with known result types
  static StringMap<unsigned> ResTypeWellKnown = {
      {"async_work_group_copy", WellKnownTypes::Event},
      {"async_work_group_strided_copy", WellKnownTypes::Event},
      {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};

  // NOTE(review): one line is elided here in this view — verify against
  // upstream llvm-project.

  bool IsKnown = false;
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
        CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
      Function *CalledF = CI->getCalledFunction();
      std::string DemangledName =
          // NOTE(review): the initializer of DemangledName is elided in this
          // view — verify against upstream llvm-project.
      FPDecorationId DecorationId = FPDecorationId::NONE;
      if (DemangledName.length() > 0)
        DemangledName =
            SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
      auto ResIt = ResTypeWellKnown.find(DemangledName);
      if (ResIt != ResTypeWellKnown.end()) {
        IsKnown = true;
        // NOTE(review): one line is elided here in this view.
        switch (ResIt->second) {
        case WellKnownTypes::Event:
          GR->buildAssignType(
              B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
          break;
        }
      }
      // check if a floating rounding mode or saturation info is present
      switch (DecorationId) {
      default:
        break;
      case FPDecorationId::SAT:
        // NOTE(review): the decoration-emitting call is elided in this view.
        break;
      case FPDecorationId::RTE:
        // NOTE(review): the head of this call is elided in this view.
            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
        break;
      case FPDecorationId::RTZ:
            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
        break;
      case FPDecorationId::RTP:
            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
        break;
      case FPDecorationId::RTN:
            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
        break;
      }
    }
  }

  Type *Ty = I->getType();
  if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
    // NOTE(review): one line is elided here in this view.
    Type *TypeToAssign = Ty;
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
          II->getIntrinsicID() == Intrinsic::spv_undef) {
        // Composite constants keep their precise aggregate type in a side
        // table; the intrinsic result type alone is not sufficient.
        auto It = AggrConstTypes.find(II);
        if (It == AggrConstTypes.end())
          report_fatal_error("Unknown composite intrinsic type");
        TypeToAssign = It->second;
      }
    }
    TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
    GR->buildAssignType(B, TypeToAssign, I);
  }
  for (const auto &Op : I->operands()) {
    // NOTE(review): the guard condition opening this region (and its
    // closing brace pairing) is elided in this view — verify against
    // upstream llvm-project.
      // Check GetElementPtrConstantExpr case.
      Type *OpTy = Op->getType();
      if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
        // Undef aggregates are typed via an undef i32 placeholder.
        CallInst *AssignCI =
            buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
                            UndefValue::get(B.getInt32Ty()), {}, B);
        GR->addAssignPtrTypeInstr(Op, AssignCI);
      } else if (!isa<Instruction>(Op)) {
        Type *OpTy = Op->getType();
        Type *OpTyElem = getPointeeType(OpTy);
        if (OpTyElem) {
          GR->buildAssignPtr(B, OpTyElem, Op);
        } else if (isPointerTy(OpTy)) {
          Type *ElemTy = GR->findDeducedElementType(Op);
          GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
                             Op);
        } else {
          Value *OpTyVal = Op;
          if (OpTy->isTargetExtTy()) {
            // We need to do this in order to be consistent with how target ext
            // types are handled in `processInstrAfterVisit`
            OpTyVal = getNormalizedPoisonValue(OpTy);
          }
          CallInst *AssignCI =
              buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
                              getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
          GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
        }
      }
    }
  }
}
2180
// Decide whether aliasing decorations (SPV_INTEL_memory_access_aliasing) may
// be attached to this instruction.
bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
    Instruction *Inst) {
  const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
  if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
    return false;
  // Add aliasing decorations to internal load and store intrinsics
  // and atomic instructions, skipping atomic store as it won't have ID to
  // attach the decoration.
  CallInst *CI = dyn_cast<CallInst>(Inst);
  if (!CI)
    return false;
  if (Function *Fun = CI->getCalledFunction()) {
    if (Fun->isIntrinsic()) {
      switch (Fun->getIntrinsicID()) {
      case Intrinsic::spv_load:
      case Intrinsic::spv_store:
        return true;
      default:
        return false;
      }
    }
    // NOTE(review): the declaration of Name is elided in this view —
    // verify against upstream llvm-project.
    const std::string Prefix = "__spirv_Atomic";
    const bool IsAtomic = Name.find(Prefix) == 0;

    // Atomic stores return void and therefore have no result ID to decorate.
    if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
      return true;
  }
  return false;
}
2211
// Attach decoration intrinsics derived from instruction metadata:
// spirv.Decorations, alias.scope/noalias (INTEL aliasing decorations), and
// fpmath (INTEL fp-max-error decoration).
void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
    // NOTE(review): one line is elided here in this view — verify against
    // upstream llvm-project.
    B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                      {I, MetadataAsValue::get(I->getContext(), MD)});
  }
  // Lower alias.scope/noalias metadata
  {
    auto processMemAliasingDecoration = [&](unsigned Kind) {
      if (MDNode *AliasListMD = I->getMetadata(Kind)) {
        if (shouldTryToAddMemAliasingDecoration(I)) {
          uint32_t Dec = Kind == LLVMContext::MD_alias_scope
                             ? SPIRV::Decoration::AliasScopeINTEL
                             : SPIRV::Decoration::NoAliasINTEL;
          // NOTE(review): the declaration opening this argument list is
          // elided in this view — verify against upstream llvm-project.
              I, ConstantInt::get(B.getInt32Ty(), Dec),
              MetadataAsValue::get(I->getContext(), AliasListMD)};
          // NOTE(review): one line is elided here in this view.
          B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
                            {I->getType()}, {Args});
        }
      }
    };
    processMemAliasingDecoration(LLVMContext::MD_alias_scope);
    processMemAliasingDecoration(LLVMContext::MD_noalias);
  }
  // MD_fpmath
  if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
    const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
    bool AllowFPMaxError =
        STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
    if (!AllowFPMaxError)
      return;

    // NOTE(review): one line is elided here in this view.
    B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
                      {I->getType()},
                      {I, MetadataAsValue::get(I->getContext(), MD)});
  }
}
2253
    const Module &M,
    // NOTE(review): the map-type line of this parameter (and the function
    // signature above) are elided in this view — verify against upstream.
        &FPFastMathDefaultInfoMap,
    Function *F) {
  // Return the cached per-function info vector when one already exists.
  auto it = FPFastMathDefaultInfoMap.find(F);
  if (it != FPFastMathDefaultInfoMap.end())
    return it->second;

  // If the map does not contain the entry, create a new one. Initialize it to
  // contain all 3 elements sorted by bit width of target type: {half, float,
  // double}.
  SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
  FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  // Move into the map and return a reference to the stored vector.
  return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
}
2275
    SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
    const Type *Ty) {
  // Map the scalar bit width (16/32/64) to the fixed {half, float, double}
  // slot of the vector.
  size_t BitWidth = Ty->getScalarSizeInBits();
  int Index =
      // NOTE(review): the index-computing call is elided in this view —
      // verify against upstream llvm-project.
          BitWidth);
  assert(Index >= 0 && Index < 3 &&
         "Expected FPFastMathDefaultInfo for half, float, or double");
  assert(FPFastMathDefaultInfoVec.size() == 3 &&
         "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
  return FPFastMathDefaultInfoVec[Index];
}
2289
// Materialize i32 global constants that hold FP fast-math mode bitmasks for
// the SPV_KHR_float_controls2 FPFastMathDefault execution mode, translating
// the deprecated ContractionOff/SignedZeroInfNanPreserve modes as well.
void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
  const SPIRVSubtarget *ST = TM->getSubtargetImpl();
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
    return;

  // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
  // We need the entry point (function) as the key, and the target
  // type and flags as the value.
  // We also need to check ContractionOff and SignedZeroInfNanPreserve
  // execution modes, as they are now deprecated and must be replaced
  // with FPFastMathDefaultInfo.
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  if (!Node) {
    if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
      // This requires emitting ContractionOff. However, because
      // ContractionOff is now deprecated, we need to replace it with
      // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
      // We need to create the constant for that.

      // Create constant instruction with the bitmask flags.
      Constant *InitValue =
          ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
      // TODO: Reuse constant if there is one already with the required
      // value.
      [[maybe_unused]] GlobalVariable *GV =
          new GlobalVariable(M,                                // Module
                             Type::getInt32Ty(M.getContext()), // Type
                             true,                             // isConstant
                             // NOTE(review): the linkage argument is elided
                             // in this view — verify against upstream.
                             InitValue // Initializer
          );
    }
    return;
  }

  // The table maps function pointers to their default FP fast math info. It
  // can be assumed that the SmallVector is sorted by the bit width of the
  // type. The first element is the smallest bit width, and the last element
  // is the largest bit width, therefore, we will have {half, float, double}
  // in the order of their bit widths.
  DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
      FPFastMathDefaultInfoMap;

  for (unsigned i = 0; i < Node->getNumOperands(); i++) {
    MDNode *MDN = cast<MDNode>(Node->getOperand(i));
    assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
    // NOTE(review): the cast head extracting the entry-point function F is
    // elided in this view — verify against upstream llvm-project.
        cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
    const auto EM =
        // NOTE(review): the cast head is elided in this view.
        cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
            ->getZExtValue();
    if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
      assert(MDN->getNumOperands() == 4 &&
             "Expected 4 operands for FPFastMathDefault");
      const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
      unsigned Flags =
          // NOTE(review): the cast head is elided in this view.
          cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
              ->getZExtValue();
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      SPIRV::FPFastMathDefaultInfo &Info =
          getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
      Info.FastMathFlags = Flags;
      Info.FPFastMathDefault = true;
    } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
      assert(MDN->getNumOperands() == 2 &&
             "Expected no operands for ContractionOff");

      // We need to save this info for every possible FP type, i.e. {half,
      // float, double, fp128}.
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
        Info.ContractionOff = true;
      }
    } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
      assert(MDN->getNumOperands() == 3 &&
             "Expected 1 operand for SignedZeroInfNanPreserve");
      unsigned TargetWidth =
          // NOTE(review): the cast head is elided in this view.
          cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
              ->getZExtValue();
      // We need to save this info only for the FP type with TargetWidth.
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      // NOTE(review): the computation of Index is elided in this view —
      // verify against upstream llvm-project.
      assert(Index >= 0 && Index < 3 &&
             "Expected FPFastMathDefaultInfo for half, float, or double");
      assert(FPFastMathDefaultInfoVec.size() == 3 &&
             "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
      FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
    }
  }

  // One i32 constant global is created per distinct flag bitmask and shared
  // across functions.
  std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
  for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
    if (FPFastMathDefaultInfoVec.empty())
      continue;

    for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
      assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
      // Skip if none of the execution modes was used.
      unsigned Flags = Info.FastMathFlags;
      if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
          !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
        continue;

      // Check if flags are compatible.
      if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
        report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
                           "and AllowContract");

      if (Info.SignedZeroInfNanPreserve &&
          !(Flags &
            (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
             SPIRV::FPFastMathMode::NSZ))) {
        if (Info.FPFastMathDefault)
          report_fatal_error("Conflicting FPFastMathFlags: "
                             "SignedZeroInfNanPreserve but at least one of "
                             "NotNaN/NotInf/NSZ is enabled.");
      }

      if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
          !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
            (Flags & SPIRV::FPFastMathMode::AllowContract))) {
        report_fatal_error("Conflicting FPFastMathFlags: "
                           "AllowTransform requires AllowReassoc and "
                           "AllowContract to be set.");
      }

      auto it = GlobalVars.find(Flags);
      GlobalVariable *GV = nullptr;
      if (it != GlobalVars.end()) {
        // Reuse existing global variable.
        GV = it->second;
      } else {
        // Create constant instruction with the bitmask flags.
        Constant *InitValue =
            ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
        // TODO: Reuse constant if there is one already with the required
        // value.
        GV = new GlobalVariable(M,                                // Module
                                Type::getInt32Ty(M.getContext()), // Type
                                true,                             // isConstant
                                // NOTE(review): the linkage argument is
                                // elided in this view — verify upstream.
                                InitValue // Initializer
        );
        GlobalVars[Flags] = GV;
      }
    }
  }
}
2445
// Final per-instruction fixup after the visitor has run: wrap tracked
// composite constants in spv_track_constant, cast pointer operands with a
// known non-i8 pointee type via spv_ptrcast, and emit name intrinsics.
void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
                                                 IRBuilder<> &B) {
  auto *II = dyn_cast<IntrinsicInst>(I);
  bool IsConstComposite =
      II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
  if (IsConstComposite && TrackConstants) {
    // NOTE(review): one line is elided here in this view — verify against
    // upstream llvm-project.
    auto t = AggrConsts.find(I);
    assert(t != AggrConsts.end());
    auto *NewOp =
        buildIntrWithMD(Intrinsic::spv_track_constant,
                        {II->getType(), II->getType()}, t->second, I, {}, B);
    replaceAllUsesWith(I, NewOp, false);
    // Re-point the first argument back at the original instruction after the
    // use replacement above.
    NewOp->setArgOperand(0, I);
  }
  bool IsPhi = isa<PHINode>(I), BPrepared = false;
  for (const auto &Op : I->operands()) {
    if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
        // NOTE(review): the tail of this condition is elided in this view.
      continue;
    unsigned OpNo = Op.getOperandNo();
    // spv_gep's base pointer and immediate arguments must not be wrapped.
    if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
               (II->paramHasAttr(OpNo, Attribute::ImmArg))))
      continue;

    if (!BPrepared) {
      // For PHIs the builder must insert in the entry block, past allocas.
      IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
            : B.SetInsertPoint(I);
      BPrepared = true;
    }
    Type *OpTy = Op->getType();
    Type *OpElemTy = GR->findDeducedElementType(Op);
    Value *NewOp = Op;
    if (OpTy->isTargetExtTy()) {
      // Since this value is replaced by poison, we need to do the same in
      // `insertAssignTypeIntrs`.
      Value *OpTyVal = getNormalizedPoisonValue(OpTy);
      NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
                              {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
    }
    if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
        OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
      SmallVector<Type *, 2> Types = {OpTy, OpTy};
      SmallVector<Value *, 2> Args = {
          NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
          B.getInt32(getPointerAddressSpace(OpTy))};
      CallInst *PtrCasted =
          B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
      GR->buildAssignPtr(B, OpElemTy, PtrCasted);
      NewOp = PtrCasted;
    }
    if (NewOp != Op)
      I->setOperand(OpNo, NewOp);
  }
  // Emit the name-tracking intrinsic at most once per instruction.
  if (Named.insert(I).second)
    emitAssignName(I, B);
}
2503
2504Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2505 unsigned OpIdx) {
2506 std::unordered_set<Function *> FVisited;
2507 return deduceFunParamElementType(F, OpIdx, FVisited);
2508}
2509
// Recursively deduce the pointee type of F's OpIdx-th parameter by
// inspecting actual arguments at call sites, their users, and — when the
// actual argument is itself a formal parameter of the caller — recursing
// into the caller's parameter.
Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
    Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
  // maybe a cycle
  if (!FVisited.insert(F).second)
    return nullptr;

  std::unordered_set<Value *> Visited;
  // NOTE(review): the declaration of Lookup is elided in this view —
  // verify against upstream llvm-project.
  // search in function's call sites
  for (User *U : F->users()) {
    CallInst *CI = dyn_cast<CallInst>(U);
    if (!CI || OpIdx >= CI->arg_size())
      continue;
    Value *OpArg = CI->getArgOperand(OpIdx);
    if (!isPointerTy(OpArg->getType()))
      continue;
    // maybe we already know operand's element type
    if (Type *KnownTy = GR->findDeducedElementType(OpArg))
      return KnownTy;
    // try to deduce from the operand itself
    Visited.clear();
    if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
      return Ty;
    // search in actual parameter's users
    for (User *OpU : OpArg->users()) {
      // NOTE(review): the declaration of Inst is elided in this view.
      if (!Inst || Inst == CI)
        continue;
      Visited.clear();
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
        return Ty;
    }
    // check if it's a formal parameter of the outer function
    if (!CI->getParent() || !CI->getParent()->getParent())
      continue;
    Function *OuterF = CI->getParent()->getParent();
    if (FVisited.find(OuterF) != FVisited.end())
      continue;
    for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
      if (OuterF->getArg(i) == OpArg) {
        Lookup.push_back(std::make_pair(OuterF, i));
        break;
      }
    }
  }

  // search in function parameters
  for (auto &Pair : Lookup) {
    if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
      return Ty;
  }

  return nullptr;
}
2564
// Assign pointee types to untyped pointer arguments of F that can be
// resolved cheaply: from parameter attributes, from call-site operands, or
// (when function pointers are in use) from indirect calls through the
// argument.
void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
                                                       IRBuilder<> &B) {
  B.SetInsertPointPastAllocas(F);
  for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
    Argument *Arg = F->getArg(OpIdx);
    if (!isUntypedPointerTy(Arg->getType()))
      continue;
    Type *ElemTy = GR->findDeducedElementType(Arg);
    if (ElemTy)
      continue;
    // Prefer an explicit pointee-type attribute when present.
    if (hasPointeeTypeAttr(Arg) &&
        (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
      GR->buildAssignPtr(B, ElemTy, Arg);
      continue;
    }
    // search in function's call sites
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || OpIdx >= CI->arg_size())
        continue;
      Value *OpArg = CI->getArgOperand(OpIdx);
      if (!isPointerTy(OpArg->getType()))
        continue;
      // maybe we already know operand's element type
      if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
        break;
    }
    if (ElemTy) {
      GR->buildAssignPtr(B, ElemTy, Arg);
      continue;
    }
    if (HaveFunPtrs) {
      // The argument is used as a function pointer: deduce from indirect
      // calls through it within the current function.
      for (User *U : Arg->users()) {
        CallInst *CI = dyn_cast<CallInst>(U);
        if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
            CI->getCalledOperand() == Arg &&
            CI->getParent()->getParent() == CurrF) {
          // NOTE(review): the declaration of Ops is elided in this view —
          // verify against upstream llvm-project.
          deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
          if (ElemTy) {
            GR->buildAssignPtr(B, ElemTy, Arg);
            break;
          }
        }
      }
    }
  }
}
2613
2614void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2615 B.SetInsertPointPastAllocas(F);
2616 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2617 Argument *Arg = F->getArg(OpIdx);
2618 if (!isUntypedPointerTy(Arg->getType()))
2619 continue;
2620 Type *ElemTy = GR->findDeducedElementType(Arg);
2621 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2622 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2623 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2624 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2625 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2626 VisitedSubst);
2627 } else {
2628 GR->buildAssignPtr(B, ElemTy, Arg);
2629 }
2630 }
2631 }
2632}
2633
                                                SPIRVGlobalRegistry *GR) {
  // Build a FunctionType where pointer arguments are replaced by typed
  // pointer wrappers carrying their deduced pointee types; return the
  // original type unchanged when nothing was deduced.
  FunctionType *FTy = F->getFunctionType();
  bool IsNewFTy = false;
  // NOTE(review): the declaration of ArgTys is elided in this view —
  // verify against upstream llvm-project.
  for (Argument &Arg : F->args()) {
    Type *ArgTy = Arg.getType();
    if (ArgTy->isPointerTy())
      if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
      }
    ArgTys.push_back(ArgTy);
  }
  return IsNewFTy
             ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
             : FTy;
}
2652
// Collect functions whose addresses are taken (used other than as a direct
// callee) and, when any exist, emit an internal service function that calls
// each of them with poison arguments so they are preserved through
// translation. Returns true if the service function was created.
bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
  SmallVector<Function *> Worklist;
  for (auto &F : M) {
    if (F.isIntrinsic())
      continue;
    if (F.isDeclaration()) {
      // A declaration used by anything that is not a direct call is treated
      // as address-taken.
      for (User *U : F.users()) {
        CallInst *CI = dyn_cast<CallInst>(U);
        if (!CI || CI->getCalledFunction() != &F) {
          Worklist.push_back(&F);
          break;
        }
      }
    } else {
      if (F.user_empty())
        continue;
      Type *FPElemTy = GR->findDeducedElementType(&F);
      if (!FPElemTy)
        FPElemTy = getFunctionPointerElemType(&F, GR);
      for (User *U : F.users()) {
        IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
        if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
          continue;
        if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
            II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
          // NOTE(review): one line is elided here in this view — verify
          // against upstream llvm-project.
          break;
        }
      }
    }
  }
  if (Worklist.empty())
    return false;

  std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
  if (!getVacantFunctionName(M, ServiceFunName))
    // NOTE(review): the head of this error-reporting call is elided in this
    // view — verify against upstream llvm-project.
        "cannot allocate a name for the internal service function");
  LLVMContext &Ctx = M.getContext();
  Function *SF =
      Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
                       GlobalValue::PrivateLinkage, ServiceFunName, M);
  // NOTE(review): one line is elided here in this view.
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
  IRBuilder<> IRB(BB);

  for (Function *F : Worklist) {
    // NOTE(review): the declaration of Args is elided in this view.
    for (const auto &Arg : F->args())
      Args.push_back(getNormalizedPoisonValue(Arg.getType()));
    IRB.CreateCall(F, Args);
  }
  IRB.CreateRetVoid();

  return true;
}
2709
2710// Apply types parsed from demangled function declarations.
// For every declaration with parsed pointer-element types (FDeclPtrTys),
// visit its direct call sites and propagate the parsed element type onto the
// corresponding call argument. The mechanism depends on what the argument is:
// a formal Argument gets an assign-ptr intrinsic, a GEP gets a spv_ptrcast,
// other instructions get a deduced-type record, and anything else gets an
// assign-ptr at the entry block. A call-result argument also propagates the
// type to the callee's return type.
2711void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
// Cache of already-emitted ptrcasts, keyed by function (shared across calls
// to replaceUsesOfWithSpvPtrcast).
2712 DenseMap<Function *, CallInst *> Ptrcasts;
2713 for (auto It : FDeclPtrTys) {
2714 Function *F = It.first;
2715 for (auto *U : F->users()) {
2716 CallInst *CI = dyn_cast<CallInst>(U);
// Only direct calls of F are rewritten.
2717 if (!CI || CI->getCalledFunction() != F)
2718 continue;
2719 unsigned Sz = CI->arg_size();
2720 for (auto [Idx, ElemTy] : It.second) {
2721 if (Idx >= Sz)
2722 continue;
2723 Value *Param = CI->getArgOperand(Idx);
// Skip arguments whose element type is already known, and globals.
2724 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2725 continue;
2726 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2727 if (!hasPointeeTypeAttr(Arg)) {
// Insert after allocas of the callee's parent function, with no debug loc.
2728 B.SetInsertPointPastAllocas(Arg->getParent());
2729 B.SetCurrentDebugLocation(DebugLoc());
2730 GR->buildAssignPtr(B, ElemTy, Arg);
2731 }
2732 } else if (isa<GetElementPtrInst>(Param)) {
2733 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2734 Ptrcasts);
2735 } else if (isa<Instruction>(Param)) {
2736 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2737 // insertAssignTypeIntrs() will complete buildAssignPtr()
2738 } else {
// Fallback: emit the assign-ptr at the top of the caller's entry block.
2739 B.SetInsertPoint(CI->getParent()
2740 ->getParent()
2741 ->getEntryBlock()
2742 .getFirstNonPHIOrDbgOrAlloca())
2743 GR->buildAssignPtr(B, ElemTy, Param);
2744 }
// If the argument is itself the result of a direct call, also record the
// element type for the callee's pointer return type.
2745 CallInst *Ref = dyn_cast<CallInst>(Param);
2746 if (!Ref)
2747 continue;
2748 Function *RefF = Ref->getCalledFunction();
2749 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2750 GR->findDeducedElementType(RefF))
2751 continue;
2752 ElemTy = normalizeType(ElemTy);
2753 GR->addDeducedElementType(RefF, ElemTy);
2754 GR->addReturnType(
// NOTE(review): original line 2755 is missing from this extract; it likely
// passes RefF and begins a TypedPointerType::get(...) whose arguments
// continue on the next line — confirm against upstream.
2756 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2757 }
2758 }
2759 }
2760}
2761
// Returns a simplified replacement GEP (with the leading zero-length-array
// level dropped), or nullptr when the pattern does not match. The caller is
// responsible for RAUW and erasing the original instruction.
2762GetElementPtrInst *
2763SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2764 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2765 // If type is 0-length array and first index is 0 (zero), drop both the
2766 // 0-length array type and the first index. This is a common pattern in
2767 // the IR, e.g. when using a zero-length array as a placeholder for a
2768 // flexible array such as unbound arrays.
2769 assert(GEP && "GEP is null");
2770 Type *SrcTy = GEP->getSourceElementType();
2771 SmallVector<Value *, 8> Indices(GEP->indices());
2772 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2773 if (ArrTy && ArrTy->getNumElements() == 0 &&
// NOTE(review): original line 2774 (the rest of this condition, presumably a
// check that the first index is zero, e.g. via match(..., m_Zero())) is
// missing from this extract — confirm against upstream.
2775 IRBuilder<> Builder(GEP);
// Drop the first (zero) index and peel one array level off the source type.
2776 Indices.erase(Indices.begin());
2777 SrcTy = ArrTy->getElementType();
// Preserve the original GEP's no-wrap flags on the replacement.
2778 Value *NewGEP = Builder.CreateGEP(SrcTy, GEP->getPointerOperand(), Indices,
2779 "", GEP->getNoWrapFlags());
2780 assert(llvm::isa<GetElementPtrInst>(NewGEP) && "NewGEP should be a GEP");
2781 return cast<GetElementPtrInst>(NewGEP);
2782 }
2783 return nullptr;
2784}
2785
// Per-function driver of the pass: normalizes GEPs, records aggregate
// stores, processes globals/undefs/composite constants, then runs a forward
// type-assignment pass, a backward operand-type-deduction pass, a PHI-only
// forward pass, and finally the instruction visitors. Always returns true
// for a definition (the IR is modified).
2786bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2787 if (Func.isDeclaration())
2788 return false;
2789
2790 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2791 GR = ST.getSPIRVGlobalRegistry();
2792
// HaveFunPtrs is decided once, before the first function is processed.
2793 if (!CurrF)
2794 HaveFunPtrs =
2795 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2796
2797 CurrF = &Func;
2798 IRBuilder<> B(Func.getContext());
// Per-function scratch state from previous runs must be dropped.
2799 AggrConsts.clear();
2800 AggrConstTypes.clear();
2801 AggrStores.clear();
2802
2803 // Fix GEP result types ahead of inference, and simplify if possible.
2804 // Data structure for dead instructions that were simplified and replaced.
2805 SmallPtrSet<Instruction *, 4> DeadInsts;
2806 for (auto &I : instructions(Func)) {
// NOTE(review): original line 2807 (which initializes Ref, presumably
// dyn_cast<GetElementPtrInst>(&I)) is missing from this extract.
2808 if (!Ref || GR->findDeducedElementType(Ref))
2809 continue;
2810
2811 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2812 if (NewGEP) {
2813 Ref->replaceAllUsesWith(NewGEP);
// NOTE(review): original line 2814 is missing from this extract (likely a
// registry update or metadata copy for the replacement) — confirm upstream.
2815 DeadInsts.insert(Ref);
2816
// Continue type deduction on the simplified GEP.
2817 Ref = NewGEP;
2818 }
2819 if (Type *GepTy = getGEPType(Ref))
2820 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2821 }
2822 // Remove dead instructions that were simplified and replaced.
2823 for (auto *I : DeadInsts) {
2824 assert(I->use_empty() && "Dead instruction should not have any uses left");
2825 I->eraseFromParent();
2826 }
2827
2828 processParamTypesByFunHeader(CurrF, B);
2829
2830 // StoreInst's operand type can be changed during the next
2831 // transformations, so we need to store it in the set. Also store already
2832 // transformed types.
2833 for (auto &I : instructions(Func)) {
2834 StoreInst *SI = dyn_cast<StoreInst>(&I);
2835 if (!SI)
2836 continue;
2837 Type *ElTy = SI->getValueOperand()->getType();
2838 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2839 AggrStores.insert(&I);
2840 }
2841
// Global-value intrinsics are emitted at the top of the entry block.
2842 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2843 for (auto &GV : Func.getParent()->globals())
2844 processGlobalValue(GV, B);
2845
2846 preprocessUndefs(B);
2847 preprocessCompositeConstants(B);
// NOTE(review): original lines 2848-2849 are missing from this extract; they
// presumably declare and populate the Worklist of instructions iterated
// below — confirm against upstream.
2850
2851 applyDemangledPtrArgTypes(B);
2852
2853 // Pass forward: use operand to deduce instructions result.
2854 for (auto &I : Worklist) {
2855 // Don't emit intrinsincs for convergence intrinsics.
2856 if (isConvergenceIntrinsic(I))
2857 continue;
2858
2859 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2860 // if Postpone is true, we can't decide on pointee type yet
2861 insertAssignTypeIntrs(I, B);
2862 insertPtrCastOrAssignTypeInstr(I, B);
// NOTE(review): original line 2863 is missing from this extract.
2864 // if instruction requires a pointee type set, let's check if we know it
2865 // already, and force it to be i8 if not
2866 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2867 insertAssignPtrTypeIntrs(I, B, true);
2868
// Constrained FP intrinsics carry an explicit rounding mode to preserve.
2869 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2870 useRoundingMode(FPI, B);
2871 }
2872
2873 // Pass backward: use instructions results to specify/update/cast operands
2874 // where needed.
2875 SmallPtrSet<Instruction *, 4> IncompleteRets;
2876 for (auto &I : llvm::reverse(instructions(Func)))
2877 deduceOperandElementType(&I, &IncompleteRets);
2878
2879 // Pass forward for PHIs only, their operands are not preceed the
2880 // instruction in meaning of `instructions(Func)`.
2881 for (BasicBlock &BB : Func)
2882 for (PHINode &Phi : BB.phis())
2883 if (isPointerTy(Phi.getType()))
2884 deduceOperandElementType(&Phi, nullptr);
2885
2886 for (auto *I : Worklist) {
2887 TrackConstants = true;
2888 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
// NOTE(review): original line 2889 (the then-statement of this condition,
// presumably an insert-point adjustment) is missing from this extract.
2890 // Visitors return either the original/newly created instruction for
2891 // further processing, nullptr otherwise.
2892 I = visit(*I);
2893 if (!I)
2894 continue;
2895
2896 // Don't emit intrinsics for convergence operations.
2897 if (isConvergenceIntrinsic(I))
2898 continue;
2899
// NOTE(review): original line 2900 is missing from this extract.
2901 processInstrAfterVisit(I, B);
2902 }
2903
2904 return true;
2905}
2906
2907// Try to deduce a better type for pointers to untyped ptr.
// Runs after all functions have been processed. For every value still queued
// in TodoType it (1) retries element-type deduction and propagates any
// improvement, then (2) re-runs operand-element-type deduction over the
// non-intrinsic users of the remaining values. Returns true iff at least one
// queued entry was resolved.
2908bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2909 if (!GR || TodoTypeSz == 0)
2910 return false;
2911
// Remember the initial queue size to report whether any progress was made.
2912 unsigned SzTodo = TodoTypeSz;
// Maps a user instruction to the still-unresolved operands it uses.
2913 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2914 for (auto [Op, Enabled] : TodoType) {
2915 // TODO: add isa<CallInst>(Op) to continue
// NOTE(review): original line 2916 (the condition guarding this continue,
// presumably testing Enabled and/or the kind of Op) is missing from this
// extract — confirm against upstream.
2917 continue;
2918 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2919 Type *KnownTy = GR->findDeducedElementType(Op);
2920 if (!KnownTy || !AssignCI)
2921 continue;
2922 assert(Op == AssignCI->getArgOperand(0));
2923 // Try to improve the type deduced after all Functions are processed.
2924 if (auto *CI = dyn_cast<Instruction>(Op)) {
2925 CurrF = CI->getParent()->getParent();
2926 std::unordered_set<Value *> Visited;
2927 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2928 if (ElemTy != KnownTy) {
// A better element type was found: propagate it and drop Op from the queue.
2929 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2930 propagateElemType(CI, ElemTy, VisitedSubst);
2931 eraseTodoType(Op);
2932 continue;
2933 }
2934 }
2935 }
2936
// Queue Op's non-intrinsic instruction users for the second phase below.
2937 if (Op->hasUseList()) {
2938 for (User *U : Op->users()) {
// NOTE(review): original line 2939 (which initializes Inst, presumably
// dyn_cast<Instruction>(U)) is missing from this extract.
2940 if (Inst && !isa<IntrinsicInst>(Inst))
2941 ToProcess[Inst].insert(Op);
2942 }
2943 }
2944 }
2945 if (TodoTypeSz == 0)
2946 return true;
2947
// Phase 2: walk each function backwards and re-run operand deduction on
// the instructions that use the still-unresolved values.
2948 for (auto &F : M) {
2949 CurrF = &F;
2950 SmallPtrSet<Instruction *, 4> IncompleteRets;
2951 for (auto &I : llvm::reverse(instructions(F))) {
2952 auto It = ToProcess.find(&I);
2953 if (It == ToProcess.end())
2954 continue;
// Earlier iterations may already have resolved some of these operands.
2955 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
2956 if (It->second.size() == 0)
2957 continue;
2958 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
// Early exit once the queue is fully drained.
2959 if (TodoTypeSz == 0)
2960 return true;
2961 }
2962 }
2963
2964 return SzTodo > TodoTypeSz;
2965}
2966
2967// Parse and store argument types of function declarations where needed.
// For each non-intrinsic declaration whose demangled name maps to
// OpGroupAsyncCopy, parse the builtin's type string and record the element
// type of every untyped pointer argument into FDeclPtrTys, which
// applyDemangledPtrArgTypes() later consumes at call sites.
2968void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
2969 for (auto &F : M) {
2970 if (!F.isDeclaration() || F.isIntrinsic())
2971 continue;
2972 // get the demangled name
2973 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
2974 if (DemangledName.empty())
2975 continue;
2976 // allow only OpGroupAsyncCopy use case at the moment
2977 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
2978 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
2979 DemangledName, ST.getPreferredInstructionSet());
2980 if (Opcode != SPIRV::OpGroupAsyncCopy)
2981 continue;
2982 // find pointer arguments
// Only pointer args without an explicit pointee-type attribute need help.
2983 SmallVector<unsigned> Idxs;
2984 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
2985 Argument *Arg = F.getArg(OpIdx);
2986 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
2987 Idxs.push_back(OpIdx);
2988 }
2989 if (!Idxs.size())
2990 continue;
2991 // parse function arguments
2992 LLVMContext &Ctx = F.getContext();
// NOTE(review): original line 2993 (the declaration of TypeStrs, presumably
// SmallVector<StringRef>) is missing from this extract.
2994 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
2995 if (!TypeStrs.size())
2996 continue;
2997 // find type info for pointer arguments
2998 for (unsigned Idx : Idxs) {
2999 if (Idx >= TypeStrs.size())
3000 continue;
3001 if (Type *ElemTy =
3002 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
// NOTE(review): original line 3003 (the first part of this filtering
// condition, conjoined with the target-ext-type check below) is missing
// from this extract — confirm against upstream.
3004 !ElemTy->isTargetExtTy())
3005 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3006 }
3007 }
3008}
3009
// Module entry point of the pass. Order matters: declarations are parsed
// first, each function is processed, then parameter types are fixed up,
// remaining untyped pointers are post-processed, and finally function
// pointers are handled (only when SPV_INTEL_function_pointers is available).
3010bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3011 bool Changed = false;
3012
3013 parseFunDeclarations(M);
3014 insertConstantsForFPFastMathDefault(M);
3015
3016 TodoType.clear();
3017 for (auto &F : M)
// NOTE(review): original line 3018 (the body of this loop, presumably
// Changed |= runOnFunction(F);) is missing from this extract.
3019
3020 // Specify function parameters after all functions were processed.
3021 for (auto &F : M) {
3022 // check if function parameter types are set
3023 CurrF = &F;
3024 if (!F.isDeclaration() && !F.isIntrinsic()) {
3025 IRBuilder<> B(F.getContext());
3026 processParamTypes(&F, B);
3027 }
3028 }
3029
// From this point on, no new entries may be added to TodoType.
3030 CanTodoType = false;
3031 Changed |= postprocessTypes(M);
3032
3033 if (HaveFunPtrs)
3034 Changed |= processFunctionPointers(M);
3035
3036 return Changed;
3037}
3038
// Factory for the legacy pass-manager wrapper of this pass.
// NOTE(review): the function's signature line (original line 3039,
// presumably ModulePass *llvm::createSPIRVEmitIntrinsicsPass(
// SPIRVTargetMachine *TM) {) is missing from this extract.
3040 return new SPIRVEmitIntrinsics(TM);
3041}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:500
static bool Enabled
Definition Statistic.cpp:46
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:237
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:637
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:620
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:908
Type * getTypeParameter(unsigned i) const
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:283
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:378
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:342
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
FPDecorationId
Definition SPIRVUtils.h:524
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:488
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:373
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:466
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:336
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:355
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:350
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:428
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:321
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:474
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:405
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:484
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:331
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146