Thanks to visit codestin.com
Credit goes to llvm.org

LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
    // Print a short keyword for a debug-record location type.
    // NOTE(review): the enclosing function signature (upstream line 187) and
    // every `case` label of this switch appear to have been dropped during
    // extraction — restore from upstream before this will compile.
    switch (Type) {
      *OS << "value";
      break;
      *OS << "declare";
      break;
      *OS << "assign";
      break;
      *OS << "end";
      break;
      *OS << "any";
      break;
    };
  }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
273 template <typename T1, typename... Ts>
274 void WriteTs(const T1 &V1, const Ts &... Vs) {
275 Write(V1);
276 WriteTs(Vs...);
277 }
278
  /// Recursion terminator for the variadic WriteTs above.
  template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so printout out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
  /// A debug info check failed. Records the failure in BrokenDebugInfo and
  /// prints the message if a stream is available.
  // NOTE(review): the embedded numbering jumps here (306 -> 308), so one line
  // of the upstream body appears to have been dropped during extraction —
  // verify against upstream before relying on this function's behavior.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    BrokenDebugInfo = true;
  }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
/// The IR verifier proper: walks a Module / Function via InstVisitor and
/// reports malformed constructs through the VerifierSupport base.
class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;
  /// Dominator tree, recomputed per function in verify(const Function &).
  DominatorTree DT;

  /// When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// Keep track of the metadata nodes that have been checked already.
  SmallPtrSet<const Metadata *, 32> MDNodes;

  /// Keep track which DISubprogram is attached to which function.
  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;

  /// Track all DICompileUnits visited.
  SmallPtrSet<const Metadata *, 2> CUVisited;

  /// The result type for a landingpad.
  Type *LandingPadResultTy;

  /// Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Whether the current function has a DISubprogram attached to it.
  bool HasDebugInfo = false;

  /// Stores the count of how many objects were passed to llvm.localescape for a
  /// given function and the largest index passed to llvm.localrecover.
  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.
  MapVector<Instruction *, Instruction *> SiblingFuncletInfo;

  /// Cache which blocks are in which funclet, if an EH funclet personality is
  /// in use. Otherwise empty.
  DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  /// Cache of attribute lists verified.
  SmallPtrSet<const void *, 32> AttributeListsVisited;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice, if they have multiple operands. In particular for very large
  // constant expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  // Keeps track of duplicate function argument debug info.
  // NOTE(review): the member declaration this comment documents (upstream
  // line 383; `DebugFnArgs`, which is cleared in verify() below) appears to
  // have been dropped during extraction — restore from upstream.

  TBAAVerifier TBAAVerifyHelper;
  ConvergenceVerifier ConvergenceVerifyHelper;

  SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;

  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);

public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }

  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }

  /// Verify a single function: checks terminators, rebuilds the dominator
  /// tree, visits every instruction, and resets per-function state.
  /// Returns true when the function is well-formed.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can be reused.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }

  /// Verify the module that this instance of \c Verifier was initialized with.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }

private:
  /// Whether a metadata node is allowed to be, or contain, a DILocation.
  enum class AreDebugLocsAllowed { No, Yes };

  /// Metadata that should be treated as a range, with slightly different
  /// requirements.
  enum class RangeLikeMetadataKind {
    Range,           // MD_range
    AbsoluteSymbol,  // MD_absolute_symbol
    NoaliasAddrspace // MD_noalias_addrspace
  };

  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitGlobalIFunc(const GlobalIFunc &GI);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitDIArgList(const DIArgList &AL, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents();
  void visitModuleCommandLines();
  void visitModuleErrnoTBAA();
  void visitModuleFlags();
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
                               RangeLikeMetadataKind Kind);
  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
  void visitNofreeMetadata(Instruction &I, MDNode *MD);
  void visitProfMetadata(Instruction &I, MDNode *MD);
  void visitCallStackMetadata(MDNode *MD);
  void visitMemProfMetadata(Instruction &I, MDNode *MD);
  void visitCallsiteMetadata(Instruction &I, MDNode *MD);
  void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
  void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
  void visitMMRAMetadata(Instruction &I, MDNode *MD);
  void visitAnnotationMetadata(MDNode *Annotation);
  void visitAliasScopeMetadata(const MDNode *MD);
  void visitAliasScopeListMetadata(const MDNode *MD);
  void visitAccessGroupMetadata(const MDNode *MD);

  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  void visit(DbgLabelRecord &DLR);
  void visit(DbgVariableRecord &DVR);
  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visitDbgRecords(Instruction &I);
  void visit(Instruction &I);

  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
  void visitPtrToAddrInst(PtrToAddrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitCallBase(CallBase &Call);
  void visitUnaryOperator(UnaryOperator &U);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminator(Instruction &I);
  void visitBranchInst(BranchInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitCallBrInst(CallBrInst &CBI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
  void visitVPIntrinsic(VPIntrinsic &VPI);
  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitResumeInst(ResumeInst &RI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
  void verifyMustTailCall(CallInst &CI);
  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
  void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
                                    const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                           const Value *V, bool IsIntrinsic, bool IsInlineAsm);
  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
  void verifyUnknownProfileMetadata(MDNode *MD);
  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
  void verifyInlineAsmCall(const CallBase &Call);
  void verifyStatepoint(const CallBase &Call);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyFragmentExpression(const DbgVariableRecord &I);
  template <typename ValueOrMetadata>
  // NOTE(review): a parameter line of this declaration (upstream line 648)
  // appears to have been dropped during extraction — restore from upstream.
  void verifyFragmentExpression(const DIVariable &V,
                                ValueOrMetadata *Desc);
  void verifyFnArgs(const DbgVariableRecord &DVR);
  void verifyNotEntryValue(const DbgVariableRecord &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();

  void verifyAttachedCallBundle(const CallBase &Call,
                                const OperandBundleUse &BU);

  /// Verify the llvm.experimental.noalias.scope.decl declarations
  void verifyNoAliasScopeDecl();
};
666
667} // end anonymous namespace
668
/// We know that cond \p C should be true; if not, report the message via
/// CheckFailed and bail out of the enclosing function. Because it expands to
/// a bare `return;`, it may only be used inside void-returning members.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
677
/// We know that a debug info condition \p C should be true; if not, report the
/// message via DebugInfoCheckFailed and bail out of the enclosing function.
/// Like Check, this expands to a bare `return;` and is only usable in
/// void-returning members.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
687
/// Verify the debug records attached to \p I: the marker must point back at
/// the instruction, PHIs may not carry records, and every record is visited.
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the right-hand side of this `if` (upstream line 699,
    // presumably extracting the record's DILocation) was dropped during
    // extraction — restore from upstream before compiling.
    if (auto *Loc =
      visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
712
713void Verifier::visit(Instruction &I) {
714 visitDbgRecords(I);
715 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
716 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
718}
719
// Helper to iterate over indirect users. By returning false, the callback
// can ask to stop traversing further.
// NOTE(review): two lines were dropped during extraction — the `Visited` set
// parameter (upstream line 722) and the `WorkList` declaration (upstream
// line 727) — restore from upstream; as-is, both names are undeclared here.
static void forEachUser(const Value *User,
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;

  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    // Skip users we have already seen (large constant expressions can reach
    // the same user many times).
    if (!Visited.insert(Cur).second)
      continue;
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
736
/// Verify properties common to all global values: linkage, visibility,
/// storage class, !associated / !absolute_symbol metadata, and that every
/// user of the global lives in this module.
// NOTE(review): several multi-line Check(...) invocations below lost their
// opening line during extraction (upstream lines 738, 773, 786, 791,
// 797-798) — the dangling string-literal lines are the surviving arguments.
// Restore from upstream before compiling.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }
  }

      "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
          "dllexport GlobalValue must have default or protected visibility",
          &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
          "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Walk all (direct and indirect) users and ensure none of them escape to a
  // different module; constants may be shared, so the traversal stops at
  // instructions and functions.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
827
/// Verify a global variable: alignment limits, initializer/type agreement,
/// the special llvm.global_ctors/dtors and llvm.used/compiler.used globals,
/// !dbg attachments, and type restrictions (no scalable vectors, no
/// disallowed target extension types). Ends by running the common
/// visitGlobalValue checks.
// NOTE(review): several multi-line Check(...) invocations and one local
// declaration below lost lines during extraction (upstream lines 841, 847,
// 857, 859-860, 883, 885-886, 898-899, 909, 925) — restore from upstream
// before compiling.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
            "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initalizer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
939
940void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
941 SmallPtrSet<const GlobalAlias*, 4> Visited;
942 Visited.insert(&GA);
943 visitAliaseeSubExpr(Visited, GA, C);
944}
945
// Recursively verify the aliasee expression of GA.  Visited records every
// GlobalAlias reached so far so that alias cycles are detected.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
          cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
      "available_externally alias must point to available_externally "
      "global value",
      &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // A repeated insertion means we came back to an alias we already saw.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk every operand: follow nested aliases through their aliasees and
  // recurse into other constant subexpressions.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
984
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
      "Alias should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, external, or available_externally linkage!",
      &GA);
  // An alias must name an aliasee of exactly its own type, and the aliasee
  // must be a global value or a constant expression over one.
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression looking for cycles and invalid targets.
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1002
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // ifuncs may carry metadata, but never !dbg or !prof attachments.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

      "IFunc resolver must return a pointer", &GI);

  // The resolver's type must be a pointer in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1037
1038void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1039 // There used to be various other llvm.dbg.* nodes, but we don't support
1040 // upgrading them and we want to reserve the namespace for future uses.
1041 if (NMD.getName().starts_with("llvm.dbg."))
1042 CheckDI(NMD.getName() == "llvm.dbg.cu",
1043 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1044 for (const MDNode *MD : NMD.operands()) {
1045 if (NMD.getName() == "llvm.dbg.cu")
1046 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1047
1048 if (!MD)
1049 continue;
1050
1051 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1052 }
1053}
1054
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for this node's concrete class;
  // plain MDTuples need no class-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands: function-local values are never legal here, and
  // DILocations are only allowed when the caller says so.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1109
1110void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1111 Check(MD.getValue(), "Expected valid value", &MD);
1112 Check(!MD.getValue()->getType()->isMetadataTy(),
1113 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1114
1115 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1116 if (!L)
1117 return;
1118
1119 Check(F, "function-local metadata used outside a function", L);
1120
1121 // If this was an instruction, bb, or argument, verify that it is in the
1122 // function that we expect.
1123 Function *ActualF = nullptr;
1124 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1125 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1126 ActualF = I->getParent()->getParent();
1127 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1128 ActualF = BB->getParent();
1129 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1130 ActualF = A->getParent();
1131 assert(ActualF && "Unimplemented function local metadata case!");
1132
1133 Check(ActualF == F, "function-local metadata used in wrong function", L);
1134}
1135
1136void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1137 for (const ValueAsMetadata *VAM : AL.getArgs())
1138 visitValueAsMetadata(*VAM, F);
1139}
1140
1141void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1142 Metadata *MD = MDV.getMetadata();
1143 if (auto *N = dyn_cast<MDNode>(MD)) {
1144 visitMDNode(*N, AreDebugLocsAllowed::No);
1145 return;
1146 }
1147
1148 // Only visit each node once. Metadata can be mutually recursive, so this
1149 // avoids infinite recursion here, as well as being an optimization.
1150 if (!MDNodes.insert(MD).second)
1151 return;
1152
1153 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1154 visitValueAsMetadata(*V, F);
1155
1156 if (auto *AL = dyn_cast<DIArgList>(MD))
1157 visitDIArgList(*AL, F);
1158}
1159
1160static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1161static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1162static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1163
1164void Verifier::visitDILocation(const DILocation &N) {
1165 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1166 "location requires a valid scope", &N, N.getRawScope());
1167 if (auto *IA = N.getRawInlinedAt())
1168 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1169 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1170 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1171}
1172
1173void Verifier::visitGenericDINode(const GenericDINode &N) {
1174 CheckDI(N.getTag(), "invalid tag", &N);
1175}
1176
1177void Verifier::visitDIScope(const DIScope &N) {
1178 if (auto *F = N.getRawFile())
1179 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1180}
1181
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  // Each bound/stride/bias may be a constant, a DIVariable, or a computed
  // DIExpression, or may be absent entirely.
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1209
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are alternative encodings; at most one may be set.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  // A constant count of -1 marks an empty array; anything below is invalid.
  auto Count = N.getCount();
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
      "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1237
1238void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1239 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1240 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1241 "GenericSubrange can have any one of count or upperBound", &N);
1242 auto *CBound = N.getRawCountNode();
1243 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1244 "Count must be signed constant or DIVariable or DIExpression", &N);
1245 auto *LBound = N.getRawLowerBound();
1246 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1247 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1248 "LowerBound must be signed constant or DIVariable or DIExpression",
1249 &N);
1250 auto *UBound = N.getRawUpperBound();
1251 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1252 "UpperBound must be signed constant or DIVariable or DIExpression",
1253 &N);
1254 auto *Stride = N.getRawStride();
1255 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1256 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1257 "Stride must be signed constant or DIVariable or DIExpression", &N);
1258}
1259
1260void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1261 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1262}
1263
void Verifier::visitDIBasicType(const DIBasicType &N) {
  // Basic types are base, unspecified, or string types.
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1274
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types are a refinement of basic types, so run those checks
  // first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1293
1294void Verifier::visitDIStringType(const DIStringType &N) {
1295 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1296 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1297 &N);
1298}
1299
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  // Derived types cover typedefs, qualifiers, pointer/reference kinds,
  // members, inheritance, and several language extensions.
  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  // Set types may only be based on enumerations, subranges, or
  // integral/boolean basic types.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // A DWARF address space only makes sense on pointer/reference-like tags.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1361
1362/// Detect mutually exclusive flags.
1363static bool hasConflictingReferenceFlags(unsigned Flags) {
1364 return ((Flags & DINode::FlagLValueReference) &&
1365 (Flags & DINode::FlagRValueReference)) ||
1366 ((Flags & DINode::FlagTypePassByValue) &&
1367 (Flags & DINode::FlagTypePassByReference));
1368}
1369
1370void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1371 auto *Params = dyn_cast<MDTuple>(&RawParams);
1372 CheckDI(Params, "invalid template params", &N, &RawParams);
1373 for (Metadata *Op : Params->operands()) {
1374 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1375 &N, Params, Op);
1376 }
1377}
1378
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // This flag bit is obsolete and explicitly rejected.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vectors are arrays with exactly one subrange dimension.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The array-descriptor operands below are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1453
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  // The type array, when present, is an MDTuple whose entries are all type
  // references (or null).
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1465
1466void Verifier::visitDIFile(const DIFile &N) {
1467 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1468 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1469 if (Checksum) {
1470 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1471 "invalid checksum kind", &N);
1472 size_t Size;
1473 switch (Checksum->Kind) {
1474 case DIFile::CSK_MD5:
1475 Size = 32;
1476 break;
1477 case DIFile::CSK_SHA1:
1478 Size = 40;
1479 break;
1480 case DIFile::CSK_SHA256:
1481 Size = 64;
1482 break;
1483 }
1484 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1485 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1486 "invalid checksum", &N);
1487 }
1488}
1489
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Each operand list, when present, must be an MDTuple whose entries have
  // the expected metadata class.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations, not definitions.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this compile unit as visited.
  CUVisited.insert(&N);
}
1543
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration operand must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
          "invalid retained nodes, expected DILocalVariable, DILabel or "
          "DIImportedEntity",
          &N, Node, Op);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  // Thrown types, when present, form an MDTuple of DITypes.
  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  // DIFlagAllCallsDescribed is only meaningful on definitions.
  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1608
1609void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1610 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1611 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1612 "invalid local scope", &N, N.getRawScope());
1613 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1614 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1615}
1616
1617void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1618 visitDILexicalBlockBase(N);
1619
1620 CheckDI(N.getLine() || !N.getColumn(),
1621 "cannot have column info without line info", &N);
1622}
1623
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  // Shares the structural checks of all lexical block nodes.
  visitDILexicalBlockBase(N);
}
1627
1628void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1629 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1630 if (auto *S = N.getRawScope())
1631 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1632 if (auto *S = N.getRawDecl())
1633 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1634}
1635
1636void Verifier::visitDINamespace(const DINamespace &N) {
1637 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1638 if (auto *S = N.getRawScope())
1639 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1640}
1641
1642void Verifier::visitDIMacro(const DIMacro &N) {
1643 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1644 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1645 "invalid macinfo type", &N);
1646 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1647 if (!N.getValue().empty()) {
1648 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1649 }
1650}
1651
1652void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1653 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1654 "invalid macinfo type", &N);
1655 if (auto *F = N.getRawFile())
1656 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1657
1658 if (auto *Array = N.getRawElements()) {
1659 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1660 for (Metadata *Op : N.getElements()->operands()) {
1661 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1662 }
1663 }
1664}
1665
1666void Verifier::visitDIModule(const DIModule &N) {
1667 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1668 CheckDI(!N.getName().empty(), "anonymous module", &N);
1669}
1670
1671void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1672 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1673}
1674
1675void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1676 visitDITemplateParameter(N);
1677
1678 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1679 &N);
1680}
1681
1682void Verifier::visitDITemplateValueParameter(
1683 const DITemplateValueParameter &N) {
1684 visitDITemplateParameter(N);
1685
1686 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1687 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1688 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1689 "invalid tag", &N);
1690}
1691
1692void Verifier::visitDIVariable(const DIVariable &N) {
1693 if (auto *S = N.getRawScope())
1694 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1695 if (auto *F = N.getRawFile())
1696 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1697}
1698
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1713
1714void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1715 // Checks common to all variables.
1716 visitDIVariable(N);
1717
1718 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1719 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1720 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1721 "local variable requires a valid scope", &N, N.getRawScope());
1722 if (auto Ty = N.getType())
1723 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1724}
1725
1726void Verifier::visitDIAssignID(const DIAssignID &N) {
1727 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1728 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1729}
1730
1731void Verifier::visitDILabel(const DILabel &N) {
1732 if (auto *S = N.getRawScope())
1733 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1734 if (auto *F = N.getRawFile())
1735 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1736
1737 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1738 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1739 "label requires a valid scope", &N, N.getRawScope());
1740}
1741
1742void Verifier::visitDIExpression(const DIExpression &N) {
1743 CheckDI(N.isValid(), "invalid expression", &N);
1744}
1745
1746void Verifier::visitDIGlobalVariableExpression(
1747 const DIGlobalVariableExpression &GVE) {
1748 CheckDI(GVE.getVariable(), "missing variable");
1749 if (auto *Var = GVE.getVariable())
1750 visitDIGlobalVariable(*Var);
1751 if (auto *Expr = GVE.getExpression()) {
1752 visitDIExpression(*Expr);
1753 if (auto Fragment = Expr->getFragmentInfo())
1754 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1755 }
1756}
1757
1758void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1759 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1760 if (auto *T = N.getRawType())
1761 CheckDI(isType(T), "invalid type ref", &N, T);
1762 if (auto *F = N.getRawFile())
1763 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1764}
1765
1766void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1767 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1768 N.getTag() == dwarf::DW_TAG_imported_declaration,
1769 "invalid tag", &N);
1770 if (auto *S = N.getRawScope())
1771 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1772 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1773 N.getRawEntity());
1774}
1775
1776void Verifier::visitComdat(const Comdat &C) {
1777 // In COFF the Module is invalid if the GlobalValue has private linkage.
1778 // Entities with private linkage don't have entries in the symbol table.
1779 if (TT.isOSBinFormatCOFF())
1780 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1781 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1782 GV);
1783}
1784
1785void Verifier::visitModuleIdents() {
1786 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1787 if (!Idents)
1788 return;
1789
1790 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1791 // Scan each llvm.ident entry and make sure that this requirement is met.
1792 for (const MDNode *N : Idents->operands()) {
1793 Check(N->getNumOperands() == 1,
1794 "incorrect number of operands in llvm.ident metadata", N);
1795 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1796 ("invalid value for llvm.ident metadata entry operand"
1797 "(the operand should be a string)"),
1798 N->getOperand(0));
1799 }
1800}
1801
1802void Verifier::visitModuleCommandLines() {
1803 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1804 if (!CommandLines)
1805 return;
1806
1807 // llvm.commandline takes a list of metadata entry. Each entry has only one
1808 // string. Scan each llvm.commandline entry and make sure that this
1809 // requirement is met.
1810 for (const MDNode *N : CommandLines->operands()) {
1811 Check(N->getNumOperands() == 1,
1812 "incorrect number of operands in llvm.commandline metadata", N);
1813 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1814 ("invalid value for llvm.commandline metadata entry operand"
1815 "(the operand should be a string)"),
1816 N->getOperand(0));
1817 }
1818}
1819
1820void Verifier::visitModuleErrnoTBAA() {
1821 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1822 if (!ErrnoTBAA)
1823 return;
1824
1825 Check(ErrnoTBAA->getNumOperands() >= 1,
1826 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1827
1828 for (const MDNode *N : ErrnoTBAA->operands())
1829 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1830}
1831
1832void Verifier::visitModuleFlags() {
1833 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1834 if (!Flags) return;
1835
1836 // Scan each flag, and track the flags and requirements.
1837 DenseMap<const MDString*, const MDNode*> SeenIDs;
1838 SmallVector<const MDNode*, 16> Requirements;
1839 uint64_t PAuthABIPlatform = -1;
1840 uint64_t PAuthABIVersion = -1;
1841 for (const MDNode *MDN : Flags->operands()) {
1842 visitModuleFlag(MDN, SeenIDs, Requirements);
1843 if (MDN->getNumOperands() != 3)
1844 continue;
1845 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1846 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1847 if (const auto *PAP =
1849 PAuthABIPlatform = PAP->getZExtValue();
1850 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1851 if (const auto *PAV =
1853 PAuthABIVersion = PAV->getZExtValue();
1854 }
1855 }
1856 }
1857
1858 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1859 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1860 "'aarch64-elf-pauthabi-version' module flags must be present");
1861
1862 // Validate that the requirements in the module are valid.
1863 for (const MDNode *Requirement : Requirements) {
1864 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1865 const Metadata *ReqValue = Requirement->getOperand(1);
1866
1867 const MDNode *Op = SeenIDs.lookup(Flag);
1868 if (!Op) {
1869 CheckFailed("invalid requirement on flag, flag is not present in module",
1870 Flag);
1871 continue;
1872 }
1873
1874 if (Op->getOperand(2) != ReqValue) {
1875 CheckFailed(("invalid requirement on flag, "
1876 "flag does not have the required value"),
1877 Flag);
1878 continue;
1879 }
1880 }
1881}
1882
1883void
1884Verifier::visitModuleFlag(const MDNode *Op,
1885 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1886 SmallVectorImpl<const MDNode *> &Requirements) {
1887 // Each module flag should have three arguments, the merge behavior (a
1888 // constant int), the flag ID (an MDString), and the value.
1889 Check(Op->getNumOperands() == 3,
1890 "incorrect number of operands in module flag", Op);
1891 Module::ModFlagBehavior MFB;
1892 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1894 "invalid behavior operand in module flag (expected constant integer)",
1895 Op->getOperand(0));
1896 Check(false,
1897 "invalid behavior operand in module flag (unexpected constant)",
1898 Op->getOperand(0));
1899 }
1900 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1901 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1902 Op->getOperand(1));
1903
1904 // Check the values for behaviors with additional requirements.
1905 switch (MFB) {
1906 case Module::Error:
1907 case Module::Warning:
1908 case Module::Override:
1909 // These behavior types accept any value.
1910 break;
1911
1912 case Module::Min: {
1913 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1914 Check(V && V->getValue().isNonNegative(),
1915 "invalid value for 'min' module flag (expected constant non-negative "
1916 "integer)",
1917 Op->getOperand(2));
1918 break;
1919 }
1920
1921 case Module::Max: {
1923 "invalid value for 'max' module flag (expected constant integer)",
1924 Op->getOperand(2));
1925 break;
1926 }
1927
1928 case Module::Require: {
1929 // The value should itself be an MDNode with two operands, a flag ID (an
1930 // MDString), and a value.
1931 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1932 Check(Value && Value->getNumOperands() == 2,
1933 "invalid value for 'require' module flag (expected metadata pair)",
1934 Op->getOperand(2));
1935 Check(isa<MDString>(Value->getOperand(0)),
1936 ("invalid value for 'require' module flag "
1937 "(first value operand should be a string)"),
1938 Value->getOperand(0));
1939
1940 // Append it to the list of requirements, to check once all module flags are
1941 // scanned.
1942 Requirements.push_back(Value);
1943 break;
1944 }
1945
1946 case Module::Append:
1947 case Module::AppendUnique: {
1948 // These behavior types require the operand be an MDNode.
1949 Check(isa<MDNode>(Op->getOperand(2)),
1950 "invalid value for 'append'-type module flag "
1951 "(expected a metadata node)",
1952 Op->getOperand(2));
1953 break;
1954 }
1955 }
1956
1957 // Unless this is a "requires" flag, check the ID is unique.
1958 if (MFB != Module::Require) {
1959 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1960 Check(Inserted,
1961 "module flag identifiers must be unique (or of 'require' type)", ID);
1962 }
1963
1964 if (ID->getString() == "wchar_size") {
1965 ConstantInt *Value
1967 Check(Value, "wchar_size metadata requires constant integer argument");
1968 }
1969
1970 if (ID->getString() == "Linker Options") {
1971 // If the llvm.linker.options named metadata exists, we assume that the
1972 // bitcode reader has upgraded the module flag. Otherwise the flag might
1973 // have been created by a client directly.
1974 Check(M.getNamedMetadata("llvm.linker.options"),
1975 "'Linker Options' named metadata no longer supported");
1976 }
1977
1978 if (ID->getString() == "SemanticInterposition") {
1979 ConstantInt *Value =
1981 Check(Value,
1982 "SemanticInterposition metadata requires constant integer argument");
1983 }
1984
1985 if (ID->getString() == "CG Profile") {
1986 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1987 visitModuleFlagCGProfileEntry(MDO);
1988 }
1989}
1990
1991void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1992 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1993 if (!FuncMDO)
1994 return;
1995 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1996 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1997 "expected a Function or null", FuncMDO);
1998 };
1999 auto Node = dyn_cast_or_null<MDNode>(MDO);
2000 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2001 CheckFunction(Node->getOperand(0));
2002 CheckFunction(Node->getOperand(1));
2003 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2004 Check(Count && Count->getType()->isIntegerTy(),
2005 "expected an integer constant", Node->getOperand(2));
2006}
2007
// Verify well-formedness of the attributes in \p Attrs. For string attributes
// declared as "strbool" in Attributes.td, the value must be empty, "true" or
// "false". For enum/int attributes, an argument must be present exactly when
// the attribute kind is an int-attribute kind. \p V is the attribute holder,
// printed in diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand one validity check per strbool attribute from the generated
      // Attributes.inc; ATTRIBUTE_ENUM entries expand to nothing. The local
      // `V` inside the macro deliberately shadows the parameter: it is the
      // attribute's string value.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
          ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // A non-string attribute carries an integer argument iff its kind is an
    // int-attribute kind; any mismatch is a malformed attribute.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2033
2034// VerifyParameterAttrs - Check the given attributes for an argument or return
2035// value of the specified type. The value V is printed in error messages.
2036void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2037 const Value *V) {
2038 if (!Attrs.hasAttributes())
2039 return;
2040
2041 verifyAttributeTypes(Attrs, V);
2042
2043 for (Attribute Attr : Attrs)
2044 Check(Attr.isStringAttribute() ||
2045 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2046 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2047 V);
2048
2049 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2050 unsigned AttrCount =
2051 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2052 Check(AttrCount == 1,
2053 "Attribute 'immarg' is incompatible with other attributes except the "
2054 "'range' attribute",
2055 V);
2056 }
2057
2058 // Check for mutually incompatible attributes. Only inreg is compatible with
2059 // sret.
2060 unsigned AttrCount = 0;
2061 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2062 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2063 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2064 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2065 Attrs.hasAttribute(Attribute::InReg);
2066 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2067 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2068 Check(AttrCount <= 1,
2069 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2070 "'byref', and 'sret' are incompatible!",
2071 V);
2072
2073 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2074 Attrs.hasAttribute(Attribute::ReadOnly)),
2075 "Attributes "
2076 "'inalloca and readonly' are incompatible!",
2077 V);
2078
2079 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2080 Attrs.hasAttribute(Attribute::Returned)),
2081 "Attributes "
2082 "'sret and returned' are incompatible!",
2083 V);
2084
2085 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2086 Attrs.hasAttribute(Attribute::SExt)),
2087 "Attributes "
2088 "'zeroext and signext' are incompatible!",
2089 V);
2090
2091 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2092 Attrs.hasAttribute(Attribute::ReadOnly)),
2093 "Attributes "
2094 "'readnone and readonly' are incompatible!",
2095 V);
2096
2097 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2098 Attrs.hasAttribute(Attribute::WriteOnly)),
2099 "Attributes "
2100 "'readnone and writeonly' are incompatible!",
2101 V);
2102
2103 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2104 Attrs.hasAttribute(Attribute::WriteOnly)),
2105 "Attributes "
2106 "'readonly and writeonly' are incompatible!",
2107 V);
2108
2109 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2110 Attrs.hasAttribute(Attribute::AlwaysInline)),
2111 "Attributes "
2112 "'noinline and alwaysinline' are incompatible!",
2113 V);
2114
2115 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2116 Attrs.hasAttribute(Attribute::ReadNone)),
2117 "Attributes writable and readnone are incompatible!", V);
2118
2119 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2120 Attrs.hasAttribute(Attribute::ReadOnly)),
2121 "Attributes writable and readonly are incompatible!", V);
2122
2123 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2124 for (Attribute Attr : Attrs) {
2125 if (!Attr.isStringAttribute() &&
2126 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2127 CheckFailed("Attribute '" + Attr.getAsString() +
2128 "' applied to incompatible type!", V);
2129 return;
2130 }
2131 }
2132
2133 if (isa<PointerType>(Ty)) {
2134 if (Attrs.hasAttribute(Attribute::Alignment)) {
2135 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2136 Check(AttrAlign.value() <= Value::MaximumAlignment,
2137 "huge alignment values are unsupported", V);
2138 }
2139 if (Attrs.hasAttribute(Attribute::ByVal)) {
2140 Type *ByValTy = Attrs.getByValType();
2141 SmallPtrSet<Type *, 4> Visited;
2142 Check(ByValTy->isSized(&Visited),
2143 "Attribute 'byval' does not support unsized types!", V);
2144 // Check if it is or contains a target extension type that disallows being
2145 // used on the stack.
2147 "'byval' argument has illegal target extension type", V);
2148 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2149 "huge 'byval' arguments are unsupported", V);
2150 }
2151 if (Attrs.hasAttribute(Attribute::ByRef)) {
2152 SmallPtrSet<Type *, 4> Visited;
2153 Check(Attrs.getByRefType()->isSized(&Visited),
2154 "Attribute 'byref' does not support unsized types!", V);
2155 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2156 (1ULL << 32),
2157 "huge 'byref' arguments are unsupported", V);
2158 }
2159 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2160 SmallPtrSet<Type *, 4> Visited;
2161 Check(Attrs.getInAllocaType()->isSized(&Visited),
2162 "Attribute 'inalloca' does not support unsized types!", V);
2163 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2164 (1ULL << 32),
2165 "huge 'inalloca' arguments are unsupported", V);
2166 }
2167 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2168 SmallPtrSet<Type *, 4> Visited;
2169 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2170 "Attribute 'preallocated' does not support unsized types!", V);
2171 Check(
2172 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2173 (1ULL << 32),
2174 "huge 'preallocated' arguments are unsupported", V);
2175 }
2176 }
2177
2178 if (Attrs.hasAttribute(Attribute::Initializes)) {
2179 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2180 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2181 V);
2183 "Attribute 'initializes' does not support unordered ranges", V);
2184 }
2185
2186 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2187 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2188 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2189 V);
2190 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2191 "Invalid value for 'nofpclass' test mask", V);
2192 }
2193 if (Attrs.hasAttribute(Attribute::Range)) {
2194 const ConstantRange &CR =
2195 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2197 "Range bit width must match type bit width!", V);
2198 }
2199}
2200
2201void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2202 const Value *V) {
2203 if (Attrs.hasFnAttr(Attr)) {
2204 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2205 unsigned N;
2206 if (S.getAsInteger(10, N))
2207 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2208 }
2209}
2210
2211// Check parameter attributes against a function type.
2212// The value V is printed in error messages.
2213void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2214 const Value *V, bool IsIntrinsic,
2215 bool IsInlineAsm) {
2216 if (Attrs.isEmpty())
2217 return;
2218
2219 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2220 Check(Attrs.hasParentContext(Context),
2221 "Attribute list does not match Module context!", &Attrs, V);
2222 for (const auto &AttrSet : Attrs) {
2223 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2224 "Attribute set does not match Module context!", &AttrSet, V);
2225 for (const auto &A : AttrSet) {
2226 Check(A.hasParentContext(Context),
2227 "Attribute does not match Module context!", &A, V);
2228 }
2229 }
2230 }
2231
2232 bool SawNest = false;
2233 bool SawReturned = false;
2234 bool SawSRet = false;
2235 bool SawSwiftSelf = false;
2236 bool SawSwiftAsync = false;
2237 bool SawSwiftError = false;
2238
2239 // Verify return value attributes.
2240 AttributeSet RetAttrs = Attrs.getRetAttrs();
2241 for (Attribute RetAttr : RetAttrs)
2242 Check(RetAttr.isStringAttribute() ||
2243 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2244 "Attribute '" + RetAttr.getAsString() +
2245 "' does not apply to function return values",
2246 V);
2247
2248 unsigned MaxParameterWidth = 0;
2249 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2250 if (Ty->isVectorTy()) {
2251 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2252 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2253 if (Size > MaxParameterWidth)
2254 MaxParameterWidth = Size;
2255 }
2256 }
2257 };
2258 GetMaxParameterWidth(FT->getReturnType());
2259 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2260
2261 // Verify parameter attributes.
2262 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2263 Type *Ty = FT->getParamType(i);
2264 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2265
2266 if (!IsIntrinsic) {
2267 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2268 "immarg attribute only applies to intrinsics", V);
2269 if (!IsInlineAsm)
2270 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2271 "Attribute 'elementtype' can only be applied to intrinsics"
2272 " and inline asm.",
2273 V);
2274 }
2275
2276 verifyParameterAttrs(ArgAttrs, Ty, V);
2277 GetMaxParameterWidth(Ty);
2278
2279 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2280 Check(!SawNest, "More than one parameter has attribute nest!", V);
2281 SawNest = true;
2282 }
2283
2284 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2285 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2286 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2287 "Incompatible argument and return types for 'returned' attribute",
2288 V);
2289 SawReturned = true;
2290 }
2291
2292 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2293 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2294 Check(i == 0 || i == 1,
2295 "Attribute 'sret' is not on first or second parameter!", V);
2296 SawSRet = true;
2297 }
2298
2299 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2300 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2301 SawSwiftSelf = true;
2302 }
2303
2304 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2305 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2306 SawSwiftAsync = true;
2307 }
2308
2309 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2310 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2311 SawSwiftError = true;
2312 }
2313
2314 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2315 Check(i == FT->getNumParams() - 1,
2316 "inalloca isn't on the last parameter!", V);
2317 }
2318 }
2319
2320 if (!Attrs.hasFnAttrs())
2321 return;
2322
2323 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2324 for (Attribute FnAttr : Attrs.getFnAttrs())
2325 Check(FnAttr.isStringAttribute() ||
2326 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2327 "Attribute '" + FnAttr.getAsString() +
2328 "' does not apply to functions!",
2329 V);
2330
2331 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2332 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2333 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2334
2335 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2336 Check(Attrs.hasFnAttr(Attribute::NoInline),
2337 "Attribute 'optnone' requires 'noinline'!", V);
2338
2339 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2340 "Attributes 'optsize and optnone' are incompatible!", V);
2341
2342 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2343 "Attributes 'minsize and optnone' are incompatible!", V);
2344
2345 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2346 "Attributes 'optdebug and optnone' are incompatible!", V);
2347 }
2348
2349 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2350 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2351 "Attributes "
2352 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2353 V);
2354
2355 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2356 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2357 "Attributes 'optsize and optdebug' are incompatible!", V);
2358
2359 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2360 "Attributes 'minsize and optdebug' are incompatible!", V);
2361 }
2362
2363 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2364 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2365 "Attribute writable and memory without argmem: write are incompatible!",
2366 V);
2367
2368 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2369 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2370 "Attributes 'aarch64_pstate_sm_enabled and "
2371 "aarch64_pstate_sm_compatible' are incompatible!",
2372 V);
2373 }
2374
2375 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2376 Attrs.hasFnAttr("aarch64_inout_za") +
2377 Attrs.hasFnAttr("aarch64_out_za") +
2378 Attrs.hasFnAttr("aarch64_preserves_za") +
2379 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2380 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2381 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2382 "'aarch64_za_state_agnostic' are mutually exclusive",
2383 V);
2384
2385 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2386 Attrs.hasFnAttr("aarch64_in_zt0") +
2387 Attrs.hasFnAttr("aarch64_inout_zt0") +
2388 Attrs.hasFnAttr("aarch64_out_zt0") +
2389 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2390 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2391 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2392 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2393 "'aarch64_za_state_agnostic' are mutually exclusive",
2394 V);
2395
2396 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2397 const GlobalValue *GV = cast<GlobalValue>(V);
2399 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2400 }
2401
2402 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2403 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2404 if (ParamNo >= FT->getNumParams()) {
2405 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2406 return false;
2407 }
2408
2409 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2410 CheckFailed("'allocsize' " + Name +
2411 " argument must refer to an integer parameter",
2412 V);
2413 return false;
2414 }
2415
2416 return true;
2417 };
2418
2419 if (!CheckParam("element size", Args->first))
2420 return;
2421
2422 if (Args->second && !CheckParam("number of elements", *Args->second))
2423 return;
2424 }
2425
2426 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2427 AllocFnKind K = Attrs.getAllocKind();
2429 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2430 if (!is_contained(
2431 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2432 Type))
2433 CheckFailed(
2434 "'allockind()' requires exactly one of alloc, realloc, and free");
2435 if ((Type == AllocFnKind::Free) &&
2436 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2437 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2438 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2439 "or aligned modifiers.");
2440 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2441 if ((K & ZeroedUninit) == ZeroedUninit)
2442 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2443 }
2444
2445 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2446 StringRef S = A.getValueAsString();
2447 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2448 Function *Variant = M.getFunction(S);
2449 if (Variant) {
2450 Attribute Family = Attrs.getFnAttr("alloc-family");
2451 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2452 if (Family.isValid())
2453 Check(VariantFamily.isValid() &&
2454 VariantFamily.getValueAsString() == Family.getValueAsString(),
2455 "'alloc-variant-zeroed' must name a function belonging to the "
2456 "same 'alloc-family'");
2457
2458 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2459 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2460 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2461 "'alloc-variant-zeroed' must name a function with "
2462 "'allockind(\"zeroed\")'");
2463
2464 Check(FT == Variant->getFunctionType(),
2465 "'alloc-variant-zeroed' must name a function with the same "
2466 "signature");
2467 }
2468 }
2469
2470 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2471 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2472 if (VScaleMin == 0)
2473 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2474 else if (!isPowerOf2_32(VScaleMin))
2475 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2476 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2477 if (VScaleMax && VScaleMin > VScaleMax)
2478 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2479 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2480 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2481 }
2482
2483 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2484 StringRef FP = FPAttr.getValueAsString();
2485 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2486 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2487 }
2488
2489 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2490 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2491 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2492 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2493 .getValueAsString()
2494 .empty(),
2495 "\"patchable-function-entry-section\" must not be empty");
2496 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2497
2498 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2499 StringRef S = A.getValueAsString();
2500 if (S != "none" && S != "all" && S != "non-leaf")
2501 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2502 }
2503
2504 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2505 StringRef S = A.getValueAsString();
2506 if (S != "a_key" && S != "b_key")
2507 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2508 V);
2509 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2510 CheckFailed(
2511 "'sign-return-address-key' present without `sign-return-address`");
2512 }
2513 }
2514
2515 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2516 StringRef S = A.getValueAsString();
2517 if (S != "" && S != "true" && S != "false")
2518 CheckFailed(
2519 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2520 }
2521
2522 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2523 StringRef S = A.getValueAsString();
2524 if (S != "" && S != "true" && S != "false")
2525 CheckFailed(
2526 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2527 }
2528
2529 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2530 StringRef S = A.getValueAsString();
2531 if (S != "" && S != "true" && S != "false")
2532 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2533 V);
2534 }
2535
2536 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2537 StringRef S = A.getValueAsString();
2538 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2539 if (!Info)
2540 CheckFailed("invalid name for a VFABI variant: " + S, V);
2541 }
2542
2543 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2544 StringRef S = A.getValueAsString();
2546 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2547 }
2548
2549 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2550 StringRef S = A.getValueAsString();
2552 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2553 V);
2554 }
2555}
2556void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2557 Check(MD->getNumOperands() == 2,
2558 "'unknown' !prof should have a single additional operand", MD);
2559 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2560 Check(PassName != nullptr,
2561 "'unknown' !prof should have an additional operand of type "
2562 "string");
2563 Check(!PassName->getString().empty(),
2564 "the 'unknown' !prof operand should not be an empty string");
2565}
2566
2567void Verifier::verifyFunctionMetadata(
2568 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2569 for (const auto &Pair : MDs) {
2570 if (Pair.first == LLVMContext::MD_prof) {
2571 MDNode *MD = Pair.second;
2572 Check(MD->getNumOperands() >= 2,
2573 "!prof annotations should have no less than 2 operands", MD);
2574 // We may have functions that are synthesized by the compiler, e.g. in
2575 // WPD, that we can't currently determine the entry count.
2576 if (MD->getOperand(0).equalsStr(
2578 verifyUnknownProfileMetadata(MD);
2579 continue;
2580 }
2581
2582 // Check first operand.
2583 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2584 MD);
2586 "expected string with name of the !prof annotation", MD);
2587 MDString *MDS = cast<MDString>(MD->getOperand(0));
2588 StringRef ProfName = MDS->getString();
2591 "first operand should be 'function_entry_count'"
2592 " or 'synthetic_function_entry_count'",
2593 MD);
2594
2595 // Check second operand.
2596 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2597 MD);
2599 "expected integer argument to function_entry_count", MD);
2600 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2601 MDNode *MD = Pair.second;
2602 Check(MD->getNumOperands() == 1,
2603 "!kcfi_type must have exactly one operand", MD);
2604 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2605 MD);
2607 "expected a constant operand for !kcfi_type", MD);
2608 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2609 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2610 "expected a constant integer operand for !kcfi_type", MD);
2612 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2613 }
2614 }
2615}
2616
2617void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2618 if (!ConstantExprVisited.insert(EntryC).second)
2619 return;
2620
2622 Stack.push_back(EntryC);
2623
2624 while (!Stack.empty()) {
2625 const Constant *C = Stack.pop_back_val();
2626
2627 // Check this constant expression.
2628 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2629 visitConstantExpr(CE);
2630
2631 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2632 visitConstantPtrAuth(CPA);
2633
2634 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2635 // Global Values get visited separately, but we do need to make sure
2636 // that the global value is in the correct module
2637 Check(GV->getParent() == &M, "Referencing global in another module!",
2638 EntryC, &M, GV, GV->getParent());
2639 continue;
2640 }
2641
2642 // Visit all sub-expressions.
2643 for (const Use &U : C->operands()) {
2644 const auto *OpC = dyn_cast<Constant>(U);
2645 if (!OpC)
2646 continue;
2647 if (!ConstantExprVisited.insert(OpC).second)
2648 continue;
2649 Stack.push_back(OpC);
2650 }
2651 }
2652}
2653
2654void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2655 if (CE->getOpcode() == Instruction::BitCast)
2656 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2657 CE->getType()),
2658 "Invalid bitcast", CE);
2659 else if (CE->getOpcode() == Instruction::PtrToAddr)
2660 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2661}
2662
2663void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2664 Check(CPA->getPointer()->getType()->isPointerTy(),
2665 "signed ptrauth constant base pointer must have pointer type");
2666
2667 Check(CPA->getType() == CPA->getPointer()->getType(),
2668 "signed ptrauth constant must have same type as its base pointer");
2669
2670 Check(CPA->getKey()->getBitWidth() == 32,
2671 "signed ptrauth constant key must be i32 constant integer");
2672
2674 "signed ptrauth constant address discriminator must be a pointer");
2675
2676 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2677 "signed ptrauth constant discriminator must be i64 constant integer");
2678}
2679
2680bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2681 // There shouldn't be more attribute sets than there are parameters plus the
2682 // function and return value.
2683 return Attrs.getNumAttrSets() <= Params + 2;
2684}
2685
2686void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2687 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2688 unsigned ArgNo = 0;
2689 unsigned LabelNo = 0;
2690 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2691 if (CI.Type == InlineAsm::isLabel) {
2692 ++LabelNo;
2693 continue;
2694 }
2695
2696 // Only deal with constraints that correspond to call arguments.
2697 if (!CI.hasArg())
2698 continue;
2699
2700 if (CI.isIndirect) {
2701 const Value *Arg = Call.getArgOperand(ArgNo);
2702 Check(Arg->getType()->isPointerTy(),
2703 "Operand for indirect constraint must have pointer type", &Call);
2704
2706 "Operand for indirect constraint must have elementtype attribute",
2707 &Call);
2708 } else {
2709 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2710 "Elementtype attribute can only be applied for indirect "
2711 "constraints",
2712 &Call);
2713 }
2714
2715 ArgNo++;
2716 }
2717
2718 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2719 Check(LabelNo == CallBr->getNumIndirectDests(),
2720 "Number of label constraints does not match number of callbr dests",
2721 &Call);
2722 } else {
2723 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2724 &Call);
2725 }
2726}
2727
2728/// Verify that statepoint intrinsic is well formed.
2729void Verifier::verifyStatepoint(const CallBase &Call) {
2730 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2731
2734 "gc.statepoint must read and write all memory to preserve "
2735 "reordering restrictions required by safepoint semantics",
2736 Call);
2737
2738 const int64_t NumPatchBytes =
2739 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2740 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2741 Check(NumPatchBytes >= 0,
2742 "gc.statepoint number of patchable bytes must be "
2743 "positive",
2744 Call);
2745
2746 Type *TargetElemType = Call.getParamElementType(2);
2747 Check(TargetElemType,
2748 "gc.statepoint callee argument must have elementtype attribute", Call);
2749 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2750 Check(TargetFuncType,
2751 "gc.statepoint callee elementtype must be function type", Call);
2752
2753 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2754 Check(NumCallArgs >= 0,
2755 "gc.statepoint number of arguments to underlying call "
2756 "must be positive",
2757 Call);
2758 const int NumParams = (int)TargetFuncType->getNumParams();
2759 if (TargetFuncType->isVarArg()) {
2760 Check(NumCallArgs >= NumParams,
2761 "gc.statepoint mismatch in number of vararg call args", Call);
2762
2763 // TODO: Remove this limitation
2764 Check(TargetFuncType->getReturnType()->isVoidTy(),
2765 "gc.statepoint doesn't support wrapping non-void "
2766 "vararg functions yet",
2767 Call);
2768 } else
2769 Check(NumCallArgs == NumParams,
2770 "gc.statepoint mismatch in number of call args", Call);
2771
2772 const uint64_t Flags
2773 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2774 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2775 "unknown flag used in gc.statepoint flags argument", Call);
2776
2777 // Verify that the types of the call parameter arguments match
2778 // the type of the wrapped callee.
2779 AttributeList Attrs = Call.getAttributes();
2780 for (int i = 0; i < NumParams; i++) {
2781 Type *ParamType = TargetFuncType->getParamType(i);
2782 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2783 Check(ArgType == ParamType,
2784 "gc.statepoint call argument does not match wrapped "
2785 "function type",
2786 Call);
2787
2788 if (TargetFuncType->isVarArg()) {
2789 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2790 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2791 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2792 }
2793 }
2794
2795 const int EndCallArgsInx = 4 + NumCallArgs;
2796
2797 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2798 Check(isa<ConstantInt>(NumTransitionArgsV),
2799 "gc.statepoint number of transition arguments "
2800 "must be constant integer",
2801 Call);
2802 const int NumTransitionArgs =
2803 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2804 Check(NumTransitionArgs == 0,
2805 "gc.statepoint w/inline transition bundle is deprecated", Call);
2806 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2807
2808 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2809 Check(isa<ConstantInt>(NumDeoptArgsV),
2810 "gc.statepoint number of deoptimization arguments "
2811 "must be constant integer",
2812 Call);
2813 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2814 Check(NumDeoptArgs == 0,
2815 "gc.statepoint w/inline deopt operands is deprecated", Call);
2816
2817 const int ExpectedNumArgs = 7 + NumCallArgs;
2818 Check(ExpectedNumArgs == (int)Call.arg_size(),
2819 "gc.statepoint too many arguments", Call);
2820
2821 // Check that the only uses of this gc.statepoint are gc.result or
2822 // gc.relocate calls which are tied to this statepoint and thus part
2823 // of the same statepoint sequence
2824 for (const User *U : Call.users()) {
2825 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2826 Check(UserCall, "illegal use of statepoint token", Call, U);
2827 if (!UserCall)
2828 continue;
2829 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2830 "gc.result or gc.relocate are the only value uses "
2831 "of a gc.statepoint",
2832 Call, U);
2833 if (isa<GCResultInst>(UserCall)) {
2834 Check(UserCall->getArgOperand(0) == &Call,
2835 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2836 } else if (isa<GCRelocateInst>(Call)) {
2837 Check(UserCall->getArgOperand(0) == &Call,
2838 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2839 }
2840 }
2841
2842 // Note: It is legal for a single derived pointer to be listed multiple
2843 // times. It's non-optimal, but it is legal. It can also happen after
2844 // insertion if we strip a bitcast away.
2845 // Note: It is really tempting to check that each base is relocated and
2846 // that a derived pointer is never reused as a base pointer. This turns
2847 // out to be problematic since optimizations run after safepoint insertion
2848 // can recognize equality properties that the insertion logic doesn't know
2849 // about. See example statepoint.ll in the verifier subdirectory
2850}
2851
2852void Verifier::verifyFrameRecoverIndices() {
2853 for (auto &Counts : FrameEscapeInfo) {
2854 Function *F = Counts.first;
2855 unsigned EscapedObjectCount = Counts.second.first;
2856 unsigned MaxRecoveredIndex = Counts.second.second;
2857 Check(MaxRecoveredIndex <= EscapedObjectCount,
2858 "all indices passed to llvm.localrecover must be less than the "
2859 "number of arguments passed to llvm.localescape in the parent "
2860 "function",
2861 F);
2862 }
2863}
2864
2865static Instruction *getSuccPad(Instruction *Terminator) {
2866 BasicBlock *UnwindDest;
2867 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2868 UnwindDest = II->getUnwindDest();
2869 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2870 UnwindDest = CSI->getUnwindDest();
2871 else
2872 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2873 return &*UnwindDest->getFirstNonPHIIt();
2874}
2875
// Verify that sibling EH funclets do not unwind to each other in a cycle:
// following unwind edges from any pad must never return to a pad already on
// the current chain.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Pads fully processed on a previous chain walk; safe to skip.
  SmallPtrSet<Instruction *, 8> Visited;
  // Pads on the unwind chain currently being walked; hitting one again
  // means the chain is cyclic.
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the loop starting at SuccPad to collect every pad (and its
        // distinct terminator) on the cycle for the diagnostic.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2918
2919// visitFunction - Verify that a function is ok.
2920//
2921void Verifier::visitFunction(const Function &F) {
2922 visitGlobalValue(F);
2923
2924 // Check function arguments.
2925 FunctionType *FT = F.getFunctionType();
2926 unsigned NumArgs = F.arg_size();
2927
2928 Check(&Context == &F.getContext(),
2929 "Function context does not match Module context!", &F);
2930
2931 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2932 Check(FT->getNumParams() == NumArgs,
2933 "# formal arguments must match # of arguments for function type!", &F,
2934 FT);
2935 Check(F.getReturnType()->isFirstClassType() ||
2936 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2937 "Functions cannot return aggregate values!", &F);
2938
2939 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2940 "Invalid struct return type!", &F);
2941
2942 if (MaybeAlign A = F.getAlign()) {
2943 Check(A->value() <= Value::MaximumAlignment,
2944 "huge alignment values are unsupported", &F);
2945 }
2946
2947 AttributeList Attrs = F.getAttributes();
2948
2949 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2950 "Attribute after last parameter!", &F);
2951
2952 bool IsIntrinsic = F.isIntrinsic();
2953
2954 // Check function attributes.
2955 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2956
2957 // On function declarations/definitions, we do not support the builtin
2958 // attribute. We do not check this in VerifyFunctionAttrs since that is
2959 // checking for Attributes that can/can not ever be on functions.
2960 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2961 "Attribute 'builtin' can only be applied to a callsite.", &F);
2962
2963 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2964 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2965
2966 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2967 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2968
2969 if (Attrs.hasFnAttr(Attribute::Naked))
2970 for (const Argument &Arg : F.args())
2971 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2972
2973 // Check that this function meets the restrictions on this calling convention.
2974 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2975 // restrictions can be lifted.
2976 switch (F.getCallingConv()) {
2977 default:
2978 case CallingConv::C:
2979 break;
2980 case CallingConv::X86_INTR: {
2981 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2982 "Calling convention parameter requires byval", &F);
2983 break;
2984 }
2985 case CallingConv::AMDGPU_KERNEL:
2986 case CallingConv::SPIR_KERNEL:
2987 case CallingConv::AMDGPU_CS_Chain:
2988 case CallingConv::AMDGPU_CS_ChainPreserve:
2989 Check(F.getReturnType()->isVoidTy(),
2990 "Calling convention requires void return type", &F);
2991 [[fallthrough]];
2992 case CallingConv::AMDGPU_VS:
2993 case CallingConv::AMDGPU_HS:
2994 case CallingConv::AMDGPU_GS:
2995 case CallingConv::AMDGPU_PS:
2996 case CallingConv::AMDGPU_CS:
2997 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2998 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2999 const unsigned StackAS = DL.getAllocaAddrSpace();
3000 unsigned i = 0;
3001 for (const Argument &Arg : F.args()) {
3002 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3003 "Calling convention disallows byval", &F);
3004 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3005 "Calling convention disallows preallocated", &F);
3006 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3007 "Calling convention disallows inalloca", &F);
3008
3009 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3010 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3011 // value here.
3012 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3013 "Calling convention disallows stack byref", &F);
3014 }
3015
3016 ++i;
3017 }
3018 }
3019
3020 [[fallthrough]];
3021 case CallingConv::Fast:
3022 case CallingConv::Cold:
3023 case CallingConv::Intel_OCL_BI:
3024 case CallingConv::PTX_Kernel:
3025 case CallingConv::PTX_Device:
3026 Check(!F.isVarArg(),
3027 "Calling convention does not support varargs or "
3028 "perfect forwarding!",
3029 &F);
3030 break;
3031 case CallingConv::AMDGPU_Gfx_WholeWave:
3032 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3033 "Calling convention requires first argument to be i1", &F);
3034 Check(!F.arg_begin()->hasInRegAttr(),
3035 "Calling convention requires first argument to not be inreg", &F);
3036 Check(!F.isVarArg(),
3037 "Calling convention does not support varargs or "
3038 "perfect forwarding!",
3039 &F);
3040 break;
3041 }
3042
3043 // Check that the argument values match the function type for this function...
3044 unsigned i = 0;
3045 for (const Argument &Arg : F.args()) {
3046 Check(Arg.getType() == FT->getParamType(i),
3047 "Argument value does not match function argument type!", &Arg,
3048 FT->getParamType(i));
3049 Check(Arg.getType()->isFirstClassType(),
3050 "Function arguments must have first-class types!", &Arg);
3051 if (!IsIntrinsic) {
3052 Check(!Arg.getType()->isMetadataTy(),
3053 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3054 Check(!Arg.getType()->isTokenLikeTy(),
3055 "Function takes token but isn't an intrinsic", &Arg, &F);
3056 Check(!Arg.getType()->isX86_AMXTy(),
3057 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3058 }
3059
3060 // Check that swifterror argument is only used by loads and stores.
3061 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3062 verifySwiftErrorValue(&Arg);
3063 }
3064 ++i;
3065 }
3066
3067 if (!IsIntrinsic) {
3068 Check(!F.getReturnType()->isTokenLikeTy(),
3069 "Function returns a token but isn't an intrinsic", &F);
3070 Check(!F.getReturnType()->isX86_AMXTy(),
3071 "Function returns a x86_amx but isn't an intrinsic", &F);
3072 }
3073
3074 // Get the function metadata attachments.
3076 F.getAllMetadata(MDs);
3077 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3078 verifyFunctionMetadata(MDs);
3079
3080 // Check validity of the personality function
3081 if (F.hasPersonalityFn()) {
3082 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3083 if (Per)
3084 Check(Per->getParent() == F.getParent(),
3085 "Referencing personality function in another module!", &F,
3086 F.getParent(), Per, Per->getParent());
3087 }
3088
3089 // EH funclet coloring can be expensive, recompute on-demand
3090 BlockEHFuncletColors.clear();
3091
3092 if (F.isMaterializable()) {
3093 // Function has a body somewhere we can't see.
3094 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3095 MDs.empty() ? nullptr : MDs.front().second);
3096 } else if (F.isDeclaration()) {
3097 for (const auto &I : MDs) {
3098 // This is used for call site debug information.
3099 CheckDI(I.first != LLVMContext::MD_dbg ||
3100 !cast<DISubprogram>(I.second)->isDistinct(),
3101 "function declaration may only have a unique !dbg attachment",
3102 &F);
3103 Check(I.first != LLVMContext::MD_prof,
3104 "function declaration may not have a !prof attachment", &F);
3105
3106 // Verify the metadata itself.
3107 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3108 }
3109 Check(!F.hasPersonalityFn(),
3110 "Function declaration shouldn't have a personality routine", &F);
3111 } else {
3112 // Verify that this function (which has a body) is not named "llvm.*". It
3113 // is not legal to define intrinsics.
3114 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3115
3116 // Check the entry node
3117 const BasicBlock *Entry = &F.getEntryBlock();
3118 Check(pred_empty(Entry),
3119 "Entry block to function must not have predecessors!", Entry);
3120
3121 // The address of the entry block cannot be taken, unless it is dead.
3122 if (Entry->hasAddressTaken()) {
3123 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3124 "blockaddress may not be used with the entry block!", Entry);
3125 }
3126
3127 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3128 NumKCFIAttachments = 0;
3129 // Visit metadata attachments.
3130 for (const auto &I : MDs) {
3131 // Verify that the attachment is legal.
3132 auto AllowLocs = AreDebugLocsAllowed::No;
3133 switch (I.first) {
3134 default:
3135 break;
3136 case LLVMContext::MD_dbg: {
3137 ++NumDebugAttachments;
3138 CheckDI(NumDebugAttachments == 1,
3139 "function must have a single !dbg attachment", &F, I.second);
3140 CheckDI(isa<DISubprogram>(I.second),
3141 "function !dbg attachment must be a subprogram", &F, I.second);
3142 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3143 "function definition may only have a distinct !dbg attachment",
3144 &F);
3145
3146 auto *SP = cast<DISubprogram>(I.second);
3147 const Function *&AttachedTo = DISubprogramAttachments[SP];
3148 CheckDI(!AttachedTo || AttachedTo == &F,
3149 "DISubprogram attached to more than one function", SP, &F);
3150 AttachedTo = &F;
3151 AllowLocs = AreDebugLocsAllowed::Yes;
3152 break;
3153 }
3154 case LLVMContext::MD_prof:
3155 ++NumProfAttachments;
3156 Check(NumProfAttachments == 1,
3157 "function must have a single !prof attachment", &F, I.second);
3158 break;
3159 case LLVMContext::MD_kcfi_type:
3160 ++NumKCFIAttachments;
3161 Check(NumKCFIAttachments == 1,
3162 "function must have a single !kcfi_type attachment", &F,
3163 I.second);
3164 break;
3165 }
3166
3167 // Verify the metadata itself.
3168 visitMDNode(*I.second, AllowLocs);
3169 }
3170 }
3171
3172 // If this function is actually an intrinsic, verify that it is only used in
3173 // direct call/invokes, never having its "address taken".
3174 // Only do this if the module is materialized, otherwise we don't have all the
3175 // uses.
3176 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3177 const User *U;
3178 if (F.hasAddressTaken(&U, false, true, false,
3179 /*IgnoreARCAttachedCall=*/true))
3180 Check(false, "Invalid user of intrinsic instruction!", U);
3181 }
3182
3183 // Check intrinsics' signatures.
3184 switch (F.getIntrinsicID()) {
3185 case Intrinsic::experimental_gc_get_pointer_base: {
3186 FunctionType *FT = F.getFunctionType();
3187 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3188 Check(isa<PointerType>(F.getReturnType()),
3189 "gc.get.pointer.base must return a pointer", F);
3190 Check(FT->getParamType(0) == F.getReturnType(),
3191 "gc.get.pointer.base operand and result must be of the same type", F);
3192 break;
3193 }
3194 case Intrinsic::experimental_gc_get_pointer_offset: {
3195 FunctionType *FT = F.getFunctionType();
3196 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3197 Check(isa<PointerType>(FT->getParamType(0)),
3198 "gc.get.pointer.offset operand must be a pointer", F);
3199 Check(F.getReturnType()->isIntegerTy(),
3200 "gc.get.pointer.offset must return integer", F);
3201 break;
3202 }
3203 }
3204
3205 auto *N = F.getSubprogram();
3206 HasDebugInfo = (N != nullptr);
3207 if (!HasDebugInfo)
3208 return;
3209
3210 // Check that all !dbg attachments lead to back to N.
3211 //
3212 // FIXME: Check this incrementally while visiting !dbg attachments.
3213 // FIXME: Only check when N is the canonical subprogram for F.
3214 SmallPtrSet<const MDNode *, 32> Seen;
3215 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3216 // Be careful about using DILocation here since we might be dealing with
3217 // broken code (this is the Verifier after all).
3218 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3219 if (!DL)
3220 return;
3221 if (!Seen.insert(DL).second)
3222 return;
3223
3224 Metadata *Parent = DL->getRawScope();
3225 CheckDI(Parent && isa<DILocalScope>(Parent),
3226 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3227
3228 DILocalScope *Scope = DL->getInlinedAtScope();
3229 Check(Scope, "Failed to find DILocalScope", DL);
3230
3231 if (!Seen.insert(Scope).second)
3232 return;
3233
3234 DISubprogram *SP = Scope->getSubprogram();
3235
3236 // Scope and SP could be the same MDNode and we don't want to skip
3237 // validation in that case
3238 if ((Scope != SP) && !Seen.insert(SP).second)
3239 return;
3240
3241 CheckDI(SP->describes(&F),
3242 "!dbg attachment points at wrong subprogram for function", N, &F,
3243 &I, DL, Scope, SP);
3244 };
3245 for (auto &BB : F)
3246 for (auto &I : BB) {
3247 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3248 // The llvm.loop annotations also contain two DILocations.
3249 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3250 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3251 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3252 if (BrokenDebugInfo)
3253 return;
3254 }
3255}
3256
3257// verifyBasicBlock - Verify that a basic block is well formed...
3258//
3259void Verifier::visitBasicBlock(BasicBlock &BB) {
3260 InstsInThisBlock.clear();
3261 ConvergenceVerifyHelper.visit(BB);
3262
3263 // Ensure that basic blocks have terminators!
3264 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3265
3266 // Check constraints that this basic block imposes on all of the PHI nodes in
3267 // it.
3268 if (isa<PHINode>(BB.front())) {
3269 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3271 llvm::sort(Preds);
3272 for (const PHINode &PN : BB.phis()) {
3273 Check(PN.getNumIncomingValues() == Preds.size(),
3274 "PHINode should have one entry for each predecessor of its "
3275 "parent basic block!",
3276 &PN);
3277
3278 // Get and sort all incoming values in the PHI node...
3279 Values.clear();
3280 Values.reserve(PN.getNumIncomingValues());
3281 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3282 Values.push_back(
3283 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3284 llvm::sort(Values);
3285
3286 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3287 // Check to make sure that if there is more than one entry for a
3288 // particular basic block in this PHI node, that the incoming values are
3289 // all identical.
3290 //
3291 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3292 Values[i].second == Values[i - 1].second,
3293 "PHI node has multiple entries for the same basic block with "
3294 "different incoming values!",
3295 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3296
3297 // Check to make sure that the predecessors and PHI node entries are
3298 // matched up.
3299 Check(Values[i].first == Preds[i],
3300 "PHI node entries do not match predecessors!", &PN,
3301 Values[i].first, Preds[i]);
3302 }
3303 }
3304 }
3305
3306 // Check that all instructions have their parent pointers set up correctly.
3307 for (auto &I : BB)
3308 {
3309 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3310 }
3311
3312 // Confirm that no issues arise from the debug program.
3313 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3314 &BB);
3315}
3316
3317void Verifier::visitTerminator(Instruction &I) {
3318 // Ensure that terminators only exist at the end of the basic block.
3319 Check(&I == I.getParent()->getTerminator(),
3320 "Terminator found in the middle of a basic block!", I.getParent());
3321 visitInstruction(I);
3322}
3323
3324void Verifier::visitBranchInst(BranchInst &BI) {
3325 if (BI.isConditional()) {
3327 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3328 }
3329 visitTerminator(BI);
3330}
3331
3332void Verifier::visitReturnInst(ReturnInst &RI) {
3333 Function *F = RI.getParent()->getParent();
3334 unsigned N = RI.getNumOperands();
3335 if (F->getReturnType()->isVoidTy())
3336 Check(N == 0,
3337 "Found return instr that returns non-void in Function of void "
3338 "return type!",
3339 &RI, F->getReturnType());
3340 else
3341 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3342 "Function return type does not match operand "
3343 "type of return inst!",
3344 &RI, F->getReturnType());
3345
3346 // Check to make sure that the return value has necessary properties for
3347 // terminators...
3348 visitTerminator(RI);
3349}
3350
3351void Verifier::visitSwitchInst(SwitchInst &SI) {
3352 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3353 // Check to make sure that all of the constants in the switch instruction
3354 // have the same type as the switched-on value.
3355 Type *SwitchTy = SI.getCondition()->getType();
3356 SmallPtrSet<ConstantInt*, 32> Constants;
3357 for (auto &Case : SI.cases()) {
3358 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3359 "Case value is not a constant integer.", &SI);
3360 Check(Case.getCaseValue()->getType() == SwitchTy,
3361 "Switch constants must all be same type as switch value!", &SI);
3362 Check(Constants.insert(Case.getCaseValue()).second,
3363 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3364 }
3365
3366 visitTerminator(SI);
3367}
3368
3369void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3371 "Indirectbr operand must have pointer type!", &BI);
3372 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3374 "Indirectbr destinations must all have pointer type!", &BI);
3375
3376 visitTerminator(BI);
3377}
3378
3379void Verifier::visitCallBrInst(CallBrInst &CBI) {
3380 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3381 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3382 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3383
3384 verifyInlineAsmCall(CBI);
3385 visitTerminator(CBI);
3386}
3387
3388void Verifier::visitSelectInst(SelectInst &SI) {
3389 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3390 SI.getOperand(2)),
3391 "Invalid operands for select instruction!", &SI);
3392
3393 Check(SI.getTrueValue()->getType() == SI.getType(),
3394 "Select values must have same type as select instruction!", &SI);
3395 visitInstruction(SI);
3396}
3397
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1 is a pass-internal placeholder opcode; any surviving instance
  // means some pass failed to clean up after itself, so report unconditionally.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3404
3405void Verifier::visitTruncInst(TruncInst &I) {
3406 // Get the source and destination types
3407 Type *SrcTy = I.getOperand(0)->getType();
3408 Type *DestTy = I.getType();
3409
3410 // Get the size of the types in bits, we'll need this later
3411 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3412 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3413
3414 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3415 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3416 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3417 "trunc source and destination must both be a vector or neither", &I);
3418 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3419
3420 visitInstruction(I);
3421}
3422
3423void Verifier::visitZExtInst(ZExtInst &I) {
3424 // Get the source and destination types
3425 Type *SrcTy = I.getOperand(0)->getType();
3426 Type *DestTy = I.getType();
3427
3428 // Get the size of the types in bits, we'll need this later
3429 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3430 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3431 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3432 "zext source and destination must both be a vector or neither", &I);
3433 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3434 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3435
3436 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3437
3438 visitInstruction(I);
3439}
3440
3441void Verifier::visitSExtInst(SExtInst &I) {
3442 // Get the source and destination types
3443 Type *SrcTy = I.getOperand(0)->getType();
3444 Type *DestTy = I.getType();
3445
3446 // Get the size of the types in bits, we'll need this later
3447 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3448 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3449
3450 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3451 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3452 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3453 "sext source and destination must both be a vector or neither", &I);
3454 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3455
3456 visitInstruction(I);
3457}
3458
3459void Verifier::visitFPTruncInst(FPTruncInst &I) {
3460 // Get the source and destination types
3461 Type *SrcTy = I.getOperand(0)->getType();
3462 Type *DestTy = I.getType();
3463 // Get the size of the types in bits, we'll need this later
3464 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3465 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3466
3467 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3468 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3469 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3470 "fptrunc source and destination must both be a vector or neither", &I);
3471 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3472
3473 visitInstruction(I);
3474}
3475
3476void Verifier::visitFPExtInst(FPExtInst &I) {
3477 // Get the source and destination types
3478 Type *SrcTy = I.getOperand(0)->getType();
3479 Type *DestTy = I.getType();
3480
3481 // Get the size of the types in bits, we'll need this later
3482 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3483 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3484
3485 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3486 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3487 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3488 "fpext source and destination must both be a vector or neither", &I);
3489 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3490
3491 visitInstruction(I);
3492}
3493
3494void Verifier::visitUIToFPInst(UIToFPInst &I) {
3495 // Get the source and destination types
3496 Type *SrcTy = I.getOperand(0)->getType();
3497 Type *DestTy = I.getType();
3498
3499 bool SrcVec = SrcTy->isVectorTy();
3500 bool DstVec = DestTy->isVectorTy();
3501
3502 Check(SrcVec == DstVec,
3503 "UIToFP source and dest must both be vector or scalar", &I);
3504 Check(SrcTy->isIntOrIntVectorTy(),
3505 "UIToFP source must be integer or integer vector", &I);
3506 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3507 &I);
3508
3509 if (SrcVec && DstVec)
3510 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3511 cast<VectorType>(DestTy)->getElementCount(),
3512 "UIToFP source and dest vector length mismatch", &I);
3513
3514 visitInstruction(I);
3515}
3516
3517void Verifier::visitSIToFPInst(SIToFPInst &I) {
3518 // Get the source and destination types
3519 Type *SrcTy = I.getOperand(0)->getType();
3520 Type *DestTy = I.getType();
3521
3522 bool SrcVec = SrcTy->isVectorTy();
3523 bool DstVec = DestTy->isVectorTy();
3524
3525 Check(SrcVec == DstVec,
3526 "SIToFP source and dest must both be vector or scalar", &I);
3527 Check(SrcTy->isIntOrIntVectorTy(),
3528 "SIToFP source must be integer or integer vector", &I);
3529 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3530 &I);
3531
3532 if (SrcVec && DstVec)
3533 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3534 cast<VectorType>(DestTy)->getElementCount(),
3535 "SIToFP source and dest vector length mismatch", &I);
3536
3537 visitInstruction(I);
3538}
3539
3540void Verifier::visitFPToUIInst(FPToUIInst &I) {
3541 // Get the source and destination types
3542 Type *SrcTy = I.getOperand(0)->getType();
3543 Type *DestTy = I.getType();
3544
3545 bool SrcVec = SrcTy->isVectorTy();
3546 bool DstVec = DestTy->isVectorTy();
3547
3548 Check(SrcVec == DstVec,
3549 "FPToUI source and dest must both be vector or scalar", &I);
3550 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3551 Check(DestTy->isIntOrIntVectorTy(),
3552 "FPToUI result must be integer or integer vector", &I);
3553
3554 if (SrcVec && DstVec)
3555 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3556 cast<VectorType>(DestTy)->getElementCount(),
3557 "FPToUI source and dest vector length mismatch", &I);
3558
3559 visitInstruction(I);
3560}
3561
3562void Verifier::visitFPToSIInst(FPToSIInst &I) {
3563 // Get the source and destination types
3564 Type *SrcTy = I.getOperand(0)->getType();
3565 Type *DestTy = I.getType();
3566
3567 bool SrcVec = SrcTy->isVectorTy();
3568 bool DstVec = DestTy->isVectorTy();
3569
3570 Check(SrcVec == DstVec,
3571 "FPToSI source and dest must both be vector or scalar", &I);
3572 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3573 Check(DestTy->isIntOrIntVectorTy(),
3574 "FPToSI result must be integer or integer vector", &I);
3575
3576 if (SrcVec && DstVec)
3577 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3578 cast<VectorType>(DestTy)->getElementCount(),
3579 "FPToSI source and dest vector length mismatch", &I);
3580
3581 visitInstruction(I);
3582}
3583
3584void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3585 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3586 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3587 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3588 V);
3589
3590 if (SrcTy->isVectorTy()) {
3591 auto *VSrc = cast<VectorType>(SrcTy);
3592 auto *VDest = cast<VectorType>(DestTy);
3593 Check(VSrc->getElementCount() == VDest->getElementCount(),
3594 "PtrToAddr vector length mismatch", V);
3595 }
3596
3597 Type *AddrTy = DL.getAddressType(SrcTy);
3598 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3599}
3600
3601void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3602 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3603 visitInstruction(I);
3604}
3605
3606void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3607 // Get the source and destination types
3608 Type *SrcTy = I.getOperand(0)->getType();
3609 Type *DestTy = I.getType();
3610
3611 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3612
3613 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3614 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3615 &I);
3616
3617 if (SrcTy->isVectorTy()) {
3618 auto *VSrc = cast<VectorType>(SrcTy);
3619 auto *VDest = cast<VectorType>(DestTy);
3620 Check(VSrc->getElementCount() == VDest->getElementCount(),
3621 "PtrToInt Vector length mismatch", &I);
3622 }
3623
3624 visitInstruction(I);
3625}
3626
3627void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3628 // Get the source and destination types
3629 Type *SrcTy = I.getOperand(0)->getType();
3630 Type *DestTy = I.getType();
3631
3632 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3633 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3634
3635 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3636 &I);
3637 if (SrcTy->isVectorTy()) {
3638 auto *VSrc = cast<VectorType>(SrcTy);
3639 auto *VDest = cast<VectorType>(DestTy);
3640 Check(VSrc->getElementCount() == VDest->getElementCount(),
3641 "IntToPtr Vector length mismatch", &I);
3642 }
3643 visitInstruction(I);
3644}
3645
3646void Verifier::visitBitCastInst(BitCastInst &I) {
3647 Check(
3648 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3649 "Invalid bitcast", &I);
3650 visitInstruction(I);
3651}
3652
3653void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3654 Type *SrcTy = I.getOperand(0)->getType();
3655 Type *DestTy = I.getType();
3656
3657 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3658 &I);
3659 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3660 &I);
3662 "AddrSpaceCast must be between different address spaces", &I);
3663 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3664 Check(SrcVTy->getElementCount() ==
3665 cast<VectorType>(DestTy)->getElementCount(),
3666 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3667 visitInstruction(I);
3668}
3669
3670/// visitPHINode - Ensure that a PHI node is well formed.
3671///
3672void Verifier::visitPHINode(PHINode &PN) {
3673 // Ensure that the PHI nodes are all grouped together at the top of the block.
3674 // This can be tested by checking whether the instruction before this is
3675 // either nonexistent (because this is begin()) or is a PHI node. If not,
3676 // then there is some other instruction before a PHI.
3677 Check(&PN == &PN.getParent()->front() ||
3679 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3680
3681 // Check that a PHI doesn't yield a Token.
3682 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3683
3684 // Check that all of the values of the PHI node have the same type as the
3685 // result.
3686 for (Value *IncValue : PN.incoming_values()) {
3687 Check(PN.getType() == IncValue->getType(),
3688 "PHI node operands are not the same type as the result!", &PN);
3689 }
3690
3691 // All other PHI node constraints are checked in the visitBasicBlock method.
3692
3693 visitInstruction(PN);
3694}
3695
3696void Verifier::visitCallBase(CallBase &Call) {
3698 "Called function must be a pointer!", Call);
3699 FunctionType *FTy = Call.getFunctionType();
3700
3701 // Verify that the correct number of arguments are being passed
3702 if (FTy->isVarArg())
3703 Check(Call.arg_size() >= FTy->getNumParams(),
3704 "Called function requires more parameters than were provided!", Call);
3705 else
3706 Check(Call.arg_size() == FTy->getNumParams(),
3707 "Incorrect number of arguments passed to called function!", Call);
3708
3709 // Verify that all arguments to the call match the function type.
3710 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3711 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3712 "Call parameter type does not match function signature!",
3713 Call.getArgOperand(i), FTy->getParamType(i), Call);
3714
3715 AttributeList Attrs = Call.getAttributes();
3716
3717 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3718 "Attribute after last parameter!", Call);
3719
3720 Function *Callee =
3722 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3723 if (IsIntrinsic)
3724 Check(Callee->getValueType() == FTy,
3725 "Intrinsic called with incompatible signature", Call);
3726
3727 // Verify if the calling convention of the callee is callable.
3729 "calling convention does not permit calls", Call);
3730
3731 // Disallow passing/returning values with alignment higher than we can
3732 // represent.
3733 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3734 // necessary.
3735 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3736 if (!Ty->isSized())
3737 return;
3738 Align ABIAlign = DL.getABITypeAlign(Ty);
3739 Check(ABIAlign.value() <= Value::MaximumAlignment,
3740 "Incorrect alignment of " + Message + " to called function!", Call);
3741 };
3742
3743 if (!IsIntrinsic) {
3744 VerifyTypeAlign(FTy->getReturnType(), "return type");
3745 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3746 Type *Ty = FTy->getParamType(i);
3747 VerifyTypeAlign(Ty, "argument passed");
3748 }
3749 }
3750
3751 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3752 // Don't allow speculatable on call sites, unless the underlying function
3753 // declaration is also speculatable.
3754 Check(Callee && Callee->isSpeculatable(),
3755 "speculatable attribute may not apply to call sites", Call);
3756 }
3757
3758 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3759 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3760 "preallocated as a call site attribute can only be on "
3761 "llvm.call.preallocated.arg");
3762 }
3763
3764 // Verify call attributes.
3765 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3766
3767 // Conservatively check the inalloca argument.
3768 // We have a bug if we can find that there is an underlying alloca without
3769 // inalloca.
3770 if (Call.hasInAllocaArgument()) {
3771 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3772 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3773 Check(AI->isUsedWithInAlloca(),
3774 "inalloca argument for call has mismatched alloca", AI, Call);
3775 }
3776
3777 // For each argument of the callsite, if it has the swifterror argument,
3778 // make sure the underlying alloca/parameter it comes from has a swifterror as
3779 // well.
3780 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3781 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3782 Value *SwiftErrorArg = Call.getArgOperand(i);
3783 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3784 Check(AI->isSwiftError(),
3785 "swifterror argument for call has mismatched alloca", AI, Call);
3786 continue;
3787 }
3788 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3789 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3790 SwiftErrorArg, Call);
3791 Check(ArgI->hasSwiftErrorAttr(),
3792 "swifterror argument for call has mismatched parameter", ArgI,
3793 Call);
3794 }
3795
3796 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3797 // Don't allow immarg on call sites, unless the underlying declaration
3798 // also has the matching immarg.
3799 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3800 "immarg may not apply only to call sites", Call.getArgOperand(i),
3801 Call);
3802 }
3803
3804 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3805 Value *ArgVal = Call.getArgOperand(i);
3806 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3807 "immarg operand has non-immediate parameter", ArgVal, Call);
3808
3809 // If the imm-arg is an integer and also has a range attached,
3810 // check if the given value is within the range.
3811 if (Call.paramHasAttr(i, Attribute::Range)) {
3812 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3813 const ConstantRange &CR =
3814 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3815 Check(CR.contains(CI->getValue()),
3816 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3817 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3818 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3819 Call);
3820 }
3821 }
3822 }
3823
3824 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3825 Value *ArgVal = Call.getArgOperand(i);
3826 bool hasOB =
3828 bool isMustTail = Call.isMustTailCall();
3829 Check(hasOB != isMustTail,
3830 "preallocated operand either requires a preallocated bundle or "
3831 "the call to be musttail (but not both)",
3832 ArgVal, Call);
3833 }
3834 }
3835
3836 if (FTy->isVarArg()) {
3837 // FIXME? is 'nest' even legal here?
3838 bool SawNest = false;
3839 bool SawReturned = false;
3840
3841 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3842 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3843 SawNest = true;
3844 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3845 SawReturned = true;
3846 }
3847
3848 // Check attributes on the varargs part.
3849 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3850 Type *Ty = Call.getArgOperand(Idx)->getType();
3851 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3852 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3853
3854 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3855 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3856 SawNest = true;
3857 }
3858
3859 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3860 Check(!SawReturned, "More than one parameter has attribute returned!",
3861 Call);
3862 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3863 "Incompatible argument and return types for 'returned' "
3864 "attribute",
3865 Call);
3866 SawReturned = true;
3867 }
3868
3869 // Statepoint intrinsic is vararg but the wrapped function may be not.
3870 // Allow sret here and check the wrapped function in verifyStatepoint.
3871 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3872 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3873 "Attribute 'sret' cannot be used for vararg call arguments!",
3874 Call);
3875
3876 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3877 Check(Idx == Call.arg_size() - 1,
3878 "inalloca isn't on the last argument!", Call);
3879 }
3880 }
3881
3882 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3883 if (!IsIntrinsic) {
3884 for (Type *ParamTy : FTy->params()) {
3885 Check(!ParamTy->isMetadataTy(),
3886 "Function has metadata parameter but isn't an intrinsic", Call);
3887 Check(!ParamTy->isTokenLikeTy(),
3888 "Function has token parameter but isn't an intrinsic", Call);
3889 }
3890 }
3891
3892 // Verify that indirect calls don't return tokens.
3893 if (!Call.getCalledFunction()) {
3894 Check(!FTy->getReturnType()->isTokenLikeTy(),
3895 "Return type cannot be token for indirect call!");
3896 Check(!FTy->getReturnType()->isX86_AMXTy(),
3897 "Return type cannot be x86_amx for indirect call!");
3898 }
3899
3901 visitIntrinsicCall(ID, Call);
3902
3903 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3904 // most one "gc-transition", at most one "cfguardtarget", at most one
3905 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3906 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3907 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3908 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3909 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3910 FoundAttachedCallBundle = false;
3911 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3912 OperandBundleUse BU = Call.getOperandBundleAt(i);
3913 uint32_t Tag = BU.getTagID();
3914 if (Tag == LLVMContext::OB_deopt) {
3915 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3916 FoundDeoptBundle = true;
3917 } else if (Tag == LLVMContext::OB_gc_transition) {
3918 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3919 Call);
3920 FoundGCTransitionBundle = true;
3921 } else if (Tag == LLVMContext::OB_funclet) {
3922 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3923 FoundFuncletBundle = true;
3924 Check(BU.Inputs.size() == 1,
3925 "Expected exactly one funclet bundle operand", Call);
3926 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3927 "Funclet bundle operands should correspond to a FuncletPadInst",
3928 Call);
3929 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3930 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3931 Call);
3932 FoundCFGuardTargetBundle = true;
3933 Check(BU.Inputs.size() == 1,
3934 "Expected exactly one cfguardtarget bundle operand", Call);
3935 } else if (Tag == LLVMContext::OB_ptrauth) {
3936 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3937 FoundPtrauthBundle = true;
3938 Check(BU.Inputs.size() == 2,
3939 "Expected exactly two ptrauth bundle operands", Call);
3940 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3941 BU.Inputs[0]->getType()->isIntegerTy(32),
3942 "Ptrauth bundle key operand must be an i32 constant", Call);
3943 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3944 "Ptrauth bundle discriminator operand must be an i64", Call);
3945 } else if (Tag == LLVMContext::OB_kcfi) {
3946 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3947 FoundKCFIBundle = true;
3948 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3949 Call);
3950 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3951 BU.Inputs[0]->getType()->isIntegerTy(32),
3952 "Kcfi bundle operand must be an i32 constant", Call);
3953 } else if (Tag == LLVMContext::OB_preallocated) {
3954 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3955 Call);
3956 FoundPreallocatedBundle = true;
3957 Check(BU.Inputs.size() == 1,
3958 "Expected exactly one preallocated bundle operand", Call);
3959 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3960 Check(Input &&
3961 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3962 "\"preallocated\" argument must be a token from "
3963 "llvm.call.preallocated.setup",
3964 Call);
3965 } else if (Tag == LLVMContext::OB_gc_live) {
3966 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3967 FoundGCLiveBundle = true;
3969 Check(!FoundAttachedCallBundle,
3970 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3971 FoundAttachedCallBundle = true;
3972 verifyAttachedCallBundle(Call, BU);
3973 }
3974 }
3975
3976 // Verify that callee and callsite agree on whether to use pointer auth.
3977 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3978 "Direct call cannot have a ptrauth bundle", Call);
3979
3980 // Verify that each inlinable callsite of a debug-info-bearing function in a
3981 // debug-info-bearing function has a debug location attached to it. Failure to
3982 // do so causes assertion failures when the inliner sets up inline scope info
3983 // (Interposable functions are not inlinable, neither are functions without
3984 // definitions.)
3990 "inlinable function call in a function with "
3991 "debug info must have a !dbg location",
3992 Call);
3993
3994 if (Call.isInlineAsm())
3995 verifyInlineAsmCall(Call);
3996
3997 ConvergenceVerifyHelper.visit(Call);
3998
3999 visitInstruction(Call);
4000}
4001
4002void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4003 StringRef Context) {
4004 Check(!Attrs.contains(Attribute::InAlloca),
4005 Twine("inalloca attribute not allowed in ") + Context);
4006 Check(!Attrs.contains(Attribute::InReg),
4007 Twine("inreg attribute not allowed in ") + Context);
4008 Check(!Attrs.contains(Attribute::SwiftError),
4009 Twine("swifterror attribute not allowed in ") + Context);
4010 Check(!Attrs.contains(Attribute::Preallocated),
4011 Twine("preallocated attribute not allowed in ") + Context);
4012 Check(!Attrs.contains(Attribute::ByRef),
4013 Twine("byref attribute not allowed in ") + Context);
4014}
4015
4016/// Two types are "congruent" if they are identical, or if they are both pointer
4017/// types with different pointee types and the same address space.
4018static bool isTypeCongruent(Type *L, Type *R) {
4019 if (L == R)
4020 return true;
4023 if (!PL || !PR)
4024 return false;
4025 return PL->getAddressSpace() == PR->getAddressSpace();
4026}
4027
4028static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4029 static const Attribute::AttrKind ABIAttrs[] = {
4030 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4031 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4032 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4033 Attribute::ByRef};
4034 AttrBuilder Copy(C);
4035 for (auto AK : ABIAttrs) {
4036 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4037 if (Attr.isValid())
4038 Copy.addAttribute(Attr);
4039 }
4040
4041 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4042 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4043 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4044 Attrs.hasParamAttr(I, Attribute::ByRef)))
4045 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4046 return Copy;
4047}
4048
4049void Verifier::verifyMustTailCall(CallInst &CI) {
4050 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4051
4052 Function *F = CI.getParent()->getParent();
4053 FunctionType *CallerTy = F->getFunctionType();
4054 FunctionType *CalleeTy = CI.getFunctionType();
4055 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4056 "cannot guarantee tail call due to mismatched varargs", &CI);
4057 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4058 "cannot guarantee tail call due to mismatched return types", &CI);
4059
4060 // - The calling conventions of the caller and callee must match.
4061 Check(F->getCallingConv() == CI.getCallingConv(),
4062 "cannot guarantee tail call due to mismatched calling conv", &CI);
4063
4064 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4065 // or a pointer bitcast followed by a ret instruction.
4066 // - The ret instruction must return the (possibly bitcasted) value
4067 // produced by the call or void.
4068 Value *RetVal = &CI;
4070
4071 // Handle the optional bitcast.
4072 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4073 Check(BI->getOperand(0) == RetVal,
4074 "bitcast following musttail call must use the call", BI);
4075 RetVal = BI;
4076 Next = BI->getNextNode();
4077 }
4078
4079 // Check the return.
4080 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4081 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4082 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4083 isa<UndefValue>(Ret->getReturnValue()),
4084 "musttail call result must be returned", Ret);
4085
4086 AttributeList CallerAttrs = F->getAttributes();
4087 AttributeList CalleeAttrs = CI.getAttributes();
4088 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4089 CI.getCallingConv() == CallingConv::Tail) {
4090 StringRef CCName =
4091 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4092
4093 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4094 // are allowed in swifttailcc call
4095 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4096 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4097 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4098 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4099 }
4100 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4101 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4102 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4103 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4104 }
4105 // - Varargs functions are not allowed
4106 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4107 " tail call for varargs function");
4108 return;
4109 }
4110
4111 // - The caller and callee prototypes must match. Pointer types of
4112 // parameters or return types may differ in pointee type, but not
4113 // address space.
4114 if (!CI.getIntrinsicID()) {
4115 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4116 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4117 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4118 Check(
4119 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4120 "cannot guarantee tail call due to mismatched parameter types", &CI);
4121 }
4122 }
4123
4124 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4125 // returned, preallocated, and inalloca, must match.
4126 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4127 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4128 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4129 Check(CallerABIAttrs == CalleeABIAttrs,
4130 "cannot guarantee tail call due to mismatched ABI impacting "
4131 "function attributes",
4132 &CI, CI.getOperand(I));
4133 }
4134}
4135
4136void Verifier::visitCallInst(CallInst &CI) {
4137 visitCallBase(CI);
4138
4139 if (CI.isMustTailCall())
4140 verifyMustTailCall(CI);
4141}
4142
4143void Verifier::visitInvokeInst(InvokeInst &II) {
4144 visitCallBase(II);
4145
4146 // Verify that the first non-PHI instruction of the unwind destination is an
4147 // exception handling instruction.
4148 Check(
4149 II.getUnwindDest()->isEHPad(),
4150 "The unwind destination does not have an exception handling instruction!",
4151 &II);
4152
4153 visitTerminator(II);
4154}
4155
4156/// visitUnaryOperator - Check the argument to the unary operator.
4157///
4158void Verifier::visitUnaryOperator(UnaryOperator &U) {
4159 Check(U.getType() == U.getOperand(0)->getType(),
4160 "Unary operators must have same type for"
4161 "operands and result!",
4162 &U);
4163
4164 switch (U.getOpcode()) {
4165 // Check that floating-point arithmetic operators are only used with
4166 // floating-point operands.
4167 case Instruction::FNeg:
4168 Check(U.getType()->isFPOrFPVectorTy(),
4169 "FNeg operator only works with float types!", &U);
4170 break;
4171 default:
4172 llvm_unreachable("Unknown UnaryOperator opcode!");
4173 }
4174
4175 visitInstruction(U);
4176}
4177
4178/// visitBinaryOperator - Check that both arguments to the binary operator are
4179/// of the same type!
4180///
4181void Verifier::visitBinaryOperator(BinaryOperator &B) {
4182 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4183 "Both operands to a binary operator are not of the same type!", &B);
4184
4185 switch (B.getOpcode()) {
4186 // Check that integer arithmetic operators are only used with
4187 // integral operands.
4188 case Instruction::Add:
4189 case Instruction::Sub:
4190 case Instruction::Mul:
4191 case Instruction::SDiv:
4192 case Instruction::UDiv:
4193 case Instruction::SRem:
4194 case Instruction::URem:
4195 Check(B.getType()->isIntOrIntVectorTy(),
4196 "Integer arithmetic operators only work with integral types!", &B);
4197 Check(B.getType() == B.getOperand(0)->getType(),
4198 "Integer arithmetic operators must have same type "
4199 "for operands and result!",
4200 &B);
4201 break;
4202 // Check that floating-point arithmetic operators are only used with
4203 // floating-point operands.
4204 case Instruction::FAdd:
4205 case Instruction::FSub:
4206 case Instruction::FMul:
4207 case Instruction::FDiv:
4208 case Instruction::FRem:
4209 Check(B.getType()->isFPOrFPVectorTy(),
4210 "Floating-point arithmetic operators only work with "
4211 "floating-point types!",
4212 &B);
4213 Check(B.getType() == B.getOperand(0)->getType(),
4214 "Floating-point arithmetic operators must have same type "
4215 "for operands and result!",
4216 &B);
4217 break;
4218 // Check that logical operators are only used with integral operands.
4219 case Instruction::And:
4220 case Instruction::Or:
4221 case Instruction::Xor:
4222 Check(B.getType()->isIntOrIntVectorTy(),
4223 "Logical operators only work with integral types!", &B);
4224 Check(B.getType() == B.getOperand(0)->getType(),
4225 "Logical operators must have same type for operands and result!", &B);
4226 break;
4227 case Instruction::Shl:
4228 case Instruction::LShr:
4229 case Instruction::AShr:
4230 Check(B.getType()->isIntOrIntVectorTy(),
4231 "Shifts only work with integral types!", &B);
4232 Check(B.getType() == B.getOperand(0)->getType(),
4233 "Shift return type must be same as operands!", &B);
4234 break;
4235 default:
4236 llvm_unreachable("Unknown BinaryOperator opcode!");
4237 }
4238
4239 visitInstruction(B);
4240}
4241
4242void Verifier::visitICmpInst(ICmpInst &IC) {
4243 // Check that the operands are the same type
4244 Type *Op0Ty = IC.getOperand(0)->getType();
4245 Type *Op1Ty = IC.getOperand(1)->getType();
4246 Check(Op0Ty == Op1Ty,
4247 "Both operands to ICmp instruction are not of the same type!", &IC);
4248 // Check that the operands are the right type
4249 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4250 "Invalid operand types for ICmp instruction", &IC);
4251 // Check that the predicate is valid.
4252 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4253
4254 visitInstruction(IC);
4255}
4256
4257void Verifier::visitFCmpInst(FCmpInst &FC) {
4258 // Check that the operands are the same type
4259 Type *Op0Ty = FC.getOperand(0)->getType();
4260 Type *Op1Ty = FC.getOperand(1)->getType();
4261 Check(Op0Ty == Op1Ty,
4262 "Both operands to FCmp instruction are not of the same type!", &FC);
4263 // Check that the operands are the right type
4264 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4265 &FC);
4266 // Check that the predicate is valid.
4267 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4268
4269 visitInstruction(FC);
4270}
4271
4272void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4274 "Invalid extractelement operands!", &EI);
4275 visitInstruction(EI);
4276}
4277
4278void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4279 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4280 IE.getOperand(2)),
4281 "Invalid insertelement operands!", &IE);
4282 visitInstruction(IE);
4283}
4284
4285void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4287 SV.getShuffleMask()),
4288 "Invalid shufflevector operands!", &SV);
4289 visitInstruction(SV);
4290}
4291
4292void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4293 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4294
4295 Check(isa<PointerType>(TargetTy),
4296 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4297 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4298
4299 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4300 Check(!STy->isScalableTy(),
4301 "getelementptr cannot target structure that contains scalable vector"
4302 "type",
4303 &GEP);
4304 }
4305
4306 SmallVector<Value *, 16> Idxs(GEP.indices());
4307 Check(
4308 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4309 "GEP indexes must be integers", &GEP);
4310 Type *ElTy =
4311 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4312 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4313
4314 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4315
4316 Check(PtrTy && GEP.getResultElementType() == ElTy,
4317 "GEP is not of right type for indices!", &GEP, ElTy);
4318
4319 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4320 // Additional checks for vector GEPs.
4321 ElementCount GEPWidth = GEPVTy->getElementCount();
4322 if (GEP.getPointerOperandType()->isVectorTy())
4323 Check(
4324 GEPWidth ==
4325 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4326 "Vector GEP result width doesn't match operand's", &GEP);
4327 for (Value *Idx : Idxs) {
4328 Type *IndexTy = Idx->getType();
4329 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4330 ElementCount IndexWidth = IndexVTy->getElementCount();
4331 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4332 }
4333 Check(IndexTy->isIntOrIntVectorTy(),
4334 "All GEP indices should be of integer type");
4335 }
4336 }
4337
4338 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4339 "GEP address space doesn't match type", &GEP);
4340
4341 visitInstruction(GEP);
4342}
4343
4344static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4345 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4346}
4347
4348/// Verify !range and !absolute_symbol metadata. These have the same
4349/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs, so the count must be even and there
  // must be at least one pair.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Both endpoints of each pair must be constant integers of matching type.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // noalias.addrspace ranges describe address-space numbers, always i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Empty ranges are never allowed; full ranges only for !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive pairs must be disjoint, signed-ascending, and must not
      // touch (touching pairs should have been merged into one).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // With 3+ pairs the list can wrap around the integer space, so the last
    // pair must also be checked against the first one.
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4412
4413void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4414 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4415 "precondition violation");
4416 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4417}
4418
4419void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4420 Type *Ty) {
4421 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4422 "precondition violation");
4423 verifyRangeLikeMetadata(I, Range, Ty,
4424 RangeLikeMetadataKind::NoaliasAddrspace);
4425}
4426
4427void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4428 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4429 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4430 Check(!(Size & (Size - 1)),
4431 "atomic memory access' operand must have a power-of-two size", Ty, I);
4432}
4433
// Verify a load: pointer operand, supported alignment, sized result type,
// and the extra constraints atomic loads carry (ordering and operand type).
void Verifier::visitLoadInst(LoadInst &LI) {
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Release-flavored orderings make no sense on a load.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &LI);
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
    "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4459
4460void Verifier::visitStoreInst(StoreInst &SI) {
4461 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4462 Check(PTy, "Store operand must be a pointer.", &SI);
4463 Type *ElTy = SI.getOperand(0)->getType();
4464 if (MaybeAlign A = SI.getAlign()) {
4465 Check(A->value() <= Value::MaximumAlignment,
4466 "huge alignment values are unsupported", &SI);
4467 }
4468 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4469 if (SI.isAtomic()) {
4470 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4471 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4472 "Store cannot have Acquire ordering", &SI);
4473 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4474 "atomic store operand must have integer, pointer, or floating point "
4475 "type!",
4476 ElTy, &SI);
4477 checkAtomicMemAccessSize(ElTy, &SI);
4478 } else {
4479 Check(SI.getSyncScopeID() == SyncScope::System,
4480 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4481 }
4482 visitInstruction(SI);
4483}
4484
4485/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4486void Verifier::verifySwiftErrorCall(CallBase &Call,
4487 const Value *SwiftErrorVal) {
4488 for (const auto &I : llvm::enumerate(Call.args())) {
4489 if (I.value() == SwiftErrorVal) {
4490 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4491 "swifterror value when used in a callsite should be marked "
4492 "with swifterror attribute",
4493 SwiftErrorVal, Call);
4494 }
4495 }
4496}
4497
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
          isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites additionally require the swifterror parameter attribute on
    // the matching argument slot.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4517
// Verify an alloca: sized allocated type, legal target-extension usage,
// integer array size, bounded alignment, swifterror constraints, and the
// AMDGPU address-space requirement.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // swifterror slots hold a single pointer; see verifySwiftErrorValue for
    // the use-site constraints.
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
          "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4547
4548void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4549 Type *ElTy = CXI.getOperand(1)->getType();
4550 Check(ElTy->isIntOrPtrTy(),
4551 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4552 checkAtomicMemAccessSize(ElTy, &CXI);
4553 visitInstruction(CXI);
4554}
4555
// Verify an atomicrmw: non-unordered ordering, an operand type legal for the
// specific operation (xchg also allows FP/pointer, FP ops require FP types,
// everything else requires integers), and a legal atomic access width.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  // Operand 1 is the value operand; its type is the accessed memory type.
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
        "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
            " operand must have floating-point or fixed vector of floating-point "
            "type!",
        &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4584
4585void Verifier::visitFenceInst(FenceInst &FI) {
4586 const AtomicOrdering Ordering = FI.getOrdering();
4587 Check(Ordering == AtomicOrdering::Acquire ||
4588 Ordering == AtomicOrdering::Release ||
4589 Ordering == AtomicOrdering::AcquireRelease ||
4590 Ordering == AtomicOrdering::SequentiallyConsistent,
4591 "fence instructions may only have acquire, release, acq_rel, or "
4592 "seq_cst ordering.",
4593 &FI);
4594 visitInstruction(FI);
4595}
4596
// Verify an extractvalue: the type reached by its index list must equal the
// instruction's result type.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
                  EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4604
// Verify an insertvalue: the type reached by its index list must equal the
// type of the value being inserted (operand 1).
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
                  IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4613
4614static Value *getParentPad(Value *EHPad) {
4615 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4616 return FPI->getParentPad();
4617
4618 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4619}
4620
4621void Verifier::visitEHPadPredecessors(Instruction &I) {
4622 assert(I.isEHPad());
4623
4624 BasicBlock *BB = I.getParent();
4625 Function *F = BB->getParent();
4626
4627 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4628
4629 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4630 // The landingpad instruction defines its parent as a landing pad block. The
4631 // landing pad block may be branched to only by the unwind edge of an
4632 // invoke.
4633 for (BasicBlock *PredBB : predecessors(BB)) {
4634 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4635 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4636 "Block containing LandingPadInst must be jumped to "
4637 "only by the unwind edge of an invoke.",
4638 LPI);
4639 }
4640 return;
4641 }
4642 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4643 if (!pred_empty(BB))
4644 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4645 "Block containg CatchPadInst must be jumped to "
4646 "only by its catchswitch.",
4647 CPI);
4648 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4649 "Catchswitch cannot unwind to one of its catchpads",
4650 CPI->getCatchSwitch(), CPI);
4651 return;
4652 }
4653
4654 // Verify that each pred has a legal terminator with a legal to/from EH
4655 // pad relationship.
4656 Instruction *ToPad = &I;
4657 Value *ToPadParent = getParentPad(ToPad);
4658 for (BasicBlock *PredBB : predecessors(BB)) {
4659 Instruction *TI = PredBB->getTerminator();
4660 Value *FromPad;
4661 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4662 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4663 "EH pad must be jumped to via an unwind edge", ToPad, II);
4664 auto *CalledFn =
4665 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4666 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4667 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4668 continue;
4669 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4670 FromPad = Bundle->Inputs[0];
4671 else
4672 FromPad = ConstantTokenNone::get(II->getContext());
4673 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4674 FromPad = CRI->getOperand(0);
4675 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4676 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4677 FromPad = CSI;
4678 } else {
4679 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4680 }
4681
4682 // The edge may exit from zero or more nested pads.
4683 SmallPtrSet<Value *, 8> Seen;
4684 for (;; FromPad = getParentPad(FromPad)) {
4685 Check(FromPad != ToPad,
4686 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4687 if (FromPad == ToPadParent) {
4688 // This is a legal unwind edge.
4689 break;
4690 }
4691 Check(!isa<ConstantTokenNone>(FromPad),
4692 "A single unwind edge may only enter one EH pad", TI);
4693 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4694 FromPad);
4695
4696 // This will be diagnosed on the corresponding instruction already. We
4697 // need the extra check here to make sure getParentPad() works.
4698 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4699 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4700 }
4701 }
4702}
4703
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) in one function must agree on a single
  // result type; the first one seen establishes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  // Catch clauses carry a pointer typeinfo; filter clauses carry an array.
  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4743
// Verify a resume: requires a personality function, and its operand type
// must agree with the function-wide landingpad result type.
void Verifier::visitResumeInst(ResumeInst &RI) {
        "ResumeInst needs to be in a function with a personality.", &RI);

  // The first landingpad/resume seen establishes the shared result type.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4758
// Verify a catchpad: requires a personality function, must be directly
// nested in a catchswitch, and must lead its block (after any PHIs).
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4778
4779void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4780 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4781 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4782 CatchReturn.getOperand(0));
4783
4784 visitTerminator(CatchReturn);
4785}
4786
4787void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4788 BasicBlock *BB = CPI.getParent();
4789
4790 Function *F = BB->getParent();
4791 Check(F->hasPersonalityFn(),
4792 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4793
4794 // The cleanuppad instruction must be the first non-PHI instruction in the
4795 // block.
4796 Check(&*BB->getFirstNonPHIIt() == &CPI,
4797 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4798
4799 auto *ParentPad = CPI.getParentPad();
4800 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4801 "CleanupPadInst has an invalid parent.", &CPI);
4802
4803 visitEHPadPredecessors(CPI);
4804 visitFuncletPadInst(CPI);
4805}
4806
// Verify a funclet pad (catchpad/cleanuppad): walk FPI and any cleanuppads
// nested under it to find every unwind edge that exits FPI, check that all
// such edges agree on one unwind destination, and record sibling-unwind info
// for verifySiblingFuncletUnwinds. A worklist/"resolved ancestor" protocol
// lets the scan stop early for nested pads once their destination is known.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Classify the use and extract its unwind destination, if any.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's unwind destination must additionally agree with its parent
  // catchswitch's unwind destination (or caller, when it has none).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4966
4967void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4968 BasicBlock *BB = CatchSwitch.getParent();
4969
4970 Function *F = BB->getParent();
4971 Check(F->hasPersonalityFn(),
4972 "CatchSwitchInst needs to be in a function with a personality.",
4973 &CatchSwitch);
4974
4975 // The catchswitch instruction must be the first non-PHI instruction in the
4976 // block.
4977 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
4978 "CatchSwitchInst not the first non-PHI instruction in the block.",
4979 &CatchSwitch);
4980
4981 auto *ParentPad = CatchSwitch.getParentPad();
4982 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4983 "CatchSwitchInst has an invalid parent.", ParentPad);
4984
4985 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4986 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4987 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4988 "CatchSwitchInst must unwind to an EH block which is not a "
4989 "landingpad.",
4990 &CatchSwitch);
4991
4992 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4993 if (getParentPad(&*I) == ParentPad)
4994 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4995 }
4996
4997 Check(CatchSwitch.getNumHandlers() != 0,
4998 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4999
5000 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5001 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5002 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5003 }
5004
5005 visitEHPadPredecessors(CatchSwitch);
5006 visitTerminator(CatchSwitch);
5007}
5008
// Verify a cleanupret: it must name its cleanuppad, and any unwind
// destination must start with a non-landingpad EH pad.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5024
5025void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5026 Instruction *Op = cast<Instruction>(I.getOperand(i));
5027 // If the we have an invalid invoke, don't try to compute the dominance.
5028 // We already reject it in the invoke specific checks and the dominance
5029 // computation doesn't handle multiple edges.
5030 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5031 if (II->getNormalDest() == II->getUnwindDest())
5032 return;
5033 }
5034
5035 // Quick check whether the def has already been encountered in the same block.
5036 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5037 // uses are defined to happen on the incoming edge, not at the instruction.
5038 //
5039 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5040 // wrapping an SSA value, assert that we've already encountered it. See
5041 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5042 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5043 return;
5044
5045 const Use &U = I.getOperandUse(i);
5046 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5047}
5048
// Verify !dereferenceable / !dereferenceable_or_null: pointer-typed result,
// restricted instruction kinds, and a single i64 byte-count operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand is the guaranteed-dereferenceable size in bytes.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5068
5069void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5070 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5071 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5072 &I);
5073 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5074}
5075
// Verify !prof metadata: the first operand names the profile kind, and the
// remaining operands must match that kind's shape (branch_weights operand
// counts per instruction kind, or a well-formed value-profiling record).
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // Number of weights a branch_weights annotation on I would need; 0 means
  // branch weights are not applicable to this instruction kind.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      // Invokes may carry one weight (taken) or two (taken/unwind).
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Each weight after the header must be a constant integer.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
            // NOTE(review): "brunch_weights" looks like a typo for
            // "branch_weights" in this diagnostic — confirm before changing,
            // since tests may match the existing text.
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    // The kind must name a known value-profiling kind, and the records after
    // it come in (value, count) pairs.
    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5159
/// Verify a !DIAssignID attachment and every debug-assign user (both old
/// llvm.dbg.assign intrinsics and DbgVariableRecords) that references it.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Non-instruction debug records referencing this ID must be assign-kind
  // records, and they must live in the same function as I.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5191
/// Verify an !mmra (memory model relaxation annotation) attachment.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag: it must then be a tuple whose operands are all tags.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5209
/// Verify memprof call stack metadata: a non-empty list whose operands are
/// each a constant integer (a hash identifying a source location).
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
          "call stack metadata operand should be constant integer", Op);
}
5220
5221void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5222 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5223 Check(MD->getNumOperands() >= 1,
5224 "!memprof annotations should have at least 1 metadata operand "
5225 "(MemInfoBlock)",
5226 MD);
5227
5228 // Check each MIB
5229 for (auto &MIBOp : MD->operands()) {
5230 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5231 // The first operand of an MIB should be the call stack metadata.
5232 // There rest of the operands should be MDString tags, and there should be
5233 // at least one.
5234 Check(MIB->getNumOperands() >= 2,
5235 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5236
5237 // Check call stack metadata (first operand).
5238 Check(MIB->getOperand(0) != nullptr,
5239 "!memprof MemInfoBlock first operand should not be null", MIB);
5240 Check(isa<MDNode>(MIB->getOperand(0)),
5241 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5242 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5243 visitCallStackMetadata(StackMD);
5244
5245 // The next set of 1 or more operands should be MDString.
5246 unsigned I = 1;
5247 for (; I < MIB->getNumOperands(); ++I) {
5248 if (!isa<MDString>(MIB->getOperand(I))) {
5249 Check(I > 1,
5250 "!memprof MemInfoBlock second operand should be an MDString",
5251 MIB);
5252 break;
5253 }
5254 }
5255
5256 // Any remaining should be MDNode that are pairs of integers
5257 for (; I < MIB->getNumOperands(); ++I) {
5258 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5259 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5260 MIB);
5261 Check(OpNode->getNumOperands() == 2,
5262 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5263 "operands",
5264 MIB);
5265 // Check that all of Op's operands are ConstantInt.
5266 Check(llvm::all_of(OpNode->operands(),
5267 [](const MDOperand &Op) {
5268 return mdconst::hasa<ConstantInt>(Op);
5269 }),
5270 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5271 "ConstantInt operands",
5272 MIB);
5273 }
5274 }
5275}
5276
5277void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5278 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5279 // Verify the partial callstack annotated from memprof profiles. This callsite
5280 // is a part of a profiled allocation callstack.
5281 visitCallStackMetadata(MD);
5282}
5283
5284static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5285 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5286 return isa<ConstantInt>(VAL->getValue());
5287 return false;
5288}
5289
/// Verify a !callee_type attachment: a list of generalized type metadata
/// nodes describing the possible callee types of an (indirect) call.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // For function type metadata the first operand is a constant zero offset.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5310
5311void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5312 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5313 Check(Annotation->getNumOperands() >= 1,
5314 "annotation must have at least one operand");
5315 for (const MDOperand &Op : Annotation->operands()) {
5316 bool TupleOfStrings =
5317 isa<MDTuple>(Op.get()) &&
5318 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5319 return isa<MDString>(Annotation.get());
5320 });
5321 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5322 "operands must be a string or a tuple of strings");
5323 }
5324}
5325
/// Verify one alias scope node as referenced from !alias.scope / !noalias
/// lists: 2-3 operands (self-ref or string id, domain node, optional name
/// string), where the domain itself has 1-2 operands of similar shape.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // A self-referential first operand keeps the scope node distinct; a string
  // id is the alternative form.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5349
5350void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5351 for (const MDOperand &Op : MD->operands()) {
5352 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5353 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5354 visitAliasScopeMetadata(OpMD);
5355 }
5356}
5357
5358void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5359 auto IsValidAccessScope = [](const MDNode *MD) {
5360 return MD->getNumOperands() == 0 && MD->isDistinct();
5361 };
5362
5363 // It must be either an access scope itself...
5364 if (IsValidAccessScope(MD))
5365 return;
5366
5367 // ...or a list of access scopes.
5368 for (const MDOperand &Op : MD->operands()) {
5369 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5370 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5371 Check(IsValidAccessScope(OpMD),
5372 "Access scope list contains invalid access scope", MD);
5373 }
5374}
5375
/// verifyInstruction - Verify that an instruction is well formed.
///
/// Checks structural invariants (parented in a block, non-void values only
/// may be named, first-class operand types), validates cross-references from
/// operands (functions, blocks, arguments, globals, instructions must belong
/// to this function/module), and dispatches each recognized metadata
/// attachment to its dedicated verifier.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Per-operand checks: each operand must be a first-class value, and any
  // referenced entity must live in the expected function/module.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may be invoked (rather than
      // called); all others must appear as plain CallInst callees.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this very call.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // Metadata attachment checks: each known kind gets its dedicated verifier.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // Accuracy must be a finite, positive IEEE single.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Visit every remaining attachment; debug locations are only legal inside
  // !dbg and !llvm.loop attachments.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  InstsInThisBlock.insert(&I);
}
5630
5631/// Allow intrinsics to be verified in different ways.
5632void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5634 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5635 IF);
5636
5637 // Verify that the intrinsic prototype lines up with what the .td files
5638 // describe.
5639 FunctionType *IFTy = IF->getFunctionType();
5640 bool IsVarArg = IFTy->isVarArg();
5641
5645
5646 // Walk the descriptors to extract overloaded types.
5651 "Intrinsic has incorrect return type!", IF);
5653 "Intrinsic has incorrect argument type!", IF);
5654
5655 // Verify if the intrinsic call matches the vararg property.
5656 if (IsVarArg)
5658 "Intrinsic was not defined with variable arguments!", IF);
5659 else
5661 "Callsite was not defined with variable arguments!", IF);
5662
5663 // All descriptors should be absorbed by now.
5664 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5665
5666 // Now that we have the intrinsic ID and the actual argument types (and we
5667 // know they are legal for the intrinsic!) get the intrinsic name through the
5668 // usual means. This allows us to verify the mangling of argument types into
5669 // the name.
5670 const std::string ExpectedName =
5671 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5672 Check(ExpectedName == IF->getName(),
5673 "Intrinsic name not mangled correctly for type arguments! "
5674 "Should be: " +
5675 ExpectedName,
5676 IF);
5677
5678 // If the intrinsic takes MDNode arguments, verify that they are either global
5679 // or are local to *this* function.
5680 for (Value *V : Call.args()) {
5681 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5682 visitMetadataAsValue(*MD, Call.getCaller());
5683 if (auto *Const = dyn_cast<Constant>(V))
5684 Check(!Const->getType()->isX86_AMXTy(),
5685 "const x86_amx is not allowed in argument!");
5686 }
5687
5688 switch (ID) {
5689 default:
5690 break;
5691 case Intrinsic::assume: {
5692 if (Call.hasOperandBundles()) {
5694 Check(Cond && Cond->isOne(),
5695 "assume with operand bundles must have i1 true condition", Call);
5696 }
5697 for (auto &Elem : Call.bundle_op_infos()) {
5698 unsigned ArgCount = Elem.End - Elem.Begin;
5699 // Separate storage assumptions are special insofar as they're the only
5700 // operand bundles allowed on assumes that aren't parameter attributes.
5701 if (Elem.Tag->getKey() == "separate_storage") {
5702 Check(ArgCount == 2,
5703 "separate_storage assumptions should have 2 arguments", Call);
5704 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5705 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5706 "arguments to separate_storage assumptions should be pointers",
5707 Call);
5708 continue;
5709 }
5710 Check(Elem.Tag->getKey() == "ignore" ||
5711 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5712 "tags must be valid attribute names", Call);
5713 Attribute::AttrKind Kind =
5714 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5715 if (Kind == Attribute::Alignment) {
5716 Check(ArgCount <= 3 && ArgCount >= 2,
5717 "alignment assumptions should have 2 or 3 arguments", Call);
5718 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5719 "first argument should be a pointer", Call);
5720 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5721 "second argument should be an integer", Call);
5722 if (ArgCount == 3)
5723 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5724 "third argument should be an integer if present", Call);
5725 continue;
5726 }
5727 if (Kind == Attribute::Dereferenceable) {
5728 Check(ArgCount == 2,
5729 "dereferenceable assumptions should have 2 arguments", Call);
5730 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5731 "first argument should be a pointer", Call);
5732 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5733 "second argument should be an integer", Call);
5734 continue;
5735 }
5736 Check(ArgCount <= 2, "too many arguments", Call);
5737 if (Kind == Attribute::None)
5738 break;
5739 if (Attribute::isIntAttrKind(Kind)) {
5740 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5741 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5742 "the second argument should be a constant integral value", Call);
5743 } else if (Attribute::canUseAsParamAttr(Kind)) {
5744 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5745 } else if (Attribute::canUseAsFnAttr(Kind)) {
5746 Check((ArgCount) == 0, "this attribute has no argument", Call);
5747 }
5748 }
5749 break;
5750 }
5751 case Intrinsic::ucmp:
5752 case Intrinsic::scmp: {
5753 Type *SrcTy = Call.getOperand(0)->getType();
5754 Type *DestTy = Call.getType();
5755
5756 Check(DestTy->getScalarSizeInBits() >= 2,
5757 "result type must be at least 2 bits wide", Call);
5758
5759 bool IsDestTypeVector = DestTy->isVectorTy();
5760 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5761 "ucmp/scmp argument and result types must both be either vector or "
5762 "scalar types",
5763 Call);
5764 if (IsDestTypeVector) {
5765 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5766 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5767 Check(SrcVecLen == DestVecLen,
5768 "return type and arguments must have the same number of "
5769 "elements",
5770 Call);
5771 }
5772 break;
5773 }
5774 case Intrinsic::coro_id: {
5775 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5776 if (isa<ConstantPointerNull>(InfoArg))
5777 break;
5778 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5779 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5780 "info argument of llvm.coro.id must refer to an initialized "
5781 "constant");
5782 Constant *Init = GV->getInitializer();
5784 "info argument of llvm.coro.id must refer to either a struct or "
5785 "an array");
5786 break;
5787 }
5788 case Intrinsic::is_fpclass: {
5789 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5790 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5791 "unsupported bits for llvm.is.fpclass test mask");
5792 break;
5793 }
5794 case Intrinsic::fptrunc_round: {
5795 // Check the rounding mode
5796 Metadata *MD = nullptr;
5798 if (MAV)
5799 MD = MAV->getMetadata();
5800
5801 Check(MD != nullptr, "missing rounding mode argument", Call);
5802
5803 Check(isa<MDString>(MD),
5804 ("invalid value for llvm.fptrunc.round metadata operand"
5805 " (the operand should be a string)"),
5806 MD);
5807
5808 std::optional<RoundingMode> RoundMode =
5809 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5810 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5811 "unsupported rounding mode argument", Call);
5812 break;
5813 }
5814#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5815#include "llvm/IR/VPIntrinsics.def"
5816#undef BEGIN_REGISTER_VP_INTRINSIC
5817 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5818 break;
5819#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5820 case Intrinsic::INTRINSIC:
5821#include "llvm/IR/ConstrainedOps.def"
5822#undef INSTRUCTION
5823 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5824 break;
5825 case Intrinsic::dbg_declare: // llvm.dbg.declare
5826 case Intrinsic::dbg_value: // llvm.dbg.value
5827 case Intrinsic::dbg_assign: // llvm.dbg.assign
5828 case Intrinsic::dbg_label: // llvm.dbg.label
5829 // We no longer interpret debug intrinsics (the old variable-location
5830 // design). They're meaningless as far as LLVM is concerned we could make
5831 // it an error for them to appear, but it's possible we'll have users
5832 // converting back to intrinsics for the forseeable future (such as DXIL),
5833 // so tolerate their existance.
5834 break;
5835 case Intrinsic::memcpy:
5836 case Intrinsic::memcpy_inline:
5837 case Intrinsic::memmove:
5838 case Intrinsic::memset:
5839 case Intrinsic::memset_inline:
5840 break;
5841 case Intrinsic::experimental_memset_pattern: {
5842 const auto Memset = cast<MemSetPatternInst>(&Call);
5843 Check(Memset->getValue()->getType()->isSized(),
5844 "unsized types cannot be used as memset patterns", Call);
5845 break;
5846 }
5847 case Intrinsic::memcpy_element_unordered_atomic:
5848 case Intrinsic::memmove_element_unordered_atomic:
5849 case Intrinsic::memset_element_unordered_atomic: {
5850 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5851
5852 ConstantInt *ElementSizeCI =
5853 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5854 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5855 Check(ElementSizeVal.isPowerOf2(),
5856 "element size of the element-wise atomic memory intrinsic "
5857 "must be a power of 2",
5858 Call);
5859
5860 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5861 return Alignment && ElementSizeVal.ule(Alignment->value());
5862 };
5863 Check(IsValidAlignment(AMI->getDestAlign()),
5864 "incorrect alignment of the destination argument", Call);
5865 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5866 Check(IsValidAlignment(AMT->getSourceAlign()),
5867 "incorrect alignment of the source argument", Call);
5868 }
5869 break;
5870 }
5871 case Intrinsic::call_preallocated_setup: {
5872 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5873 Check(NumArgs != nullptr,
5874 "llvm.call.preallocated.setup argument must be a constant");
5875 bool FoundCall = false;
5876 for (User *U : Call.users()) {
5877 auto *UseCall = dyn_cast<CallBase>(U);
5878 Check(UseCall != nullptr,
5879 "Uses of llvm.call.preallocated.setup must be calls");
5880 Intrinsic::ID IID = UseCall->getIntrinsicID();
5881 if (IID == Intrinsic::call_preallocated_arg) {
5882 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5883 Check(AllocArgIndex != nullptr,
5884 "llvm.call.preallocated.alloc arg index must be a constant");
5885 auto AllocArgIndexInt = AllocArgIndex->getValue();
5886 Check(AllocArgIndexInt.sge(0) &&
5887 AllocArgIndexInt.slt(NumArgs->getValue()),
5888 "llvm.call.preallocated.alloc arg index must be between 0 and "
5889 "corresponding "
5890 "llvm.call.preallocated.setup's argument count");
5891 } else if (IID == Intrinsic::call_preallocated_teardown) {
5892 // nothing to do
5893 } else {
5894 Check(!FoundCall, "Can have at most one call corresponding to a "
5895 "llvm.call.preallocated.setup");
5896 FoundCall = true;
5897 size_t NumPreallocatedArgs = 0;
5898 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5899 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5900 ++NumPreallocatedArgs;
5901 }
5902 }
5903 Check(NumPreallocatedArgs != 0,
5904 "cannot use preallocated intrinsics on a call without "
5905 "preallocated arguments");
5906 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5907 "llvm.call.preallocated.setup arg size must be equal to number "
5908 "of preallocated arguments "
5909 "at call site",
5910 Call, *UseCall);
5911 // getOperandBundle() cannot be called if more than one of the operand
5912 // bundle exists. There is already a check elsewhere for this, so skip
5913 // here if we see more than one.
5914 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5915 1) {
5916 return;
5917 }
5918 auto PreallocatedBundle =
5919 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5920 Check(PreallocatedBundle,
5921 "Use of llvm.call.preallocated.setup outside intrinsics "
5922 "must be in \"preallocated\" operand bundle");
5923 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5924 "preallocated bundle must have token from corresponding "
5925 "llvm.call.preallocated.setup");
5926 }
5927 }
5928 break;
5929 }
5930 case Intrinsic::call_preallocated_arg: {
5931 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5932 Check(Token &&
5933 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5934 "llvm.call.preallocated.arg token argument must be a "
5935 "llvm.call.preallocated.setup");
5936 Check(Call.hasFnAttr(Attribute::Preallocated),
5937 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5938 "call site attribute");
5939 break;
5940 }
5941 case Intrinsic::call_preallocated_teardown: {
5942 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5943 Check(Token &&
5944 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5945 "llvm.call.preallocated.teardown token argument must be a "
5946 "llvm.call.preallocated.setup");
5947 break;
5948 }
5949 case Intrinsic::gcroot:
5950 case Intrinsic::gcwrite:
5951 case Intrinsic::gcread:
5952 if (ID == Intrinsic::gcroot) {
5953 AllocaInst *AI =
5955 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5957 "llvm.gcroot parameter #2 must be a constant.", Call);
5958 if (!AI->getAllocatedType()->isPointerTy()) {
5960 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5961 "or argument #2 must be a non-null constant.",
5962 Call);
5963 }
5964 }
5965
5966 Check(Call.getParent()->getParent()->hasGC(),
5967 "Enclosing function does not use GC.", Call);
5968 break;
5969 case Intrinsic::init_trampoline:
5971 "llvm.init_trampoline parameter #2 must resolve to a function.",
5972 Call);
5973 break;
5974 case Intrinsic::prefetch:
5975 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5976 "rw argument to llvm.prefetch must be 0-1", Call);
5977 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5978 "locality argument to llvm.prefetch must be 0-3", Call);
5979 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5980 "cache type argument to llvm.prefetch must be 0-1", Call);
5981 break;
5982 case Intrinsic::stackprotector:
5984 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5985 break;
5986 case Intrinsic::localescape: {
5987 BasicBlock *BB = Call.getParent();
5988 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5989 Call);
5990 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5991 Call);
5992 for (Value *Arg : Call.args()) {
5993 if (isa<ConstantPointerNull>(Arg))
5994 continue; // Null values are allowed as placeholders.
5995 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5996 Check(AI && AI->isStaticAlloca(),
5997 "llvm.localescape only accepts static allocas", Call);
5998 }
5999 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6000 SawFrameEscape = true;
6001 break;
6002 }
6003 case Intrinsic::localrecover: {
6005 Function *Fn = dyn_cast<Function>(FnArg);
6006 Check(Fn && !Fn->isDeclaration(),
6007 "llvm.localrecover first "
6008 "argument must be function defined in this module",
6009 Call);
6010 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6011 auto &Entry = FrameEscapeInfo[Fn];
6012 Entry.second = unsigned(
6013 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6014 break;
6015 }
6016
6017 case Intrinsic::experimental_gc_statepoint:
6018 if (auto *CI = dyn_cast<CallInst>(&Call))
6019 Check(!CI->isInlineAsm(),
6020 "gc.statepoint support for inline assembly unimplemented", CI);
6021 Check(Call.getParent()->getParent()->hasGC(),
6022 "Enclosing function does not use GC.", Call);
6023
6024 verifyStatepoint(Call);
6025 break;
6026 case Intrinsic::experimental_gc_result: {
6027 Check(Call.getParent()->getParent()->hasGC(),
6028 "Enclosing function does not use GC.", Call);
6029
6030 auto *Statepoint = Call.getArgOperand(0);
6031 if (isa<UndefValue>(Statepoint))
6032 break;
6033
6034 // Are we tied to a statepoint properly?
6035 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6036 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6037 Intrinsic::experimental_gc_statepoint,
6038 "gc.result operand #1 must be from a statepoint", Call,
6039 Call.getArgOperand(0));
6040
6041 // Check that result type matches wrapped callee.
6042 auto *TargetFuncType =
6043 cast<FunctionType>(StatepointCall->getParamElementType(2));
6044 Check(Call.getType() == TargetFuncType->getReturnType(),
6045 "gc.result result type does not match wrapped callee", Call);
6046 break;
6047 }
6048 case Intrinsic::experimental_gc_relocate: {
6049 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6050
6052 "gc.relocate must return a pointer or a vector of pointers", Call);
6053
6054 // Check that this relocate is correctly tied to the statepoint
6055
6056 // This is case for relocate on the unwinding path of an invoke statepoint
6057 if (LandingPadInst *LandingPad =
6059
6060 const BasicBlock *InvokeBB =
6061 LandingPad->getParent()->getUniquePredecessor();
6062
6063 // Landingpad relocates should have only one predecessor with invoke
6064 // statepoint terminator
6065 Check(InvokeBB, "safepoints should have unique landingpads",
6066 LandingPad->getParent());
6067 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6068 InvokeBB);
6070 "gc relocate should be linked to a statepoint", InvokeBB);
6071 } else {
6072 // In all other cases relocate should be tied to the statepoint directly.
6073 // This covers relocates on a normal return path of invoke statepoint and
6074 // relocates of a call statepoint.
6075 auto *Token = Call.getArgOperand(0);
6077 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6078 }
6079
6080 // Verify rest of the relocate arguments.
6081 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6082
6083 // Both the base and derived must be piped through the safepoint.
6086 "gc.relocate operand #2 must be integer offset", Call);
6087
6088 Value *Derived = Call.getArgOperand(2);
6089 Check(isa<ConstantInt>(Derived),
6090 "gc.relocate operand #3 must be integer offset", Call);
6091
6092 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6093 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6094
6095 // Check the bounds
6096 if (isa<UndefValue>(StatepointCall))
6097 break;
6098 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6099 .getOperandBundle(LLVMContext::OB_gc_live)) {
6100 Check(BaseIndex < Opt->Inputs.size(),
6101 "gc.relocate: statepoint base index out of bounds", Call);
6102 Check(DerivedIndex < Opt->Inputs.size(),
6103 "gc.relocate: statepoint derived index out of bounds", Call);
6104 }
6105
6106 // Relocated value must be either a pointer type or vector-of-pointer type,
6107 // but gc_relocate does not need to return the same pointer type as the
6108 // relocated pointer. It can be casted to the correct type later if it's
6109 // desired. However, they must have the same address space and 'vectorness'
6110 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6111 auto *ResultType = Call.getType();
6112 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6113 auto *BaseType = Relocate.getBasePtr()->getType();
6114
6115 Check(BaseType->isPtrOrPtrVectorTy(),
6116 "gc.relocate: relocated value must be a pointer", Call);
6117 Check(DerivedType->isPtrOrPtrVectorTy(),
6118 "gc.relocate: relocated value must be a pointer", Call);
6119
6120 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6121 "gc.relocate: vector relocates to vector and pointer to pointer",
6122 Call);
6123 Check(
6124 ResultType->getPointerAddressSpace() ==
6125 DerivedType->getPointerAddressSpace(),
6126 "gc.relocate: relocating a pointer shouldn't change its address space",
6127 Call);
6128
6129 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6130 Check(GC, "gc.relocate: calling function must have GCStrategy",
6131 Call.getFunction());
6132 if (GC) {
6133 auto isGCPtr = [&GC](Type *PTy) {
6134 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6135 };
6136 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6137 Check(isGCPtr(BaseType),
6138 "gc.relocate: relocated value must be a gc pointer", Call);
6139 Check(isGCPtr(DerivedType),
6140 "gc.relocate: relocated value must be a gc pointer", Call);
6141 }
6142 break;
6143 }
6144 case Intrinsic::experimental_patchpoint: {
6145 if (Call.getCallingConv() == CallingConv::AnyReg) {
6147 "patchpoint: invalid return type used with anyregcc", Call);
6148 }
6149 break;
6150 }
6151 case Intrinsic::eh_exceptioncode:
6152 case Intrinsic::eh_exceptionpointer: {
6154 "eh.exceptionpointer argument must be a catchpad", Call);
6155 break;
6156 }
6157 case Intrinsic::get_active_lane_mask: {
6159 "get_active_lane_mask: must return a "
6160 "vector",
6161 Call);
6162 auto *ElemTy = Call.getType()->getScalarType();
6163 Check(ElemTy->isIntegerTy(1),
6164 "get_active_lane_mask: element type is not "
6165 "i1",
6166 Call);
6167 break;
6168 }
6169 case Intrinsic::experimental_get_vector_length: {
6170 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6171 Check(!VF->isNegative() && !VF->isZero(),
6172 "get_vector_length: VF must be positive", Call);
6173 break;
6174 }
6175 case Intrinsic::masked_load: {
6176 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6177 Call);
6178
6179 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6181 Value *PassThru = Call.getArgOperand(3);
6182 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6183 Call);
6184 Check(Alignment->getValue().isPowerOf2(),
6185 "masked_load: alignment must be a power of 2", Call);
6186 Check(PassThru->getType() == Call.getType(),
6187 "masked_load: pass through and return type must match", Call);
6188 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6189 cast<VectorType>(Call.getType())->getElementCount(),
6190 "masked_load: vector mask must be same length as return", Call);
6191 break;
6192 }
6193 case Intrinsic::masked_store: {
6194 Value *Val = Call.getArgOperand(0);
6195 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6197 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6198 Call);
6199 Check(Alignment->getValue().isPowerOf2(),
6200 "masked_store: alignment must be a power of 2", Call);
6201 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6202 cast<VectorType>(Val->getType())->getElementCount(),
6203 "masked_store: vector mask must be same length as value", Call);
6204 break;
6205 }
6206
6207 case Intrinsic::masked_gather: {
6208 const APInt &Alignment =
6210 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6211 "masked_gather: alignment must be 0 or a power of 2", Call);
6212 break;
6213 }
6214 case Intrinsic::masked_scatter: {
6215 const APInt &Alignment =
6216 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6217 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6218 "masked_scatter: alignment must be 0 or a power of 2", Call);
6219 break;
6220 }
6221
6222 case Intrinsic::experimental_guard: {
6223 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6225 "experimental_guard must have exactly one "
6226 "\"deopt\" operand bundle");
6227 break;
6228 }
6229
6230 case Intrinsic::experimental_deoptimize: {
6231 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6232 Call);
6234 "experimental_deoptimize must have exactly one "
6235 "\"deopt\" operand bundle");
6237 "experimental_deoptimize return type must match caller return type");
6238
6239 if (isa<CallInst>(Call)) {
6241 Check(RI,
6242 "calls to experimental_deoptimize must be followed by a return");
6243
6244 if (!Call.getType()->isVoidTy() && RI)
6245 Check(RI->getReturnValue() == &Call,
6246 "calls to experimental_deoptimize must be followed by a return "
6247 "of the value computed by experimental_deoptimize");
6248 }
6249
6250 break;
6251 }
6252 case Intrinsic::vastart: {
6254 "va_start called in a non-varargs function");
6255 break;
6256 }
6257 case Intrinsic::get_dynamic_area_offset: {
6258 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6259 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6260 IntTy->getBitWidth(),
6261 "get_dynamic_area_offset result type must be scalar integer matching "
6262 "alloca address space width",
6263 Call);
6264 break;
6265 }
6266 case Intrinsic::vector_reduce_and:
6267 case Intrinsic::vector_reduce_or:
6268 case Intrinsic::vector_reduce_xor:
6269 case Intrinsic::vector_reduce_add:
6270 case Intrinsic::vector_reduce_mul:
6271 case Intrinsic::vector_reduce_smax:
6272 case Intrinsic::vector_reduce_smin:
6273 case Intrinsic::vector_reduce_umax:
6274 case Intrinsic::vector_reduce_umin: {
6275 Type *ArgTy = Call.getArgOperand(0)->getType();
6276 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6277 "Intrinsic has incorrect argument type!");
6278 break;
6279 }
6280 case Intrinsic::vector_reduce_fmax:
6281 case Intrinsic::vector_reduce_fmin: {
6282 Type *ArgTy = Call.getArgOperand(0)->getType();
6283 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6284 "Intrinsic has incorrect argument type!");
6285 break;
6286 }
6287 case Intrinsic::vector_reduce_fadd:
6288 case Intrinsic::vector_reduce_fmul: {
6289 // Unlike the other reductions, the first argument is a start value. The
6290 // second argument is the vector to be reduced.
6291 Type *ArgTy = Call.getArgOperand(1)->getType();
6292 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6293 "Intrinsic has incorrect argument type!");
6294 break;
6295 }
6296 case Intrinsic::smul_fix:
6297 case Intrinsic::smul_fix_sat:
6298 case Intrinsic::umul_fix:
6299 case Intrinsic::umul_fix_sat:
6300 case Intrinsic::sdiv_fix:
6301 case Intrinsic::sdiv_fix_sat:
6302 case Intrinsic::udiv_fix:
6303 case Intrinsic::udiv_fix_sat: {
6304 Value *Op1 = Call.getArgOperand(0);
6305 Value *Op2 = Call.getArgOperand(1);
6307 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6308 "vector of ints");
6310 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6311 "vector of ints");
6312
6313 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6314 Check(Op3->getType()->isIntegerTy(),
6315 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6316 Check(Op3->getBitWidth() <= 32,
6317 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6318
6319 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6320 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6321 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6322 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6323 "the operands");
6324 } else {
6325 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6326 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6327 "to the width of the operands");
6328 }
6329 break;
6330 }
6331 case Intrinsic::lrint:
6332 case Intrinsic::llrint:
6333 case Intrinsic::lround:
6334 case Intrinsic::llround: {
6335 Type *ValTy = Call.getArgOperand(0)->getType();
6336 Type *ResultTy = Call.getType();
6337 auto *VTy = dyn_cast<VectorType>(ValTy);
6338 auto *RTy = dyn_cast<VectorType>(ResultTy);
6339 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6340 ExpectedName + ": argument must be floating-point or vector "
6341 "of floating-points, and result must be integer or "
6342 "vector of integers",
6343 &Call);
6344 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6345 ExpectedName + ": argument and result disagree on vector use", &Call);
6346 if (VTy) {
6347 Check(VTy->getElementCount() == RTy->getElementCount(),
6348 ExpectedName + ": argument must be same length as result", &Call);
6349 }
6350 break;
6351 }
6352 case Intrinsic::bswap: {
6353 Type *Ty = Call.getType();
6354 unsigned Size = Ty->getScalarSizeInBits();
6355 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6356 break;
6357 }
6358 case Intrinsic::invariant_start: {
6359 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6360 Check(InvariantSize &&
6361 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6362 "invariant_start parameter must be -1, 0 or a positive number",
6363 &Call);
6364 break;
6365 }
6366 case Intrinsic::matrix_multiply:
6367 case Intrinsic::matrix_transpose:
6368 case Intrinsic::matrix_column_major_load:
6369 case Intrinsic::matrix_column_major_store: {
6371 ConstantInt *Stride = nullptr;
6372 ConstantInt *NumRows;
6373 ConstantInt *NumColumns;
6374 VectorType *ResultTy;
6375 Type *Op0ElemTy = nullptr;
6376 Type *Op1ElemTy = nullptr;
6377 switch (ID) {
6378 case Intrinsic::matrix_multiply: {
6379 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6380 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6381 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6383 ->getNumElements() ==
6384 NumRows->getZExtValue() * N->getZExtValue(),
6385 "First argument of a matrix operation does not match specified "
6386 "shape!");
6388 ->getNumElements() ==
6389 N->getZExtValue() * NumColumns->getZExtValue(),
6390 "Second argument of a matrix operation does not match specified "
6391 "shape!");
6392
6393 ResultTy = cast<VectorType>(Call.getType());
6394 Op0ElemTy =
6395 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6396 Op1ElemTy =
6397 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6398 break;
6399 }
6400 case Intrinsic::matrix_transpose:
6401 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6402 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6403 ResultTy = cast<VectorType>(Call.getType());
6404 Op0ElemTy =
6405 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6406 break;
6407 case Intrinsic::matrix_column_major_load: {
6409 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6410 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6411 ResultTy = cast<VectorType>(Call.getType());
6412 break;
6413 }
6414 case Intrinsic::matrix_column_major_store: {
6416 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6417 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6418 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6419 Op0ElemTy =
6420 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6421 break;
6422 }
6423 default:
6424 llvm_unreachable("unexpected intrinsic");
6425 }
6426
6427 Check(ResultTy->getElementType()->isIntegerTy() ||
6428 ResultTy->getElementType()->isFloatingPointTy(),
6429 "Result type must be an integer or floating-point type!", IF);
6430
6431 if (Op0ElemTy)
6432 Check(ResultTy->getElementType() == Op0ElemTy,
6433 "Vector element type mismatch of the result and first operand "
6434 "vector!",
6435 IF);
6436
6437 if (Op1ElemTy)
6438 Check(ResultTy->getElementType() == Op1ElemTy,
6439 "Vector element type mismatch of the result and second operand "
6440 "vector!",
6441 IF);
6442
6444 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6445 "Result of a matrix operation does not fit in the returned vector!");
6446
6447 if (Stride)
6448 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6449 "Stride must be greater or equal than the number of rows!", IF);
6450
6451 break;
6452 }
6453 case Intrinsic::vector_splice: {
6455 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6456 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6457 if (Call.getParent() && Call.getParent()->getParent()) {
6458 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6459 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6460 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6461 }
6462 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6463 (Idx >= 0 && Idx < KnownMinNumElements),
6464 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6465 "known minimum number of elements in the vector. For scalable "
6466 "vectors the minimum number of elements is determined from "
6467 "vscale_range.",
6468 &Call);
6469 break;
6470 }
6471 case Intrinsic::stepvector: {
6473 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6474 VecTy->getScalarSizeInBits() >= 8,
6475 "stepvector only supported for vectors of integers "
6476 "with a bitwidth of at least 8.",
6477 &Call);
6478 break;
6479 }
6480 case Intrinsic::experimental_vector_match: {
6481 Value *Op1 = Call.getArgOperand(0);
6482 Value *Op2 = Call.getArgOperand(1);
6484
6485 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6486 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6487 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6488
6489 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6491 "Second operand must be a fixed length vector.", &Call);
6492 Check(Op1Ty->getElementType()->isIntegerTy(),
6493 "First operand must be a vector of integers.", &Call);
6494 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6495 "First two operands must have the same element type.", &Call);
6496 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6497 "First operand and mask must have the same number of elements.",
6498 &Call);
6499 Check(MaskTy->getElementType()->isIntegerTy(1),
6500 "Mask must be a vector of i1's.", &Call);
6501 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6502 &Call);
6503 break;
6504 }
6505 case Intrinsic::vector_insert: {
6506 Value *Vec = Call.getArgOperand(0);
6507 Value *SubVec = Call.getArgOperand(1);
6508 Value *Idx = Call.getArgOperand(2);
6509 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6510
6511 VectorType *VecTy = cast<VectorType>(Vec->getType());
6512 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6513
6514 ElementCount VecEC = VecTy->getElementCount();
6515 ElementCount SubVecEC = SubVecTy->getElementCount();
6516 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6517 "vector_insert parameters must have the same element "
6518 "type.",
6519 &Call);
6520 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6521 "vector_insert index must be a constant multiple of "
6522 "the subvector's known minimum vector length.");
6523
6524 // If this insertion is not the 'mixed' case where a fixed vector is
6525 // inserted into a scalable vector, ensure that the insertion of the
6526 // subvector does not overrun the parent vector.
6527 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6528 Check(IdxN < VecEC.getKnownMinValue() &&
6529 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6530 "subvector operand of vector_insert would overrun the "
6531 "vector being inserted into.");
6532 }
6533 break;
6534 }
6535 case Intrinsic::vector_extract: {
6536 Value *Vec = Call.getArgOperand(0);
6537 Value *Idx = Call.getArgOperand(1);
6538 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6539
6540 VectorType *ResultTy = cast<VectorType>(Call.getType());
6541 VectorType *VecTy = cast<VectorType>(Vec->getType());
6542
6543 ElementCount VecEC = VecTy->getElementCount();
6544 ElementCount ResultEC = ResultTy->getElementCount();
6545
6546 Check(ResultTy->getElementType() == VecTy->getElementType(),
6547 "vector_extract result must have the same element "
6548 "type as the input vector.",
6549 &Call);
6550 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6551 "vector_extract index must be a constant multiple of "
6552 "the result type's known minimum vector length.");
6553
6554 // If this extraction is not the 'mixed' case where a fixed vector is
6555 // extracted from a scalable vector, ensure that the extraction does not
6556 // overrun the parent vector.
6557 if (VecEC.isScalable() == ResultEC.isScalable()) {
6558 Check(IdxN < VecEC.getKnownMinValue() &&
6559 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6560 "vector_extract would overrun.");
6561 }
6562 break;
6563 }
6564 case Intrinsic::vector_partial_reduce_add: {
6567
6568 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6569 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6570
6571 Check((VecWidth % AccWidth) == 0,
6572 "Invalid vector widths for partial "
6573 "reduction. The width of the input vector "
6574 "must be a positive integer multiple of "
6575 "the width of the accumulator vector.");
6576 break;
6577 }
6578 case Intrinsic::experimental_noalias_scope_decl: {
6579 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6580 break;
6581 }
6582 case Intrinsic::preserve_array_access_index:
6583 case Intrinsic::preserve_struct_access_index:
6584 case Intrinsic::aarch64_ldaxr:
6585 case Intrinsic::aarch64_ldxr:
6586 case Intrinsic::arm_ldaex:
6587 case Intrinsic::arm_ldrex: {
6588 Type *ElemTy = Call.getParamElementType(0);
6589 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6590 &Call);
6591 break;
6592 }
6593 case Intrinsic::aarch64_stlxr:
6594 case Intrinsic::aarch64_stxr:
6595 case Intrinsic::arm_stlex:
6596 case Intrinsic::arm_strex: {
6597 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6598 Check(ElemTy,
6599 "Intrinsic requires elementtype attribute on second argument.",
6600 &Call);
6601 break;
6602 }
6603 case Intrinsic::aarch64_prefetch: {
6604 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6605 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6606 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6607 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6608 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6609 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6610 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6611 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6612 break;
6613 }
6614 case Intrinsic::callbr_landingpad: {
6615 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6616 Check(CBR, "intrinstic requires callbr operand", &Call);
6617 if (!CBR)
6618 break;
6619
6620 const BasicBlock *LandingPadBB = Call.getParent();
6621 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6622 if (!PredBB) {
6623 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6624 break;
6625 }
6626 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6627 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6628 &Call);
6629 break;
6630 }
6631 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6632 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6633 "block in indirect destination list",
6634 &Call);
6635 const Instruction &First = *LandingPadBB->begin();
6636 Check(&First == &Call, "No other instructions may proceed intrinsic",
6637 &Call);
6638 break;
6639 }
6640 case Intrinsic::amdgcn_cs_chain: {
6641 auto CallerCC = Call.getCaller()->getCallingConv();
6642 switch (CallerCC) {
6643 case CallingConv::AMDGPU_CS:
6644 case CallingConv::AMDGPU_CS_Chain:
6645 case CallingConv::AMDGPU_CS_ChainPreserve:
6646 break;
6647 default:
6648 CheckFailed("Intrinsic can only be used from functions with the "
6649 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6650 "calling conventions",
6651 &Call);
6652 break;
6653 }
6654
6655 Check(Call.paramHasAttr(2, Attribute::InReg),
6656 "SGPR arguments must have the `inreg` attribute", &Call);
6657 Check(!Call.paramHasAttr(3, Attribute::InReg),
6658 "VGPR arguments must not have the `inreg` attribute", &Call);
6659
6660 auto *Next = Call.getNextNode();
6661 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6662 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6663 Intrinsic::amdgcn_unreachable;
6664 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6665 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6666 break;
6667 }
6668 case Intrinsic::amdgcn_init_exec_from_input: {
6669 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6670 Check(Arg && Arg->hasInRegAttr(),
6671 "only inreg arguments to the parent function are valid as inputs to "
6672 "this intrinsic",
6673 &Call);
6674 break;
6675 }
6676 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6677 auto CallerCC = Call.getCaller()->getCallingConv();
6678 switch (CallerCC) {
6679 case CallingConv::AMDGPU_CS_Chain:
6680 case CallingConv::AMDGPU_CS_ChainPreserve:
6681 break;
6682 default:
6683 CheckFailed("Intrinsic can only be used from functions with the "
6684 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6685 "calling conventions",
6686 &Call);
6687 break;
6688 }
6689
6690 unsigned InactiveIdx = 1;
6691 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6692 "Value for inactive lanes must not have the `inreg` attribute",
6693 &Call);
6694 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6695 "Value for inactive lanes must be a function argument", &Call);
6696 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6697 "Value for inactive lanes must be a VGPR function argument", &Call);
6698 break;
6699 }
6700 case Intrinsic::amdgcn_call_whole_wave: {
6702 Check(F, "Indirect whole wave calls are not allowed", &Call);
6703
6704 CallingConv::ID CC = F->getCallingConv();
6705 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6706 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6707 &Call);
6708
6709 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6710
6711 Check(Call.arg_size() == F->arg_size(),
6712 "Call argument count must match callee argument count", &Call);
6713
6714 // The first argument of the call is the callee, and the first argument of
6715 // the callee is the active mask. The rest of the arguments must match.
6716 Check(F->arg_begin()->getType()->isIntegerTy(1),
6717 "Callee must have i1 as its first argument", &Call);
6718 for (auto [CallArg, FuncArg] :
6719 drop_begin(zip_equal(Call.args(), F->args()))) {
6720 Check(CallArg->getType() == FuncArg.getType(),
6721 "Argument types must match", &Call);
6722
6723 // Check that inreg attributes match between call site and function
6724 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6725 FuncArg.hasInRegAttr(),
6726 "Argument inreg attributes must match", &Call);
6727 }
6728 break;
6729 }
6730 case Intrinsic::amdgcn_s_prefetch_data: {
6731 Check(
6734 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6735 break;
6736 }
6737 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6738 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6739 Value *Src0 = Call.getArgOperand(0);
6740 Value *Src1 = Call.getArgOperand(1);
6741
6742 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6743 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6744 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6745 Call.getArgOperand(3));
6746 Check(BLGP <= 4, "invalid value for blgp format", Call,
6747 Call.getArgOperand(4));
6748
6749 // AMDGPU::MFMAScaleFormats values
6750 auto getFormatNumRegs = [](unsigned FormatVal) {
6751 switch (FormatVal) {
6752 case 0:
6753 case 1:
6754 return 8u;
6755 case 2:
6756 case 3:
6757 return 6u;
6758 case 4:
6759 return 4u;
6760 default:
6761 llvm_unreachable("invalid format value");
6762 }
6763 };
6764
6765 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6766 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6767 return false;
6768 unsigned NumElts = Ty->getNumElements();
6769 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6770 };
6771
6772 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6773 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6774 Check(isValidSrcASrcBVector(Src0Ty),
6775 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6776 Check(isValidSrcASrcBVector(Src1Ty),
6777 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6778
6779 // Permit excess registers for the format.
6780 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6781 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6782 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6783 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6784 break;
6785 }
6786 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6787 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6788 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6789 Value *Src0 = Call.getArgOperand(1);
6790 Value *Src1 = Call.getArgOperand(3);
6791
6792 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6793 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6794 Check(FmtA <= 4, "invalid value for matrix format", Call,
6795 Call.getArgOperand(0));
6796 Check(FmtB <= 4, "invalid value for matrix format", Call,
6797 Call.getArgOperand(2));
6798
6799 // AMDGPU::MatrixFMT values
6800 auto getFormatNumRegs = [](unsigned FormatVal) {
6801 switch (FormatVal) {
6802 case 0:
6803 case 1:
6804 return 16u;
6805 case 2:
6806 case 3:
6807 return 12u;
6808 case 4:
6809 return 8u;
6810 default:
6811 llvm_unreachable("invalid format value");
6812 }
6813 };
6814
6815 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6816 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6817 return false;
6818 unsigned NumElts = Ty->getNumElements();
6819 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6820 };
6821
6822 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6823 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6824 Check(isValidSrcASrcBVector(Src0Ty),
6825 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6826 Check(isValidSrcASrcBVector(Src1Ty),
6827 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6828
6829 // Permit excess registers for the format.
6830 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6831 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6832 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6833 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6834 break;
6835 }
6836 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6837 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6838 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6839 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6840 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6841 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6842 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6843 Value *PtrArg = Call.getArgOperand(0);
6844 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6846 "cooperative atomic intrinsics require a generic or global pointer",
6847 &Call, PtrArg);
6848
6849 // Last argument must be a MD string
6851 MDNode *MD = cast<MDNode>(Op->getMetadata());
6852 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6853 "cooperative atomic intrinsics require that the last argument is a "
6854 "metadata string",
6855 &Call, Op);
6856 break;
6857 }
6858 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6859 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6860 Value *V = Call.getArgOperand(0);
6861 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6862 Check(RegCount % 8 == 0,
6863 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6864 break;
6865 }
6866 case Intrinsic::experimental_convergence_entry:
6867 case Intrinsic::experimental_convergence_anchor:
6868 break;
6869 case Intrinsic::experimental_convergence_loop:
6870 break;
6871 case Intrinsic::ptrmask: {
6872 Type *Ty0 = Call.getArgOperand(0)->getType();
6873 Type *Ty1 = Call.getArgOperand(1)->getType();
6875 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6876 "of pointers",
6877 &Call);
6878 Check(
6879 Ty0->isVectorTy() == Ty1->isVectorTy(),
6880 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6881 &Call);
6882 if (Ty0->isVectorTy())
6883 Check(cast<VectorType>(Ty0)->getElementCount() ==
6884 cast<VectorType>(Ty1)->getElementCount(),
6885 "llvm.ptrmask intrinsic arguments must have the same number of "
6886 "elements",
6887 &Call);
6888 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6889 "llvm.ptrmask intrinsic second argument bitwidth must match "
6890 "pointer index type size of first argument",
6891 &Call);
6892 break;
6893 }
6894 case Intrinsic::thread_pointer: {
6896 DL.getDefaultGlobalsAddressSpace(),
6897 "llvm.thread.pointer intrinsic return type must be for the globals "
6898 "address space",
6899 &Call);
6900 break;
6901 }
6902 case Intrinsic::threadlocal_address: {
6903 const Value &Arg0 = *Call.getArgOperand(0);
6904 Check(isa<GlobalValue>(Arg0),
6905 "llvm.threadlocal.address first argument must be a GlobalValue");
6906 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6907 "llvm.threadlocal.address operand isThreadLocal() must be true");
6908 break;
6909 }
6910 case Intrinsic::lifetime_start:
6911 case Intrinsic::lifetime_end: {
6914 "llvm.lifetime.start/end can only be used on alloca or poison",
6915 &Call);
6916 break;
6917 }
6918 };
6919
6920 // Verify that there aren't any unmediated control transfers between funclets.
6922 Function *F = Call.getParent()->getParent();
6923 if (F->hasPersonalityFn() &&
6924 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6925 // Run EH funclet coloring on-demand and cache results for other intrinsic
6926 // calls in this function
6927 if (BlockEHFuncletColors.empty())
6928 BlockEHFuncletColors = colorEHFunclets(*F);
6929
6930 // Check for catch-/cleanup-pad in first funclet block
6931 bool InEHFunclet = false;
6932 BasicBlock *CallBB = Call.getParent();
6933 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6934 assert(CV.size() > 0 && "Uncolored block");
6935 for (BasicBlock *ColorFirstBB : CV)
6936 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6937 It != ColorFirstBB->end())
6939 InEHFunclet = true;
6940
6941 // Check for funclet operand bundle
6942 bool HasToken = false;
6943 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6945 HasToken = true;
6946
6947 // This would cause silent code truncation in WinEHPrepare
6948 if (InEHFunclet)
6949 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6950 }
6951 }
6952}
6953
/// Carefully grab the subprogram from a local scope.
///
/// This carefully grabs the subprogram from a local scope, avoiding the
/// built-in assertions that would typically fire. Returns null for a null
/// scope or a broken scope chain (diagnosed elsewhere).
// NOTE(review): the function signature line appears elided in this view —
// confirm against upstream (expected: static DISubprogram
// *getSubprogram(Metadata *LocalScope)).
  if (!LocalScope)
    return nullptr;

  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Walk up the lexical-block chain via the *raw* scope so malformed chains
  // don't trip the accessor assertions.
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}
6972
// Verify a single #dbg_label record: its label operand and the agreement
// between the label's scope and the !dbg attachment's subprogram.
void Verifier::visit(DbgLabelRecord &DLR) {
  // NOTE(review): the opening CheckDI(...) line for this diagnostic appears
  // elided in this view — confirm against upstream.
          "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  // Broken scope chains yield null subprograms and are diagnosed elsewhere.
  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
7000
// Verify a single #dbg_value / #dbg_declare / #dbg_assign record: its kind,
// location/variable/expression operands, assign-specific operands, and scope
// agreement with the attached DILocation.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the three known record kinds are legal.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

  // NOTE(review): the opening CheckDI(...) lines for the next two diagnostics
  // appear elided in this view — confirm against upstream.
          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  if (DVR.isDbgAssign()) {
    // NOTE(review): the opening CheckDI(...) line appears elided here too.
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

            "invalid #dbg_assign address expression", &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  verifyFnArgs(DVR);
}
7091
// Verify vector-predicated (VP) intrinsics: element-count and element-type
// rules for VP casts, predicate validity for VP compares, the fpclass test
// mask, and the splice index range.
void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
  // All VP casts require matching source/result vector lengths; the
  // element-type constraints then depend on the specific cast.
  if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
    auto *RetTy = cast<VectorType>(VPCast->getType());
    auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
    Check(RetTy->getElementCount() == ValTy->getElementCount(),
          "VP cast intrinsic first argument and result vector lengths must be "
          "equal",
          *VPCast);

    switch (VPCast->getIntrinsicID()) {
    default:
      llvm_unreachable("Unknown VP cast intrinsic");
    case Intrinsic::vp_trunc:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.trunc intrinsic first argument and result element type "
            "must be integer",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.trunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_zext:
    case Intrinsic::vp_sext:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
            "element type must be integer",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
            "argument must be smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fptoui:
    case Intrinsic::vp_fptosi:
    case Intrinsic::vp_lrint:
    case Intrinsic::vp_llrint:
      // NOTE(review): the two adjacent string literals below concatenate to
      // "...llvm.vp.llrintintrinsic..." — a space appears to be missing
      // between them; confirm and fix upstream.
      Check(
          RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
          "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
          "type must be floating-point and result element type must be integer",
          *VPCast);
      break;
    case Intrinsic::vp_uitofp:
    case Intrinsic::vp_sitofp:
      Check(
          RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
          "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
          "type must be integer and result element type must be floating-point",
          *VPCast);
      break;
    case Intrinsic::vp_fptrunc:
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fptrunc intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fpext:
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fpext intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.fpext intrinsic the bit size of first argument must be "
            "smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_ptrtoint:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
            "llvm.vp.ptrtoint intrinsic first argument element type must be "
            "pointer and result element type must be integer",
            *VPCast);
      break;
    case Intrinsic::vp_inttoptr:
      Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.inttoptr intrinsic first argument element type must be "
            "integer and result element type must be pointer",
            *VPCast);
      break;
    }
  }

  switch (VPI.getIntrinsicID()) {
  case Intrinsic::vp_fcmp: {
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
    // NOTE(review): the opening Check(...) line appears elided in this view.
          "invalid predicate for VP FP comparison intrinsic", &VPI);
    break;
  }
  case Intrinsic::vp_icmp: {
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
    // NOTE(review): the opening Check(...) line appears elided in this view.
          "invalid predicate for VP integer comparison intrinsic", &VPI);
    break;
  }
  case Intrinsic::vp_is_fpclass: {
    // The test mask may only contain bits from FPClassTest (fcAllFlags).
    auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
    Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "unsupported bits for llvm.vp.is.fpclass test mask");
    break;
  }
  case Intrinsic::experimental_vp_splice: {
    VectorType *VecTy = cast<VectorType>(VPI.getType());
    int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
    int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
    // For scalable vectors, scale the bound by the function's vscale_range
    // minimum when present.
    if (VPI.getParent() && VPI.getParent()->getParent()) {
      AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
      if (Attrs.hasFnAttr(Attribute::VScaleRange))
        KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
    }
    Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
              (Idx >= 0 && Idx < KnownMinNumElements),
          "The splice index exceeds the range [-VL, VL-1] where VL is the "
          "known minimum number of elements in the vector. For scalable "
          "vectors the minimum number of elements is determined from "
          "vscale_range.",
          &VPI);
    break;
  }
  }
}
7217
// Verify constrained FP intrinsics: operand count (value operands plus the
// exception-behavior and optional rounding-mode metadata), per-intrinsic
// type rules, and the validity of the metadata arguments themselves.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  // NOTE(review): the initializer of HasRoundingMD appears elided in this
  // view — confirm against upstream.
  bool HasRoundingMD =

  // Add the expected number of metadata operands.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
  // NOTE(review): the guarding if-line appears elided in this view.
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
    // NOTE(review): the opening Check(...) line appears elided in this view.
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP (or FP vector) source, integer (or integer vector) result, with
    // matching element counts when vectors are involved.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the result and apply the mirror-image checks.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer source, FP result — the converse of the fptosi/fptoui case.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    // fptrunc must narrow; fpext must widen.
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
7351
7352void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7353 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7354 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7355
7356 // We don't know whether this intrinsic verified correctly.
7357 if (!V || !E || !E->isValid())
7358 return;
7359
7360 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7361 auto Fragment = E->getFragmentInfo();
7362 if (!Fragment)
7363 return;
7364
7365 // The frontend helps out GDB by emitting the members of local anonymous
7366 // unions as artificial local variables with shared storage. When SROA splits
7367 // the storage for artificial local variables that are smaller than the entire
7368 // union, the overhang piece will be outside of the allotted space for the
7369 // variable and this check fails.
7370 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7371 if (V->isArtificial())
7372 return;
7373
7374 verifyFragmentExpression(*V, *Fragment, &DVR);
7375}
7376
// Shared fragment bounds check: the fragment must lie strictly inside the
// variable's storage (a fragment covering the whole variable is also
// rejected — it should simply be omitted).
// NOTE(review): the `Fragment` parameter line of this signature appears
// elided in this view — confirm against upstream.
template <typename ValueOrMetadata>
void Verifier::verifyFragmentExpression(const DIVariable &V,
                                        ValueOrMetadata *Desc) {
  // If there's no size, the type is broken, but that should be checked
  // elsewhere.
  auto VarSize = V.getSizeInBits();
  if (!VarSize)
    return;

  unsigned FragSize = Fragment.SizeInBits;
  unsigned FragOffset = Fragment.OffsetInBits;
  CheckDI(FragSize + FragOffset <= *VarSize,
          "fragment is larger than or outside of variable", Desc, &V);
  CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
}
7393
7394void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7395 // This function does not take the scope of noninlined function arguments into
7396 // account. Don't run it if current function is nodebug, because it may
7397 // contain inlined debug intrinsics.
7398 if (!HasDebugInfo)
7399 return;
7400
7401 // For performance reasons only check non-inlined ones.
7402 if (DVR.getDebugLoc()->getInlinedAt())
7403 return;
7404
7405 DILocalVariable *Var = DVR.getVariable();
7406 CheckDI(Var, "#dbg record without variable");
7407
7408 unsigned ArgNo = Var->getArg();
7409 if (!ArgNo)
7410 return;
7411
7412 // Verify there are no duplicate function argument debug info entries.
7413 // These will cause hard-to-debug assertions in the DWARF backend.
7414 if (DebugFnArgs.size() < ArgNo)
7415 DebugFnArgs.resize(ArgNo, nullptr);
7416
7417 auto *Prev = DebugFnArgs[ArgNo - 1];
7418 DebugFnArgs[ArgNo - 1] = Var;
7419 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7420 Prev, Var);
7421}
7422
// Reject DW_OP_LLVM_entry_value expressions in LLVM IR; they are only valid
// in MIR, with a carve-out for swiftasync arguments.
void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());

  // We don't know whether this intrinsic verified correctly.
  if (!E || !E->isValid())
    return;

  // NOTE(review): the if-line opening the brace scope below appears elided
  // in this view — confirm against upstream.
    Value *VarValue = DVR.getVariableLocationOp(0);
    // Undef/poison locations carry no value worth diagnosing.
    if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
      return;
    // We allow EntryValues for swift async arguments, as they have an
    // ABI-guarantee to be turned into a specific register.
    if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
        ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
      return;
  }

  CheckDI(!E->isEntryValue(),
          "Entry values are only allowed in MIR unless they target a "
          "swiftasync Argument",
          &DVR);
}
7446
7447void Verifier::verifyCompileUnits() {
7448 // When more than one Module is imported into the same context, such as during
7449 // an LTO build before linking the modules, ODR type uniquing may cause types
7450 // to point to a different CU. This check does not make sense in this case.
7451 if (M.getContext().isODRUniquingDebugTypes())
7452 return;
7453 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7454 SmallPtrSet<const Metadata *, 2> Listed;
7455 if (CUs)
7456 Listed.insert_range(CUs->operands());
7457 for (const auto *CU : CUVisited)
7458 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7459 CUVisited.clear();
7460}
7461
7462void Verifier::verifyDeoptimizeCallingConvs() {
7463 if (DeoptimizeDeclarations.empty())
7464 return;
7465
7466 const Function *First = DeoptimizeDeclarations[0];
7467 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7468 Check(First->getCallingConv() == F->getCallingConv(),
7469 "All llvm.experimental.deoptimize declarations must have the same "
7470 "calling convention",
7471 First, F);
7472 }
7473}
7474
7475void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7476 const OperandBundleUse &BU) {
7477 FunctionType *FTy = Call.getFunctionType();
7478
7479 Check((FTy->getReturnType()->isPointerTy() ||
7480 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7481 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7482 "function returning a pointer or a non-returning function that has a "
7483 "void return type",
7484 Call);
7485
7486 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7487 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7488 "an argument",
7489 Call);
7490
7491 auto *Fn = cast<Function>(BU.Inputs.front());
7492 Intrinsic::ID IID = Fn->getIntrinsicID();
7493
7494 if (IID) {
7495 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7496 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7497 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7498 "invalid function argument", Call);
7499 } else {
7500 StringRef FnName = Fn->getName();
7501 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7502 FnName == "objc_claimAutoreleasedReturnValue" ||
7503 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7504 "invalid function argument", Call);
7505 }
7506}
7507
// Verify llvm.experimental.noalias.scope.decl calls: each must declare a
// single scope, and (optionally) no two declarations of the same scope may
// dominate one another.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    // NOTE(review): the argument line of this dyn_cast appears elided in
    // this view — confirm against upstream.
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
  // NOTE(review): the guarding if-line appears elided in this view.
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    // NOTE(review): the argument line of this cast appears elided in this
    // view — confirm against upstream.
    const auto *ScopeListMV = cast<MetadataAsValue>(
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    auto CurScope = GetScope(*ItCurrent);
    // Advance ItNext past the run of declarations sharing CurScope.
    auto ItNext = ItCurrent;
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
7576
7577//===----------------------------------------------------------------------===//
7578// Implement the public interfaces to this file...
7579//===----------------------------------------------------------------------===//
7580
  // NOTE(review): the signature line of llvm::verifyFunction appears elided
  // in this view — confirm against upstream.
  // The Verifier API takes a non-const Function&, but verification does not
  // modify it.
  Function &F = const_cast<Function &>(f);

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}
7591
// NOTE(review): the first signature line of llvm::verifyModule appears
// elided in this view — confirm against upstream.
                 bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  // Broken debug info is a hard error only when the caller did not ask to be
  // told about it via BrokenDebugInfo.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);

  // Verify each function, then the module-level properties.
  bool Broken = false;
  for (const Function &F : M)
    Broken |= !V.verify(F);

  Broken |= !V.verify();
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}
7608
7609namespace {
7610
// Legacy pass-manager wrapper around the Verifier. Runs per-function
// verification in runOnFunction and module-level verification (plus
// declarations) in doFinalization.
struct VerifierLegacyPass : public FunctionPass {
  static char ID; // Pass identification.

  // The Verifier instance, created in doInitialization and shared across all
  // runOnFunction invocations for the module.
  std::unique_ptr<Verifier> V;
  // When true, a broken function/module aborts compilation via
  // report_fatal_error.
  bool FatalErrors = true;

  // NOTE(review): the constructor bodies appear to have statements elided in
  // this view — confirm against upstream.
  VerifierLegacyPass() : FunctionPass(ID) {
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        FatalErrors(FatalErrors) {
  }

  bool doInitialization(Module &M) override {
    // Broken debug info is not treated as an error per-function; it is
    // consulted via hasBrokenDebugInfo() in doFinalization.
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false; // The pass never modifies the IR.
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = false;
    // Also verify declarations, which runOnFunction does not see.
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
7656
7657} // end anonymous namespace
7658
7659/// Helper to issue failure from the TBAA verification
7660template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7661 if (Diagnostic)
7662 return Diagnostic->CheckFailed(Args...);
7663}
7664
/// If condition \p C does not hold, report the remaining arguments through
/// \c CheckFailed and bail out of the enclosing function by returning false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7672
7673/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7674/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7675/// struct-type node describing an aggregate data structure (like a struct).
7676TBAAVerifier::TBAABaseNodeSummary
7677TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7678 bool IsNewFormat) {
7679 if (BaseNode->getNumOperands() < 2) {
7680 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7681 return {true, ~0u};
7682 }
7683
7684 auto Itr = TBAABaseNodes.find(BaseNode);
7685 if (Itr != TBAABaseNodes.end())
7686 return Itr->second;
7687
7688 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7689 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7690 (void)InsertResult;
7691 assert(InsertResult.second && "We just checked!");
7692 return Result;
7693}
7694
7695TBAAVerifier::TBAABaseNodeSummary
7696TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7697 const MDNode *BaseNode, bool IsNewFormat) {
7698 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7699
7700 if (BaseNode->getNumOperands() == 2) {
7701 // Scalar nodes can only be accessed at offset 0.
7702 return isValidScalarTBAANode(BaseNode)
7703 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7704 : InvalidNode;
7705 }
7706
7707 if (IsNewFormat) {
7708 if (BaseNode->getNumOperands() % 3 != 0) {
7709 CheckFailed("Access tag nodes must have the number of operands that is a "
7710 "multiple of 3!", BaseNode);
7711 return InvalidNode;
7712 }
7713 } else {
7714 if (BaseNode->getNumOperands() % 2 != 1) {
7715 CheckFailed("Struct tag nodes must have an odd number of operands!",
7716 BaseNode);
7717 return InvalidNode;
7718 }
7719 }
7720
7721 // Check the type size field.
7722 if (IsNewFormat) {
7723 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7724 BaseNode->getOperand(1));
7725 if (!TypeSizeNode) {
7726 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7727 return InvalidNode;
7728 }
7729 }
7730
7731 // Check the type name field. In the new format it can be anything.
7732 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7733 CheckFailed("Struct tag nodes have a string as their first operand",
7734 BaseNode);
7735 return InvalidNode;
7736 }
7737
7738 bool Failed = false;
7739
7740 std::optional<APInt> PrevOffset;
7741 unsigned BitWidth = ~0u;
7742
7743 // We've already checked that BaseNode is not a degenerate root node with one
7744 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7745 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7746 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7747 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7748 Idx += NumOpsPerField) {
7749 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7750 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7751 if (!isa<MDNode>(FieldTy)) {
7752 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7753 Failed = true;
7754 continue;
7755 }
7756
7757 auto *OffsetEntryCI =
7759 if (!OffsetEntryCI) {
7760 CheckFailed("Offset entries must be constants!", I, BaseNode);
7761 Failed = true;
7762 continue;
7763 }
7764
7765 if (BitWidth == ~0u)
7766 BitWidth = OffsetEntryCI->getBitWidth();
7767
7768 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7769 CheckFailed(
7770 "Bitwidth between the offsets and struct type entries must match", I,
7771 BaseNode);
7772 Failed = true;
7773 continue;
7774 }
7775
7776 // NB! As far as I can tell, we generate a non-strictly increasing offset
7777 // sequence only from structs that have zero size bit fields. When
7778 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7779 // pick the field lexically the latest in struct type metadata node. This
7780 // mirrors the actual behavior of the alias analysis implementation.
7781 bool IsAscending =
7782 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7783
7784 if (!IsAscending) {
7785 CheckFailed("Offsets must be increasing!", I, BaseNode);
7786 Failed = true;
7787 }
7788
7789 PrevOffset = OffsetEntryCI->getValue();
7790
7791 if (IsNewFormat) {
7792 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7793 BaseNode->getOperand(Idx + 2));
7794 if (!MemberSizeNode) {
7795 CheckFailed("Member size entries must be constants!", I, BaseNode);
7796 Failed = true;
7797 continue;
7798 }
7799 }
7800 }
7801
7802 return Failed ? InvalidNode
7803 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7804}
7805
7806static bool IsRootTBAANode(const MDNode *MD) {
7807 return MD->getNumOperands() < 2;
7808}
7809
7810static bool IsScalarTBAANodeImpl(const MDNode *MD,
7812 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7813 return false;
7814
7815 if (!isa<MDString>(MD->getOperand(0)))
7816 return false;
7817
7818 if (MD->getNumOperands() == 3) {
7820 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7821 return false;
7822 }
7823
7824 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7825 return Parent && Visited.insert(Parent).second &&
7826 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7827}
7828
7829bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7830 auto ResultIt = TBAAScalarNodes.find(MD);
7831 if (ResultIt != TBAAScalarNodes.end())
7832 return ResultIt->second;
7833
7834 SmallPtrSet<const MDNode *, 4> Visited;
7835 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7836 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7837 (void)InsertResult;
7838 assert(InsertResult.second && "Just checked!");
7839
7840 return Result;
7841}
7842
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields are stored as (type, offset[, size]) groups; scan for the last
  // field whose offset does not exceed Offset.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      // The very first field already starts past Offset: no containing field.
      if (Idx == FirstFieldOpNo) {
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // The previous field contains Offset; rebase Offset to be relative to
      // that field's start before descending into it.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset is at or beyond the last field's start: descend into the last
  // field, again rebasing Offset relative to it.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
7886
7888 if (!Type || Type->getNumOperands() < 3)
7889 return false;
7890
7891 // In the new format type nodes shall have a reference to the parent type as
7892 // its first operand.
7893 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7894}
7895
7897 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7898 MD);
7899
7900 if (I)
7904 "This instruction shall not have a TBAA access tag!", I);
7905
7906 bool IsStructPathTBAA =
7907 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7908
7909 CheckTBAA(IsStructPathTBAA,
7910 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7911 I);
7912
7913 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7914 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7915
7916 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7917
7918 if (IsNewFormat) {
7919 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7920 "Access tag metadata must have either 4 or 5 operands", I, MD);
7921 } else {
7922 CheckTBAA(MD->getNumOperands() < 5,
7923 "Struct tag metadata must have either 3 or 4 operands", I, MD);
7924 }
7925
7926 // Check the access size field.
7927 if (IsNewFormat) {
7928 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7929 MD->getOperand(3));
7930 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
7931 }
7932
7933 // Check the immutability flag.
7934 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7935 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7936 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7937 MD->getOperand(ImmutabilityFlagOpNo));
7938 CheckTBAA(IsImmutableCI,
7939 "Immutability tag on struct tag metadata must be a constant", I,
7940 MD);
7941 CheckTBAA(
7942 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7943 "Immutability part of the struct tag metadata must be either 0 or 1", I,
7944 MD);
7945 }
7946
7947 CheckTBAA(BaseNode && AccessType,
7948 "Malformed struct tag metadata: base and access-type "
7949 "should be non-null and point to Metadata nodes",
7950 I, MD, BaseNode, AccessType);
7951
7952 if (!IsNewFormat) {
7953 CheckTBAA(isValidScalarTBAANode(AccessType),
7954 "Access type node must be a valid scalar type", I, MD,
7955 AccessType);
7956 }
7957
7959 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
7960
7961 APInt Offset = OffsetCI->getValue();
7962 bool SeenAccessTypeInPath = false;
7963
7964 SmallPtrSet<MDNode *, 4> StructPath;
7965
7966 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7967 BaseNode =
7968 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
7969 if (!StructPath.insert(BaseNode).second) {
7970 CheckFailed("Cycle detected in struct path", I, MD);
7971 return false;
7972 }
7973
7974 bool Invalid;
7975 unsigned BaseNodeBitWidth;
7976 std::tie(Invalid, BaseNodeBitWidth) =
7977 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
7978
7979 // If the base node is invalid in itself, then we've already printed all the
7980 // errors we wanted to print.
7981 if (Invalid)
7982 return false;
7983
7984 SeenAccessTypeInPath |= BaseNode == AccessType;
7985
7986 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7987 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
7988 MD, &Offset);
7989
7990 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7991 (BaseNodeBitWidth == 0 && Offset == 0) ||
7992 (IsNewFormat && BaseNodeBitWidth == ~0u),
7993 "Access bit-width not the same as description bit-width", I, MD,
7994 BaseNodeBitWidth, Offset.getBitWidth());
7995
7996 if (IsNewFormat && SeenAccessTypeInPath)
7997 break;
7998 }
7999
8000 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8001 MD);
8002 return true;
8003}
8004
// Register VerifierLegacyPass with the legacy pass infrastructure under the
// command-line name "verify".
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8007
8009 return new VerifierLegacyPass(FatalErrors);
8010}
8011
8012AnalysisKey VerifierAnalysis::Key;
8019
8024
8026 auto Res = AM.getResult<VerifierAnalysis>(M);
8027 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8028 report_fatal_error("Broken module found, compilation aborted!");
8029
8030 return PreservedAnalyses::all();
8031}
8032
8034 auto res = AM.getResult<VerifierAnalysis>(F);
8035 if (res.IRBroken && FatalErrors)
8036 report_fatal_error("Broken function found, compilation aborted!");
8037
8038 return PreservedAnalyses::all();
8039}
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:680
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:721
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:664
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1441
bool isTemporary() const
Definition Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1439
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1447
bool isDistinct() const
Definition Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1257
LLVMContext & getContext() const
Definition Metadata.h:1241
bool equalsStr(StringRef Str) const
Definition Metadata.h:921
Metadata * get() const
Definition Metadata.h:928
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:617
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:111
Metadata * getMetadata() const
Definition Metadata.h:200
Root of the metadata hierarchy.
Definition Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1845
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:497
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:811
@ DW_MACINFO_start_file
Definition Dwarf.h:812
@ DW_MACINFO_define
Definition Dwarf.h:810
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:707
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:694
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2452
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * BranchWeights
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144