Thanks for visiting codestin.com
Credit goes to llvm.org

LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "assign";
197 break;
199 *OS << "end";
200 break;
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
  /// Unwrap a typed MD tuple wrapper and print the underlying tuple node.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
273 template <typename T1, typename... Ts>
274 void WriteTs(const T1 &V1, const Ts &... Vs) {
275 Write(V1);
276 WriteTs(Vs...);
277 }
278
  /// Recursion terminator for the variadic WriteTs overload.
  template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so printout out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
337 SmallPtrSet<const Metadata *, 32> MDNodes;
338
339 /// Keep track which DISubprogram is attached to which function.
340 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
341
342 /// Track all DICompileUnits visited.
343 SmallPtrSet<const Metadata *, 2> CUVisited;
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
357 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
361 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
  /// Construct a verifier bound to module \p M.
  ///
  /// \param OS Stream for diagnostics; may be null to suppress output (the
  ///        check-failure helpers guard every write with `if (OS)`).
  /// \param ShouldTreatBrokenDebugInfoAsError If true, broken debug info is
  ///        treated as a verification error rather than merely recorded.
  /// \param M The module this instance is tied to; verify(const Function &)
  ///        asserts that the function belongs to this module.
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
399
  /// Returns true if any debug info check has failed so far.
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
  /// Verify a single function of the module this verifier was created for.
  /// Returns true when \p F is well-formed; diagnostics, if any, go to OS.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // A block without a terminator makes the function unsafe to traverse, so
    // report it (when a stream is available) and bail out immediately instead
    // of running the visitors. Note: this path returns false without going
    // through CheckFailed.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // Route convergence-verification failures through CheckFailed so they set
    // the Broken flag like every other check.
    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    // Convergence checks use the dominator tree and only run if convergence
    // tokens were actually seen during the visit.
    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can be reused for the next
    // function of the module.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
452
  /// Verify the module that this instance of \c Verifier was initialized with.
  ///
  /// Performs the module-level checks (globals, aliases, ifuncs, named
  /// metadata, comdats, module flags/idents/command lines, compile units,
  /// deoptimize calling conventions) and returns true when none failed.
  /// Per-function checking is done separately via verify(const Function &).
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
491
492private:
493 /// Whether a metadata node is allowed to be, or contain, a DILocation.
494 enum class AreDebugLocsAllowed { No, Yes };
495
496 /// Metadata that should be treated as a range, with slightly different
497 /// requirements.
498 enum class RangeLikeMetadataKind {
499 Range, // MD_range
500 AbsoluteSymbol, // MD_absolute_symbol
501 NoaliasAddrspace // MD_noalias_addrspace
502 };
503
504 // Verification methods...
505 void visitGlobalValue(const GlobalValue &GV);
506 void visitGlobalVariable(const GlobalVariable &GV);
507 void visitGlobalAlias(const GlobalAlias &GA);
508 void visitGlobalIFunc(const GlobalIFunc &GI);
509 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
510 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
511 const GlobalAlias &A, const Constant &C);
512 void visitNamedMDNode(const NamedMDNode &NMD);
513 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
514 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
515 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
516 void visitDIArgList(const DIArgList &AL, Function *F);
517 void visitComdat(const Comdat &C);
518 void visitModuleIdents();
519 void visitModuleCommandLines();
520 void visitModuleErrnoTBAA();
521 void visitModuleFlags();
522 void visitModuleFlag(const MDNode *Op,
523 DenseMap<const MDString *, const MDNode *> &SeenIDs,
524 SmallVectorImpl<const MDNode *> &Requirements);
525 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
526 void visitFunction(const Function &F);
527 void visitBasicBlock(BasicBlock &BB);
528 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
529 RangeLikeMetadataKind Kind);
530 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
532 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
533 void visitNofreeMetadata(Instruction &I, MDNode *MD);
534 void visitProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallStackMetadata(MDNode *MD);
536 void visitMemProfMetadata(Instruction &I, MDNode *MD);
537 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
538 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
539 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
540 void visitMMRAMetadata(Instruction &I, MDNode *MD);
541 void visitAnnotationMetadata(MDNode *Annotation);
542 void visitAliasScopeMetadata(const MDNode *MD);
543 void visitAliasScopeListMetadata(const MDNode *MD);
544 void visitAccessGroupMetadata(const MDNode *MD);
545 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true, if not print an error message.
// NOTE(review): the `return` aborts only the enclosing (void) visit method —
// remaining checks for the current item are skipped; overall failure is still
// recorded because CheckFailed sets the Broken flag.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
678
679/// We know that a debug info condition should be true, if not print
680/// an error message.
// NOTE(review): like Check, but routes through DebugInfoCheckFailed so the
// failure is recorded as (recoverable) broken debug info rather than as an
// unconditional verifier error.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
688
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initalizer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
/// Recursively verify the aliasee expression of GlobalAlias GA: walks through
/// nested aliases (detecting cycles via Visited) and constant expressions, but
/// does not descend into global initializers.
/// NOTE(review): this excerpt elides some original lines (the conditions
/// guarding the first two Checks are not visible) — confirm against upstream.
947void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
948                                   const GlobalAlias &GA, const Constant &C) {
951          cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
952          "available_externally alias must point to available_externally "
953          "global value",
954          &GA);
955  }
956  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
958      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
959            &GA);
960    }
961
962    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returns false on re-insertion, i.e. we came back to an
      // alias already on the current chain: a cycle.
963      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
964
965      Check(!GA2->isInterposable(),
966            "Alias cannot point to an interposable alias", &GA);
967    } else {
968      // Only continue verifying subexpressions of GlobalAliases.
969      // Do not recurse into global initializers.
970      return;
971    }
972  }
973
974  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
975    visitConstantExprsRecursively(CE);
976
  // Recurse into operands: aliases are followed through their aliasee,
  // other constants are verified directly.
977  for (const Use &U : C.operands()) {
978    Value *V = &*U;
979    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
980      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
981    else if (const auto *C2 = dyn_cast<Constant>(V))
982      visitAliaseeSubExpr(Visited, GA, *C2);
983  }
984}
985
/// Verify a GlobalAlias: acceptable linkage, a non-null aliasee whose type
/// matches the alias, and a well-formed aliasee expression.
/// NOTE(review): the predicate of the first Check (the linkage test) is not
/// visible in this excerpt — confirm against the upstream source.
986void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
988        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
989        "weak_odr, external, or available_externally linkage!",
990        &GA);
991  const Constant *Aliasee = GA.getAliasee();
992  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
993  Check(GA.getType() == Aliasee->getType(),
994        "Alias and aliasee types should match!", &GA);
995
996  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
997        "Aliasee should be either GlobalValue or ConstantExpr", &GA);
998
  // Recursively validate the aliasee expression (cycles, interposability...).
999  visitAliaseeSubExpr(GA, *Aliasee);
1000
1001  visitGlobalValue(GA);
1002}
1003
/// Verify a GlobalIFunc: no !dbg/!prof attachments, acceptable linkage, and a
/// resolver that is a defined Function returning a pointer in the ifunc's
/// address space.
/// NOTE(review): several original lines are elided in this excerpt (the MDs
/// declaration, the linkage-check condition, and the pointer-return
/// condition) — confirm against the upstream source.
1004void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1005  visitGlobalValue(GI);
1006
1008  GI.getAllMetadata(MDs);
1009  for (const auto &I : MDs) {
1010    CheckDI(I.first != LLVMContext::MD_dbg,
1011            "an ifunc may not have a !dbg attachment", &GI);
1012    Check(I.first != LLVMContext::MD_prof,
1013          "an ifunc may not have a !prof attachment", &GI);
1014    visitMDNode(*I.second, AreDebugLocsAllowed::No);
1015  }
1016
1018        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1019        "weak_odr, or external linkage!",
1020        &GI);
1021  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1022  // is a Function definition.
1023  const Function *Resolver = GI.getResolverFunction();
1024  Check(Resolver, "IFunc must have a Function resolver", &GI);
1025  Check(!Resolver->isDeclarationForLinker(),
1026        "IFunc resolver must be a definition", &GI);
1027
1028  // Check that the immediate resolver operand (prior to any bitcasts) has the
1029  // correct type.
1030  const Type *ResolverTy = GI.getResolver()->getType();
1031
1033        "IFunc resolver must return a pointer", &GI);
1034
1035  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1036        "IFunc resolver has incorrect type", &GI);
1037}
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
/// Verify a generic MDNode: dispatch to the specialized visitor for leaf
/// subclasses, validate every operand, then check resolution state.
/// NOTE(review): two original lines are elided in this excerpt (the
/// llvm.loop.estimated_trip_count string match and the Count extraction) —
/// confirm against the upstream source.
1056void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1057  // Only visit each node once. Metadata can be mutually recursive, so this
1058  // avoids infinite recursion here, as well as being an optimization.
1059  if (!MDNodes.insert(&MD).second)
1060    return;
1061
1062  Check(&MD.getContext() == &Context,
1063        "MDNode context does not match Module context!", &MD);
1064
  // Dispatch each specialized MDNode leaf class to its visit##CLASS method.
1065  switch (MD.getMetadataID()) {
1066  default:
1067    llvm_unreachable("Invalid MDNode subclass");
1068  case Metadata::MDTupleKind:
1069    break;
1070#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
1071  case Metadata::CLASS##Kind:                                                  \
1072    visit##CLASS(cast<CLASS>(MD));                                             \
1073    break;
1074#include "llvm/IR/Metadata.def"
1075  }
1076
1077  for (const Metadata *Op : MD.operands()) {
1078    if (!Op)
1079      continue;
1080    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1081          &MD, Op);
1082    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1083            "DILocation not allowed within this metadata node", &MD, Op);
1084    if (auto *N = dyn_cast<MDNode>(Op)) {
1085      visitMDNode(*N, AllowLocs);
1086      continue;
1087    }
1088    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1089      visitValueAsMetadata(*V, nullptr);
1090      continue;
1091    }
1092  }
1093
1094  // Check llvm.loop.estimated_trip_count.
1095  if (MD.getNumOperands() > 0 &&
1097    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1099    Check(Count && Count->getType()->isIntegerTy() &&
1100              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1101          "Expected second operand to be an integer constant of type i32 or "
1102          "smaller",
1103          &MD);
1104  }
1105
1106  // Check these last, so we diagnose problems in operands first.
1107  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1108  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1109}
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164
1165void Verifier::visitDILocation(const DILocation &N) {
1166 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1167 "location requires a valid scope", &N, N.getRawScope());
1168 if (auto *IA = N.getRawInlinedAt())
1169 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1170 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1171 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1172}
1173
1174void Verifier::visitGenericDINode(const GenericDINode &N) {
1175 CheckDI(N.getTag(), "invalid tag", &N);
1176}
1177
1178void Verifier::visitDIScope(const DIScope &N) {
1179 if (auto *F = N.getRawFile())
1180 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1181}
1182
/// Verify a DISubrangeType: correct tag, and bound/stride/bias operands that
/// are each a signed constant, DIVariable, or DIExpression.
/// NOTE(review): the condition of the final SizeInBits check is elided in
/// this excerpt — confirm against the upstream source.
1183void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1184  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1185  auto *BaseType = N.getRawBaseType();
1186  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1187  auto *LBound = N.getRawLowerBound();
1188  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1189              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1190          "LowerBound must be signed constant or DIVariable or DIExpression",
1191          &N);
1192  auto *UBound = N.getRawUpperBound();
1193  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1194              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1195          "UpperBound must be signed constant or DIVariable or DIExpression",
1196          &N);
1197  auto *Stride = N.getRawStride();
1198  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1199              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1200          "Stride must be signed constant or DIVariable or DIExpression", &N);
1201  auto *Bias = N.getRawBias();
1202  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1203              isa<DIExpression>(Bias),
1204          "Bias must be signed constant or DIVariable or DIExpression", &N);
1205  // Subrange types currently only support constant size.
1206  auto *Size = N.getRawSizeInBits();
1208          "SizeInBits must be a constant");
1209}
1210
/// Verify a DISubrange: correct tag, count and upperBound mutually exclusive,
/// and each bound/stride operand a signed constant, DIVariable, or
/// DIExpression.
/// NOTE(review): the line guarding the "invalid subrange count" check is
/// elided in this excerpt — confirm against the upstream source.
1211void Verifier::visitDISubrange(const DISubrange &N) {
1212  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // A subrange may specify its extent as either a count or an upper bound,
  // never both.
1213  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1214          "Subrange can have any one of count or upperBound", &N);
1215  auto *CBound = N.getRawCountNode();
1216  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1217              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1218          "Count must be signed constant or DIVariable or DIExpression", &N);
1219  auto Count = N.getCount();
  // A constant count of -1 is the "unknown length" sentinel; anything more
  // negative is invalid.
1221              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1222          "invalid subrange count", &N);
1223  auto *LBound = N.getRawLowerBound();
1224  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1225              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1226          "LowerBound must be signed constant or DIVariable or DIExpression",
1227          &N);
1228  auto *UBound = N.getRawUpperBound();
1229  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1230              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1231          "UpperBound must be signed constant or DIVariable or DIExpression",
1232          &N);
1233  auto *Stride = N.getRawStride();
1234  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1235              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1236          "Stride must be signed constant or DIVariable or DIExpression", &N);
1237}
1238
1239void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1240 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1241 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1242 "GenericSubrange can have any one of count or upperBound", &N);
1243 auto *CBound = N.getRawCountNode();
1244 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1245 "Count must be signed constant or DIVariable or DIExpression", &N);
1246 auto *LBound = N.getRawLowerBound();
1247 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1248 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1249 "LowerBound must be signed constant or DIVariable or DIExpression",
1250 &N);
1251 auto *UBound = N.getRawUpperBound();
1252 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1253 "UpperBound must be signed constant or DIVariable or DIExpression",
1254 &N);
1255 auto *Stride = N.getRawStride();
1256 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1257 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1258 "Stride must be signed constant or DIVariable or DIExpression", &N);
1259}
1260
1261void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1262 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1263}
1264
/// Verify a DIBasicType: one of the basic-type tags, and a constant size.
/// NOTE(review): the condition of the SizeInBits check is elided in this
/// excerpt — confirm against the upstream source.
1265void Verifier::visitDIBasicType(const DIBasicType &N) {
1266  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1267              N.getTag() == dwarf::DW_TAG_unspecified_type ||
1268              N.getTag() == dwarf::DW_TAG_string_type,
1269          "invalid tag", &N);
1270  // Basic types currently only support constant size.
1271  auto *Size = N.getRawSizeInBits();
1273          "SizeInBits must be a constant");
1274}
1275
/// Verify a DIFixedPointType: basic-type checks plus fixed-point encoding and
/// consistency between the kind and the factor/numerator/denominator fields.
/// NOTE(review): several original lines are elided in this excerpt (the kind
/// check and the conditions of the factor/rational checks) — confirm against
/// the upstream source.
1276void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1277  visitDIBasicType(N);
1278
1279  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1280  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1281              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1282          "invalid encoding", &N);
1286          "invalid kind", &N);
1288              N.getFactorRaw() == 0,
1289          "factor should be 0 for rationals", &N);
1291              (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1292          "numerator and denominator should be 0 for non-rationals", &N);
1293}
1294
1295void Verifier::visitDIStringType(const DIStringType &N) {
1296 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1297 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1298 &N);
1299}
1300
/// Verify a DIDerivedType: one of the derived-type tags, valid scope and base
/// type, extra constraints for pointer-to-member, set types, and DWARF address
/// spaces, plus a well-formed size.
/// NOTE(review): some original lines are elided in this excerpt (the dyn_casts
/// feeding the set-base-type check and the SizeInBits condition) — confirm
/// against the upstream source.
1301void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1302  // Common scope checks.
1303  visitDIScope(N);
1304
1305  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1306              N.getTag() == dwarf::DW_TAG_pointer_type ||
1307              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1308              N.getTag() == dwarf::DW_TAG_reference_type ||
1309              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1310              N.getTag() == dwarf::DW_TAG_const_type ||
1311              N.getTag() == dwarf::DW_TAG_immutable_type ||
1312              N.getTag() == dwarf::DW_TAG_volatile_type ||
1313              N.getTag() == dwarf::DW_TAG_restrict_type ||
1314              N.getTag() == dwarf::DW_TAG_atomic_type ||
1315              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1316              N.getTag() == dwarf::DW_TAG_member ||
1317              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1318              N.getTag() == dwarf::DW_TAG_inheritance ||
1319              N.getTag() == dwarf::DW_TAG_friend ||
1320              N.getTag() == dwarf::DW_TAG_set_type ||
1321              N.getTag() == dwarf::DW_TAG_template_alias,
1322          "invalid tag", &N);
  // Pointer-to-member types store the containing class in extraData.
1323  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1324    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1325            N.getRawExtraData());
1326  }
1327
  // A set type's base must be an enumeration, a subrange, or an integral /
  // boolean / character basic type.
1328  if (N.getTag() == dwarf::DW_TAG_set_type) {
1329    if (auto *T = N.getRawBaseType()) {
1333      CheckDI(
1334          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1335              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1336              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1337                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
1338                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1339                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1340                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1341          "invalid set base type", &N, T);
1342    }
1343  }
1344
1345  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1346  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1347          N.getRawBaseType());
1348
1349  if (N.getDWARFAddressSpace()) {
1350    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1351                N.getTag() == dwarf::DW_TAG_reference_type ||
1352                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1353            "DWARF address space only applies to pointer or reference types",
1354            &N);
1355  }
1356
1357  auto *Size = N.getRawSizeInBits();
1360          "SizeInBits must be a constant or DIVariable or DIExpression");
1361}
1362
1363/// Detect mutually exclusive flags.
1364static bool hasConflictingReferenceFlags(unsigned Flags) {
1365 return ((Flags & DINode::FlagLValueReference) &&
1366 (Flags & DINode::FlagRValueReference)) ||
1367 ((Flags & DINode::FlagTypePassByValue) &&
1368 (Flags & DINode::FlagTypePassByReference));
1369}
1370
1371void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1372 auto *Params = dyn_cast<MDTuple>(&RawParams);
1373 CheckDI(Params, "invalid template params", &N, &RawParams);
1374 for (Metadata *Op : Params->operands()) {
1375 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1376 &N, Params, Op);
1377 }
1378}
1379
/// Verify a DICompositeType: one of the composite tags, valid scope/base
/// type/elements/vtable holder, no conflicting or obsolete flags, and the
/// extra constraints for vectors, variant parts, and array types.
/// NOTE(review): some original lines are elided in this excerpt (the
/// reference-flags condition and the SizeInBits condition) — confirm against
/// the upstream source.
1380void Verifier::visitDICompositeType(const DICompositeType &N) {
1381  // Common scope checks.
1382  visitDIScope(N);
1383
1384  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1385              N.getTag() == dwarf::DW_TAG_structure_type ||
1386              N.getTag() == dwarf::DW_TAG_union_type ||
1387              N.getTag() == dwarf::DW_TAG_enumeration_type ||
1388              N.getTag() == dwarf::DW_TAG_class_type ||
1389              N.getTag() == dwarf::DW_TAG_variant_part ||
1390              N.getTag() == dwarf::DW_TAG_variant ||
1391              N.getTag() == dwarf::DW_TAG_namelist,
1392          "invalid tag", &N);
1393
1394  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1395  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1396          N.getRawBaseType());
1397
1398  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1399          "invalid composite elements", &N, N.getRawElements());
1400  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1401          N.getRawVTableHolder());
1403          "invalid reference flags", &N);
  // DIFlagBlockByrefStruct was removed; reject old bitcode that still sets it.
1404  unsigned DIBlockByRefStruct = 1 << 4;
1405  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1406          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1407  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1408          "DISubprogram contains null entry in `elements` field", &N);
1409
1410  if (N.isVector()) {
1411    const DINodeArray Elements = N.getElements();
1412    CheckDI(Elements.size() == 1 &&
1413                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1414            "invalid vector, expected one element of type subrange", &N);
1415  }
1416
1417  if (auto *Params = N.getRawTemplateParams())
1418    visitTemplateParams(N, *Params);
1419
1420  if (auto *D = N.getRawDiscriminator()) {
1421    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1422            "discriminator can only appear on variant part");
1423  }
1424
  // dataLocation / associated / allocated / rank are Fortran array attributes.
1425  if (N.getRawDataLocation()) {
1426    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1427            "dataLocation can only appear in array type");
1428  }
1429
1430  if (N.getRawAssociated()) {
1431    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1432            "associated can only appear in array type");
1433  }
1434
1435  if (N.getRawAllocated()) {
1436    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1437            "allocated can only appear in array type");
1438  }
1439
1440  if (N.getRawRank()) {
1441    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1442            "rank can only appear in array type");
1443  }
1444
1445  if (N.getTag() == dwarf::DW_TAG_array_type) {
1446    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1447  }
1448
1449  auto *Size = N.getRawSizeInBits();
1452          "SizeInBits must be a constant or DIVariable or DIExpression");
1453}
1454
/// Verify a DISubroutineType: correct tag, a tuple of type refs, and no
/// conflicting reference flags.
/// NOTE(review): the condition of the final reference-flags check is elided
/// in this excerpt — confirm against the upstream source.
1455void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1456  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1457  if (auto *Types = N.getRawTypeArray()) {
1458    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1459    for (Metadata *Ty : N.getTypeArray()->operands()) {
1460      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1461    }
1462  }
1464          "invalid reference flags", &N);
1465}
1466
1467void Verifier::visitDIFile(const DIFile &N) {
1468 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1469 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1470 if (Checksum) {
1471 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1472 "invalid checksum kind", &N);
1473 size_t Size;
1474 switch (Checksum->Kind) {
1475 case DIFile::CSK_MD5:
1476 Size = 32;
1477 break;
1478 case DIFile::CSK_SHA1:
1479 Size = 40;
1480 break;
1481 case DIFile::CSK_SHA256:
1482 Size = 64;
1483 break;
1484 }
1485 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1486 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1487 "invalid checksum", &N);
1488 }
1489}
1490
/// Verify a DICompileUnit: distinct, correct tag, valid file with non-empty
/// name, valid emission kind, and well-formed enum / retained-type / global /
/// imported-entity / macro lists. Records the CU for later cross-checks.
/// NOTE(review): two original lines are elided in this excerpt (the
/// dyn_casts feeding the enum-type and global-variable checks) — confirm
/// against the upstream source.
1491void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1492  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1493  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1494
1495  // Don't bother verifying the compilation directory or producer string
1496  // as those could be empty.
1497  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1498          N.getRawFile());
1499  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1500          N.getFile());
1501
1502  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1503          "invalid emission kind", &N);
1504
1505  if (auto *Array = N.getRawEnumTypes()) {
1506    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1507    for (Metadata *Op : N.getEnumTypes()->operands()) {
1509      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1510              "invalid enum type", &N, N.getEnumTypes(), Op);
1511    }
1512  }
  // Retained types may be types or subprogram declarations (not definitions).
1513  if (auto *Array = N.getRawRetainedTypes()) {
1514    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1515    for (Metadata *Op : N.getRetainedTypes()->operands()) {
1516      CheckDI(
1517          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1518                                     !cast<DISubprogram>(Op)->isDefinition())),
1519          "invalid retained type", &N, Op);
1520    }
1521  }
1522  if (auto *Array = N.getRawGlobalVariables()) {
1523    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1524    for (Metadata *Op : N.getGlobalVariables()->operands()) {
1526              "invalid global variable ref", &N, Op);
1527    }
1528  }
1529  if (auto *Array = N.getRawImportedEntities()) {
1530    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1531    for (Metadata *Op : N.getImportedEntities()->operands()) {
1532      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1533              &N, Op);
1534    }
1535  }
1536  if (auto *Array = N.getRawMacros()) {
1537    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1538    for (Metadata *Op : N.getMacros()->operands()) {
1539      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1540    }
1541  }
  // Remember the CU so verifyModule can check all CUs are listed in
  // llvm.dbg.cu.
1542  CUVisited.insert(&N);
1543}
1544
/// Verify a DISubprogram: correct tag, valid scope/file/type/containing type,
/// well-formed template params, declaration, retained nodes and thrown types,
/// plus the definition-vs-declaration invariants (unit presence, distinctness,
/// ODR nesting).
/// NOTE(review): a few original lines are elided in this excerpt (the
/// retained-node kind test and the reference-flags condition) — confirm
/// against the upstream source.
1545void Verifier::visitDISubprogram(const DISubprogram &N) {
1546  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1547  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1548  if (auto *F = N.getRawFile())
1549    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1550  else
1551    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1552  if (auto *T = N.getRawType())
1553    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1554  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1555          N.getRawContainingType());
1556  if (auto *Params = N.getRawTemplateParams())
1557    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
1558  if (auto *S = N.getRawDeclaration())
1559    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1560            "invalid subprogram declaration", &N, S);
1561  if (auto *RawNode = N.getRawRetainedNodes()) {
1562    auto *Node = dyn_cast<MDTuple>(RawNode);
1563    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1564    for (Metadata *Op : Node->operands()) {
1567              "invalid retained nodes, expected DILocalVariable, DILabel or "
1568              "DIImportedEntity",
1569              &N, Node, Op);
1570    }
1571  }
1573          "invalid reference flags", &N);
1574
1575  auto *Unit = N.getRawUnit();
1576  if (N.isDefinition()) {
1577    // Subprogram definitions (not part of the type hierarchy).
1578    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1579    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1580    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1581    // There's no good way to cross the CU boundary to insert a nested
1582    // DISubprogram definition in one CU into a type defined in another CU.
1583    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1584    if (CT && CT->getRawIdentifier() &&
1585        M.getContext().isODRUniquingDebugTypes())
1586      CheckDI(N.getDeclaration(),
1587              "definition subprograms cannot be nested within DICompositeType "
1588              "when enabling ODR",
1589              &N);
1590  } else {
1591    // Subprogram declarations (part of the type hierarchy).
1592    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1593    CheckDI(!N.getRawDeclaration(),
1594            "subprogram declaration must not have a declaration field");
1595  }
1596
1597  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1598    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1599    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1600    for (Metadata *Op : ThrownTypes->operands())
1601      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1602              Op);
1603  }
1604
1605  if (N.areAllCallsDescribed())
1606    CheckDI(N.isDefinition(),
1607            "DIFlagAllCallsDescribed must be attached to a definition");
1608}
1609
1610void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1611 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1612 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1613 "invalid local scope", &N, N.getRawScope());
1614 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1615 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1616}
1617
1618void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1619 visitDILexicalBlockBase(N);
1620
1621 CheckDI(N.getLine() || !N.getColumn(),
1622 "cannot have column info without line info", &N);
1623}
1624
1625void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1626 visitDILexicalBlockBase(N);
1627}
1628
1629void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1630 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1631 if (auto *S = N.getRawScope())
1632 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1633 if (auto *S = N.getRawDecl())
1634 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1635}
1636
1637void Verifier::visitDINamespace(const DINamespace &N) {
1638 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1639 if (auto *S = N.getRawScope())
1640 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1641}
1642
1643void Verifier::visitDIMacro(const DIMacro &N) {
1644 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1645 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1646 "invalid macinfo type", &N);
1647 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1648 if (!N.getValue().empty()) {
1649 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1650 }
1651}
1652
1653void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1654 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1655 "invalid macinfo type", &N);
1656 if (auto *F = N.getRawFile())
1657 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1658
1659 if (auto *Array = N.getRawElements()) {
1660 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1661 for (Metadata *Op : N.getElements()->operands()) {
1662 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1663 }
1664 }
1665}
1666
1667void Verifier::visitDIModule(const DIModule &N) {
1668 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1669 CheckDI(!N.getName().empty(), "anonymous module", &N);
1670}
1671
1672void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1673 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1674}
1675
1676void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1677 visitDITemplateParameter(N);
1678
1679 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1680 &N);
1681}
1682
1683void Verifier::visitDITemplateValueParameter(
1684 const DITemplateValueParameter &N) {
1685 visitDITemplateParameter(N);
1686
1687 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1688 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1689 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1690 "invalid tag", &N);
1691}
1692
1693void Verifier::visitDIVariable(const DIVariable &N) {
1694 if (auto *S = N.getRawScope())
1695 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1696 if (auto *F = N.getRawFile())
1697 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1698}
1699
/// Verify a DIGlobalVariable: common variable checks, correct tag, valid type
/// (mandatory for definitions), and a well-formed static-data-member
/// declaration reference.
/// NOTE(review): the condition of the static-data-member check is elided in
/// this excerpt — confirm against the upstream source.
1700void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1701  // Checks common to all variables.
1702  visitDIVariable(N);
1703
1704  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1705  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1706  // Check only if the global variable is not an extern
1707  if (N.isDefinition())
1708    CheckDI(N.getType(), "missing global variable type", &N);
1709  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1711            "invalid static data member declaration", &N, Member);
1712  }
1713}
1714
1715void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1716 // Checks common to all variables.
1717 visitDIVariable(N);
1718
1719 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1720 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1721 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1722 "local variable requires a valid scope", &N, N.getRawScope());
1723 if (auto Ty = N.getType())
1724 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1725}
1726
1727void Verifier::visitDIAssignID(const DIAssignID &N) {
1728 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1729 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1730}
1731
1732void Verifier::visitDILabel(const DILabel &N) {
1733 if (auto *S = N.getRawScope())
1734 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1735 if (auto *F = N.getRawFile())
1736 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1737
1738 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1739 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1740 "label requires a valid scope", &N, N.getRawScope());
1741}
1742
1743void Verifier::visitDIExpression(const DIExpression &N) {
1744 CheckDI(N.isValid(), "invalid expression", &N);
1745}
1746
1747void Verifier::visitDIGlobalVariableExpression(
1748 const DIGlobalVariableExpression &GVE) {
1749 CheckDI(GVE.getVariable(), "missing variable");
1750 if (auto *Var = GVE.getVariable())
1751 visitDIGlobalVariable(*Var);
1752 if (auto *Expr = GVE.getExpression()) {
1753 visitDIExpression(*Expr);
1754 if (auto Fragment = Expr->getFragmentInfo())
1755 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1756 }
1757}
1758
1759void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1760 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1761 if (auto *T = N.getRawType())
1762 CheckDI(isType(T), "invalid type ref", &N, T);
1763 if (auto *F = N.getRawFile())
1764 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1765}
1766
1767void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1768 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1769 N.getTag() == dwarf::DW_TAG_imported_declaration,
1770 "invalid tag", &N);
1771 if (auto *S = N.getRawScope())
1772 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1773 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1774 N.getRawEntity());
1775}
1776
1777void Verifier::visitComdat(const Comdat &C) {
1778 // In COFF the Module is invalid if the GlobalValue has private linkage.
1779 // Entities with private linkage don't have entries in the symbol table.
1780 if (TT.isOSBinFormatCOFF())
1781 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1782 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1783 GV);
1784}
1785
1786void Verifier::visitModuleIdents() {
1787 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1788 if (!Idents)
1789 return;
1790
1791 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1792 // Scan each llvm.ident entry and make sure that this requirement is met.
1793 for (const MDNode *N : Idents->operands()) {
1794 Check(N->getNumOperands() == 1,
1795 "incorrect number of operands in llvm.ident metadata", N);
1796 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1797 ("invalid value for llvm.ident metadata entry operand"
1798 "(the operand should be a string)"),
1799 N->getOperand(0));
1800 }
1801}
1802
1803void Verifier::visitModuleCommandLines() {
1804 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1805 if (!CommandLines)
1806 return;
1807
1808 // llvm.commandline takes a list of metadata entry. Each entry has only one
1809 // string. Scan each llvm.commandline entry and make sure that this
1810 // requirement is met.
1811 for (const MDNode *N : CommandLines->operands()) {
1812 Check(N->getNumOperands() == 1,
1813 "incorrect number of operands in llvm.commandline metadata", N);
1814 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1815 ("invalid value for llvm.commandline metadata entry operand"
1816 "(the operand should be a string)"),
1817 N->getOperand(0));
1818 }
1819}
1820
1821void Verifier::visitModuleErrnoTBAA() {
1822 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1823 if (!ErrnoTBAA)
1824 return;
1825
1826 Check(ErrnoTBAA->getNumOperands() >= 1,
1827 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1828
1829 for (const MDNode *N : ErrnoTBAA->operands())
1830 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1831}
1832
1833void Verifier::visitModuleFlags() {
1834 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1835 if (!Flags) return;
1836
1837 // Scan each flag, and track the flags and requirements.
1838 DenseMap<const MDString*, const MDNode*> SeenIDs;
1839 SmallVector<const MDNode*, 16> Requirements;
1840 uint64_t PAuthABIPlatform = -1;
1841 uint64_t PAuthABIVersion = -1;
1842 for (const MDNode *MDN : Flags->operands()) {
1843 visitModuleFlag(MDN, SeenIDs, Requirements);
1844 if (MDN->getNumOperands() != 3)
1845 continue;
1846 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1847 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1848 if (const auto *PAP =
1850 PAuthABIPlatform = PAP->getZExtValue();
1851 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1852 if (const auto *PAV =
1854 PAuthABIVersion = PAV->getZExtValue();
1855 }
1856 }
1857 }
1858
1859 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1860 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1861 "'aarch64-elf-pauthabi-version' module flags must be present");
1862
1863 // Validate that the requirements in the module are valid.
1864 for (const MDNode *Requirement : Requirements) {
1865 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1866 const Metadata *ReqValue = Requirement->getOperand(1);
1867
1868 const MDNode *Op = SeenIDs.lookup(Flag);
1869 if (!Op) {
1870 CheckFailed("invalid requirement on flag, flag is not present in module",
1871 Flag);
1872 continue;
1873 }
1874
1875 if (Op->getOperand(2) != ReqValue) {
1876 CheckFailed(("invalid requirement on flag, "
1877 "flag does not have the required value"),
1878 Flag);
1879 continue;
1880 }
1881 }
1882}
1883
1884void
1885Verifier::visitModuleFlag(const MDNode *Op,
1886 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1887 SmallVectorImpl<const MDNode *> &Requirements) {
1888 // Each module flag should have three arguments, the merge behavior (a
1889 // constant int), the flag ID (an MDString), and the value.
1890 Check(Op->getNumOperands() == 3,
1891 "incorrect number of operands in module flag", Op);
1892 Module::ModFlagBehavior MFB;
1893 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1895 "invalid behavior operand in module flag (expected constant integer)",
1896 Op->getOperand(0));
1897 Check(false,
1898 "invalid behavior operand in module flag (unexpected constant)",
1899 Op->getOperand(0));
1900 }
1901 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1902 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1903 Op->getOperand(1));
1904
1905 // Check the values for behaviors with additional requirements.
1906 switch (MFB) {
1907 case Module::Error:
1908 case Module::Warning:
1909 case Module::Override:
1910 // These behavior types accept any value.
1911 break;
1912
1913 case Module::Min: {
1914 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1915 Check(V && V->getValue().isNonNegative(),
1916 "invalid value for 'min' module flag (expected constant non-negative "
1917 "integer)",
1918 Op->getOperand(2));
1919 break;
1920 }
1921
1922 case Module::Max: {
1924 "invalid value for 'max' module flag (expected constant integer)",
1925 Op->getOperand(2));
1926 break;
1927 }
1928
1929 case Module::Require: {
1930 // The value should itself be an MDNode with two operands, a flag ID (an
1931 // MDString), and a value.
1932 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1933 Check(Value && Value->getNumOperands() == 2,
1934 "invalid value for 'require' module flag (expected metadata pair)",
1935 Op->getOperand(2));
1936 Check(isa<MDString>(Value->getOperand(0)),
1937 ("invalid value for 'require' module flag "
1938 "(first value operand should be a string)"),
1939 Value->getOperand(0));
1940
1941 // Append it to the list of requirements, to check once all module flags are
1942 // scanned.
1943 Requirements.push_back(Value);
1944 break;
1945 }
1946
1947 case Module::Append:
1948 case Module::AppendUnique: {
1949 // These behavior types require the operand be an MDNode.
1950 Check(isa<MDNode>(Op->getOperand(2)),
1951 "invalid value for 'append'-type module flag "
1952 "(expected a metadata node)",
1953 Op->getOperand(2));
1954 break;
1955 }
1956 }
1957
1958 // Unless this is a "requires" flag, check the ID is unique.
1959 if (MFB != Module::Require) {
1960 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1961 Check(Inserted,
1962 "module flag identifiers must be unique (or of 'require' type)", ID);
1963 }
1964
1965 if (ID->getString() == "wchar_size") {
1966 ConstantInt *Value
1968 Check(Value, "wchar_size metadata requires constant integer argument");
1969 }
1970
1971 if (ID->getString() == "Linker Options") {
1972 // If the llvm.linker.options named metadata exists, we assume that the
1973 // bitcode reader has upgraded the module flag. Otherwise the flag might
1974 // have been created by a client directly.
1975 Check(M.getNamedMetadata("llvm.linker.options"),
1976 "'Linker Options' named metadata no longer supported");
1977 }
1978
1979 if (ID->getString() == "SemanticInterposition") {
1980 ConstantInt *Value =
1982 Check(Value,
1983 "SemanticInterposition metadata requires constant integer argument");
1984 }
1985
1986 if (ID->getString() == "CG Profile") {
1987 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1988 visitModuleFlagCGProfileEntry(MDO);
1989 }
1990}
1991
1992void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1993 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1994 if (!FuncMDO)
1995 return;
1996 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1997 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1998 "expected a Function or null", FuncMDO);
1999 };
2000 auto Node = dyn_cast_or_null<MDNode>(MDO);
2001 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2002 CheckFunction(Node->getOperand(0));
2003 CheckFunction(Node->getOperand(1));
2004 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2005 Check(Count && Count->getType()->isIntegerTy(),
2006 "expected an integer constant", Node->getOperand(2));
2007}
2008
// Verify that each attribute in the set is structurally well-formed: string
// "strbool" attributes (expanded from Attributes.inc) may only hold "",
// "true", or "false"; enum/int attributes must carry an integer argument
// exactly when their kind requires one.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// The X-macro include below expands one ATTRIBUTE_STRBOOL block per known
// string-boolean attribute; ATTRIBUTE_ENUM is deliberately a no-op here.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Non-string attributes: the presence of an integer argument must match
    // what the attribute kind declares.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2034
2035// VerifyParameterAttrs - Check the given attributes for an argument or return
2036// value of the specified type. The value V is printed in error messages.
2037void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2038 const Value *V) {
2039 if (!Attrs.hasAttributes())
2040 return;
2041
2042 verifyAttributeTypes(Attrs, V);
2043
2044 for (Attribute Attr : Attrs)
2045 Check(Attr.isStringAttribute() ||
2046 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2047 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2048 V);
2049
2050 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2051 unsigned AttrCount =
2052 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2053 Check(AttrCount == 1,
2054 "Attribute 'immarg' is incompatible with other attributes except the "
2055 "'range' attribute",
2056 V);
2057 }
2058
2059 // Check for mutually incompatible attributes. Only inreg is compatible with
2060 // sret.
2061 unsigned AttrCount = 0;
2062 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2063 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2064 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2065 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2066 Attrs.hasAttribute(Attribute::InReg);
2067 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2068 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2069 Check(AttrCount <= 1,
2070 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2071 "'byref', and 'sret' are incompatible!",
2072 V);
2073
2074 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2075 Attrs.hasAttribute(Attribute::ReadOnly)),
2076 "Attributes "
2077 "'inalloca and readonly' are incompatible!",
2078 V);
2079
2080 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2081 Attrs.hasAttribute(Attribute::Returned)),
2082 "Attributes "
2083 "'sret and returned' are incompatible!",
2084 V);
2085
2086 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2087 Attrs.hasAttribute(Attribute::SExt)),
2088 "Attributes "
2089 "'zeroext and signext' are incompatible!",
2090 V);
2091
2092 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2093 Attrs.hasAttribute(Attribute::ReadOnly)),
2094 "Attributes "
2095 "'readnone and readonly' are incompatible!",
2096 V);
2097
2098 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2099 Attrs.hasAttribute(Attribute::WriteOnly)),
2100 "Attributes "
2101 "'readnone and writeonly' are incompatible!",
2102 V);
2103
2104 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2105 Attrs.hasAttribute(Attribute::WriteOnly)),
2106 "Attributes "
2107 "'readonly and writeonly' are incompatible!",
2108 V);
2109
2110 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2111 Attrs.hasAttribute(Attribute::AlwaysInline)),
2112 "Attributes "
2113 "'noinline and alwaysinline' are incompatible!",
2114 V);
2115
2116 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2117 Attrs.hasAttribute(Attribute::ReadNone)),
2118 "Attributes writable and readnone are incompatible!", V);
2119
2120 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2121 Attrs.hasAttribute(Attribute::ReadOnly)),
2122 "Attributes writable and readonly are incompatible!", V);
2123
2124 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2125 for (Attribute Attr : Attrs) {
2126 if (!Attr.isStringAttribute() &&
2127 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2128 CheckFailed("Attribute '" + Attr.getAsString() +
2129 "' applied to incompatible type!", V);
2130 return;
2131 }
2132 }
2133
2134 if (isa<PointerType>(Ty)) {
2135 if (Attrs.hasAttribute(Attribute::Alignment)) {
2136 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2137 Check(AttrAlign.value() <= Value::MaximumAlignment,
2138 "huge alignment values are unsupported", V);
2139 }
2140 if (Attrs.hasAttribute(Attribute::ByVal)) {
2141 Type *ByValTy = Attrs.getByValType();
2142 SmallPtrSet<Type *, 4> Visited;
2143 Check(ByValTy->isSized(&Visited),
2144 "Attribute 'byval' does not support unsized types!", V);
2145 // Check if it is or contains a target extension type that disallows being
2146 // used on the stack.
2148 "'byval' argument has illegal target extension type", V);
2149 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2150 "huge 'byval' arguments are unsupported", V);
2151 }
2152 if (Attrs.hasAttribute(Attribute::ByRef)) {
2153 SmallPtrSet<Type *, 4> Visited;
2154 Check(Attrs.getByRefType()->isSized(&Visited),
2155 "Attribute 'byref' does not support unsized types!", V);
2156 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2157 (1ULL << 32),
2158 "huge 'byref' arguments are unsupported", V);
2159 }
2160 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2161 SmallPtrSet<Type *, 4> Visited;
2162 Check(Attrs.getInAllocaType()->isSized(&Visited),
2163 "Attribute 'inalloca' does not support unsized types!", V);
2164 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2165 (1ULL << 32),
2166 "huge 'inalloca' arguments are unsupported", V);
2167 }
2168 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2169 SmallPtrSet<Type *, 4> Visited;
2170 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2171 "Attribute 'preallocated' does not support unsized types!", V);
2172 Check(
2173 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2174 (1ULL << 32),
2175 "huge 'preallocated' arguments are unsupported", V);
2176 }
2177 }
2178
2179 if (Attrs.hasAttribute(Attribute::Initializes)) {
2180 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2181 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2182 V);
2184 "Attribute 'initializes' does not support unordered ranges", V);
2185 }
2186
2187 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2188 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2189 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2190 V);
2191 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2192 "Invalid value for 'nofpclass' test mask", V);
2193 }
2194 if (Attrs.hasAttribute(Attribute::Range)) {
2195 const ConstantRange &CR =
2196 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2198 "Range bit width must match type bit width!", V);
2199 }
2200}
2201
2202void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2203 const Value *V) {
2204 if (Attrs.hasFnAttr(Attr)) {
2205 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2206 unsigned N;
2207 if (S.getAsInteger(10, N))
2208 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2209 }
2210}
2211
2212// Check parameter attributes against a function type.
2213// The value V is printed in error messages.
2214void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2215 const Value *V, bool IsIntrinsic,
2216 bool IsInlineAsm) {
2217 if (Attrs.isEmpty())
2218 return;
2219
2220 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2221 Check(Attrs.hasParentContext(Context),
2222 "Attribute list does not match Module context!", &Attrs, V);
2223 for (const auto &AttrSet : Attrs) {
2224 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2225 "Attribute set does not match Module context!", &AttrSet, V);
2226 for (const auto &A : AttrSet) {
2227 Check(A.hasParentContext(Context),
2228 "Attribute does not match Module context!", &A, V);
2229 }
2230 }
2231 }
2232
2233 bool SawNest = false;
2234 bool SawReturned = false;
2235 bool SawSRet = false;
2236 bool SawSwiftSelf = false;
2237 bool SawSwiftAsync = false;
2238 bool SawSwiftError = false;
2239
2240 // Verify return value attributes.
2241 AttributeSet RetAttrs = Attrs.getRetAttrs();
2242 for (Attribute RetAttr : RetAttrs)
2243 Check(RetAttr.isStringAttribute() ||
2244 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2245 "Attribute '" + RetAttr.getAsString() +
2246 "' does not apply to function return values",
2247 V);
2248
2249 unsigned MaxParameterWidth = 0;
2250 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2251 if (Ty->isVectorTy()) {
2252 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2253 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2254 if (Size > MaxParameterWidth)
2255 MaxParameterWidth = Size;
2256 }
2257 }
2258 };
2259 GetMaxParameterWidth(FT->getReturnType());
2260 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2261
2262 // Verify parameter attributes.
2263 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2264 Type *Ty = FT->getParamType(i);
2265 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2266
2267 if (!IsIntrinsic) {
2268 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2269 "immarg attribute only applies to intrinsics", V);
2270 if (!IsInlineAsm)
2271 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2272 "Attribute 'elementtype' can only be applied to intrinsics"
2273 " and inline asm.",
2274 V);
2275 }
2276
2277 verifyParameterAttrs(ArgAttrs, Ty, V);
2278 GetMaxParameterWidth(Ty);
2279
2280 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2281 Check(!SawNest, "More than one parameter has attribute nest!", V);
2282 SawNest = true;
2283 }
2284
2285 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2286 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2287 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2288 "Incompatible argument and return types for 'returned' attribute",
2289 V);
2290 SawReturned = true;
2291 }
2292
2293 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2294 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2295 Check(i == 0 || i == 1,
2296 "Attribute 'sret' is not on first or second parameter!", V);
2297 SawSRet = true;
2298 }
2299
2300 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2301 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2302 SawSwiftSelf = true;
2303 }
2304
2305 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2306 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2307 SawSwiftAsync = true;
2308 }
2309
2310 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2311 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2312 SawSwiftError = true;
2313 }
2314
2315 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2316 Check(i == FT->getNumParams() - 1,
2317 "inalloca isn't on the last parameter!", V);
2318 }
2319 }
2320
2321 if (!Attrs.hasFnAttrs())
2322 return;
2323
2324 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2325 for (Attribute FnAttr : Attrs.getFnAttrs())
2326 Check(FnAttr.isStringAttribute() ||
2327 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2328 "Attribute '" + FnAttr.getAsString() +
2329 "' does not apply to functions!",
2330 V);
2331
2332 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2333 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2334 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2335
2336 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2337 Check(Attrs.hasFnAttr(Attribute::NoInline),
2338 "Attribute 'optnone' requires 'noinline'!", V);
2339
2340 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2341 "Attributes 'optsize and optnone' are incompatible!", V);
2342
2343 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2344 "Attributes 'minsize and optnone' are incompatible!", V);
2345
2346 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2347 "Attributes 'optdebug and optnone' are incompatible!", V);
2348 }
2349
2350 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2351 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2352 "Attributes "
2353 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2354 V);
2355
2356 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2357 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2358 "Attributes 'optsize and optdebug' are incompatible!", V);
2359
2360 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2361 "Attributes 'minsize and optdebug' are incompatible!", V);
2362 }
2363
2364 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2365 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2366 "Attribute writable and memory without argmem: write are incompatible!",
2367 V);
2368
2369 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2370 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2371 "Attributes 'aarch64_pstate_sm_enabled and "
2372 "aarch64_pstate_sm_compatible' are incompatible!",
2373 V);
2374 }
2375
2376 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2377 Attrs.hasFnAttr("aarch64_inout_za") +
2378 Attrs.hasFnAttr("aarch64_out_za") +
2379 Attrs.hasFnAttr("aarch64_preserves_za") +
2380 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2381 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2382 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2383 "'aarch64_za_state_agnostic' are mutually exclusive",
2384 V);
2385
2386 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2387 Attrs.hasFnAttr("aarch64_in_zt0") +
2388 Attrs.hasFnAttr("aarch64_inout_zt0") +
2389 Attrs.hasFnAttr("aarch64_out_zt0") +
2390 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2391 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2392 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2393 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2394 "'aarch64_za_state_agnostic' are mutually exclusive",
2395 V);
2396
2397 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2398 const GlobalValue *GV = cast<GlobalValue>(V);
2400 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2401 }
2402
2403 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2404 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2405 if (ParamNo >= FT->getNumParams()) {
2406 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2407 return false;
2408 }
2409
2410 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2411 CheckFailed("'allocsize' " + Name +
2412 " argument must refer to an integer parameter",
2413 V);
2414 return false;
2415 }
2416
2417 return true;
2418 };
2419
2420 if (!CheckParam("element size", Args->first))
2421 return;
2422
2423 if (Args->second && !CheckParam("number of elements", *Args->second))
2424 return;
2425 }
2426
2427 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2428 AllocFnKind K = Attrs.getAllocKind();
2430 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2431 if (!is_contained(
2432 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2433 Type))
2434 CheckFailed(
2435 "'allockind()' requires exactly one of alloc, realloc, and free");
2436 if ((Type == AllocFnKind::Free) &&
2437 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2438 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2439 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2440 "or aligned modifiers.");
2441 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2442 if ((K & ZeroedUninit) == ZeroedUninit)
2443 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2444 }
2445
2446 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2447 StringRef S = A.getValueAsString();
2448 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2449 Function *Variant = M.getFunction(S);
2450 if (Variant) {
2451 Attribute Family = Attrs.getFnAttr("alloc-family");
2452 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2453 if (Family.isValid())
2454 Check(VariantFamily.isValid() &&
2455 VariantFamily.getValueAsString() == Family.getValueAsString(),
2456 "'alloc-variant-zeroed' must name a function belonging to the "
2457 "same 'alloc-family'");
2458
2459 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2460 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2461 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2462 "'alloc-variant-zeroed' must name a function with "
2463 "'allockind(\"zeroed\")'");
2464
2465 Check(FT == Variant->getFunctionType(),
2466 "'alloc-variant-zeroed' must name a function with the same "
2467 "signature");
2468 }
2469 }
2470
2471 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2472 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2473 if (VScaleMin == 0)
2474 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2475 else if (!isPowerOf2_32(VScaleMin))
2476 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2477 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2478 if (VScaleMax && VScaleMin > VScaleMax)
2479 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2480 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2481 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2482 }
2483
2484 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2485 StringRef FP = FPAttr.getValueAsString();
2486 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2487 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2488 }
2489
2490 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2491 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2492 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2493 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2494 .getValueAsString()
2495 .empty(),
2496 "\"patchable-function-entry-section\" must not be empty");
2497 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2498
2499 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2500 StringRef S = A.getValueAsString();
2501 if (S != "none" && S != "all" && S != "non-leaf")
2502 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2503 }
2504
2505 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2506 StringRef S = A.getValueAsString();
2507 if (S != "a_key" && S != "b_key")
2508 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2509 V);
2510 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2511 CheckFailed(
2512 "'sign-return-address-key' present without `sign-return-address`");
2513 }
2514 }
2515
2516 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2517 StringRef S = A.getValueAsString();
2518 if (S != "" && S != "true" && S != "false")
2519 CheckFailed(
2520 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2521 }
2522
2523 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2524 StringRef S = A.getValueAsString();
2525 if (S != "" && S != "true" && S != "false")
2526 CheckFailed(
2527 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2528 }
2529
2530 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2531 StringRef S = A.getValueAsString();
2532 if (S != "" && S != "true" && S != "false")
2533 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2534 V);
2535 }
2536
2537 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2538 StringRef S = A.getValueAsString();
2539 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2540 if (!Info)
2541 CheckFailed("invalid name for a VFABI variant: " + S, V);
2542 }
2543
2544 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2545 StringRef S = A.getValueAsString();
2547 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2548 }
2549
2550 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2551 StringRef S = A.getValueAsString();
2553 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2554 V);
2555 }
2556}
2557void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2558 Check(MD->getNumOperands() == 2,
2559 "'unknown' !prof should have a single additional operand", MD);
2560 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2561 Check(PassName != nullptr,
2562 "'unknown' !prof should have an additional operand of type "
2563 "string");
2564 Check(!PassName->getString().empty(),
2565 "the 'unknown' !prof operand should not be an empty string");
2566}
2567
// Validate function-level metadata attachments: !prof entry-count nodes and
// !kcfi_type nodes.
// NOTE(review): several Check(...) head lines in this function appear to have
// been lost in extraction (the dangling message-string argument lists below);
// reconcile against upstream Verifier.cpp before relying on this text.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the string argument to equalsStr (presumably the
      // "unknown" branch-weights marker) is missing here.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
      // NOTE(review): a check head (likely isa<MDString> on operand 0) is
      // missing before this message.
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      // NOTE(review): the comparison of ProfName against the accepted names
      // is missing before this message.
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
      // NOTE(review): a check head (likely ConstantAsMetadata on operand 1)
      // is missing before this message.
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
      // NOTE(review): a check head (likely isa<ConstantAsMetadata>) is
      // missing before this message.
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
      // NOTE(review): a check head (likely a 32-bit width test) is missing
      // before this message.
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2617
2618void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2619 if (!ConstantExprVisited.insert(EntryC).second)
2620 return;
2621
2623 Stack.push_back(EntryC);
2624
2625 while (!Stack.empty()) {
2626 const Constant *C = Stack.pop_back_val();
2627
2628 // Check this constant expression.
2629 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2630 visitConstantExpr(CE);
2631
2632 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2633 visitConstantPtrAuth(CPA);
2634
2635 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2636 // Global Values get visited separately, but we do need to make sure
2637 // that the global value is in the correct module
2638 Check(GV->getParent() == &M, "Referencing global in another module!",
2639 EntryC, &M, GV, GV->getParent());
2640 continue;
2641 }
2642
2643 // Visit all sub-expressions.
2644 for (const Use &U : C->operands()) {
2645 const auto *OpC = dyn_cast<Constant>(U);
2646 if (!OpC)
2647 continue;
2648 if (!ConstantExprVisited.insert(OpC).second)
2649 continue;
2650 Stack.push_back(OpC);
2651 }
2652 }
2653}
2654
2655void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2656 if (CE->getOpcode() == Instruction::BitCast)
2657 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2658 CE->getType()),
2659 "Invalid bitcast", CE);
2660 else if (CE->getOpcode() == Instruction::PtrToAddr)
2661 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2662}
2663
2664void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2665 Check(CPA->getPointer()->getType()->isPointerTy(),
2666 "signed ptrauth constant base pointer must have pointer type");
2667
2668 Check(CPA->getType() == CPA->getPointer()->getType(),
2669 "signed ptrauth constant must have same type as its base pointer");
2670
2671 Check(CPA->getKey()->getBitWidth() == 32,
2672 "signed ptrauth constant key must be i32 constant integer");
2673
2675 "signed ptrauth constant address discriminator must be a pointer");
2676
2677 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2678 "signed ptrauth constant discriminator must be i64 constant integer");
2679}
2680
2681bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2682 // There shouldn't be more attribute sets than there are parameters plus the
2683 // function and return value.
2684 return Attrs.getNumAttrSets() <= Params + 2;
2685}
2686
2687void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2688 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2689 unsigned ArgNo = 0;
2690 unsigned LabelNo = 0;
2691 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2692 if (CI.Type == InlineAsm::isLabel) {
2693 ++LabelNo;
2694 continue;
2695 }
2696
2697 // Only deal with constraints that correspond to call arguments.
2698 if (!CI.hasArg())
2699 continue;
2700
2701 if (CI.isIndirect) {
2702 const Value *Arg = Call.getArgOperand(ArgNo);
2703 Check(Arg->getType()->isPointerTy(),
2704 "Operand for indirect constraint must have pointer type", &Call);
2705
2707 "Operand for indirect constraint must have elementtype attribute",
2708 &Call);
2709 } else {
2710 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2711 "Elementtype attribute can only be applied for indirect "
2712 "constraints",
2713 &Call);
2714 }
2715
2716 ArgNo++;
2717 }
2718
2719 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2720 Check(LabelNo == CallBr->getNumIndirectDests(),
2721 "Number of label constraints does not match number of callbr dests",
2722 &Call);
2723 } else {
2724 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2725 &Call);
2726 }
2727}
2728
2729/// Verify that statepoint intrinsic is well formed.
2730void Verifier::verifyStatepoint(const CallBase &Call) {
2731 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2732
2735 "gc.statepoint must read and write all memory to preserve "
2736 "reordering restrictions required by safepoint semantics",
2737 Call);
2738
2739 const int64_t NumPatchBytes =
2740 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2741 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2742 Check(NumPatchBytes >= 0,
2743 "gc.statepoint number of patchable bytes must be "
2744 "positive",
2745 Call);
2746
2747 Type *TargetElemType = Call.getParamElementType(2);
2748 Check(TargetElemType,
2749 "gc.statepoint callee argument must have elementtype attribute", Call);
2750 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2751 Check(TargetFuncType,
2752 "gc.statepoint callee elementtype must be function type", Call);
2753
2754 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2755 Check(NumCallArgs >= 0,
2756 "gc.statepoint number of arguments to underlying call "
2757 "must be positive",
2758 Call);
2759 const int NumParams = (int)TargetFuncType->getNumParams();
2760 if (TargetFuncType->isVarArg()) {
2761 Check(NumCallArgs >= NumParams,
2762 "gc.statepoint mismatch in number of vararg call args", Call);
2763
2764 // TODO: Remove this limitation
2765 Check(TargetFuncType->getReturnType()->isVoidTy(),
2766 "gc.statepoint doesn't support wrapping non-void "
2767 "vararg functions yet",
2768 Call);
2769 } else
2770 Check(NumCallArgs == NumParams,
2771 "gc.statepoint mismatch in number of call args", Call);
2772
2773 const uint64_t Flags
2774 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2775 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2776 "unknown flag used in gc.statepoint flags argument", Call);
2777
2778 // Verify that the types of the call parameter arguments match
2779 // the type of the wrapped callee.
2780 AttributeList Attrs = Call.getAttributes();
2781 for (int i = 0; i < NumParams; i++) {
2782 Type *ParamType = TargetFuncType->getParamType(i);
2783 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2784 Check(ArgType == ParamType,
2785 "gc.statepoint call argument does not match wrapped "
2786 "function type",
2787 Call);
2788
2789 if (TargetFuncType->isVarArg()) {
2790 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2791 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2792 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2793 }
2794 }
2795
2796 const int EndCallArgsInx = 4 + NumCallArgs;
2797
2798 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2799 Check(isa<ConstantInt>(NumTransitionArgsV),
2800 "gc.statepoint number of transition arguments "
2801 "must be constant integer",
2802 Call);
2803 const int NumTransitionArgs =
2804 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2805 Check(NumTransitionArgs == 0,
2806 "gc.statepoint w/inline transition bundle is deprecated", Call);
2807 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2808
2809 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2810 Check(isa<ConstantInt>(NumDeoptArgsV),
2811 "gc.statepoint number of deoptimization arguments "
2812 "must be constant integer",
2813 Call);
2814 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2815 Check(NumDeoptArgs == 0,
2816 "gc.statepoint w/inline deopt operands is deprecated", Call);
2817
2818 const int ExpectedNumArgs = 7 + NumCallArgs;
2819 Check(ExpectedNumArgs == (int)Call.arg_size(),
2820 "gc.statepoint too many arguments", Call);
2821
2822 // Check that the only uses of this gc.statepoint are gc.result or
2823 // gc.relocate calls which are tied to this statepoint and thus part
2824 // of the same statepoint sequence
2825 for (const User *U : Call.users()) {
2826 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2827 Check(UserCall, "illegal use of statepoint token", Call, U);
2828 if (!UserCall)
2829 continue;
2830 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2831 "gc.result or gc.relocate are the only value uses "
2832 "of a gc.statepoint",
2833 Call, U);
2834 if (isa<GCResultInst>(UserCall)) {
2835 Check(UserCall->getArgOperand(0) == &Call,
2836 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2837 } else if (isa<GCRelocateInst>(Call)) {
2838 Check(UserCall->getArgOperand(0) == &Call,
2839 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2840 }
2841 }
2842
2843 // Note: It is legal for a single derived pointer to be listed multiple
2844 // times. It's non-optimal, but it is legal. It can also happen after
2845 // insertion if we strip a bitcast away.
2846 // Note: It is really tempting to check that each base is relocated and
2847 // that a derived pointer is never reused as a base pointer. This turns
2848 // out to be problematic since optimizations run after safepoint insertion
2849 // can recognize equality properties that the insertion logic doesn't know
2850 // about. See example statepoint.ll in the verifier subdirectory
2851}
2852
2853void Verifier::verifyFrameRecoverIndices() {
2854 for (auto &Counts : FrameEscapeInfo) {
2855 Function *F = Counts.first;
2856 unsigned EscapedObjectCount = Counts.second.first;
2857 unsigned MaxRecoveredIndex = Counts.second.second;
2858 Check(MaxRecoveredIndex <= EscapedObjectCount,
2859 "all indices passed to llvm.localrecover must be less than the "
2860 "number of arguments passed to llvm.localescape in the parent "
2861 "function",
2862 F);
2863 }
2864}
2865
2866static Instruction *getSuccPad(Instruction *Terminator) {
2867 BasicBlock *UnwindDest;
2868 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2869 UnwindDest = II->getUnwindDest();
2870 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2871 UnwindDest = CSI->getUnwindDest();
2872 else
2873 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2874 return &*UnwindDest->getFirstNonPHIIt();
2875}
2876
// Verify that no group of sibling EH funclets unwind to each other in a
// cycle; such a cycle would mean each pad claims to handle exceptions thrown
// by another, which is unsatisfiable. SiblingFuncletInfo maps each funclet
// pad to the terminator through which it unwinds to a sibling; since each
// pad has at most one such successor, the reachable structure is a set of
// chains/cycles that we walk with a standard Visited/Active scheme.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads fully processed in a previous walk. Active: pads on the
  // current walk; hitting one of these again means we found a cycle.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and distinct terminators) forming the cycle so
          // the diagnostic can print the whole loop.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2919
2920// visitFunction - Verify that a function is ok.
2921//
2922void Verifier::visitFunction(const Function &F) {
2923 visitGlobalValue(F);
2924
2925 // Check function arguments.
2926 FunctionType *FT = F.getFunctionType();
2927 unsigned NumArgs = F.arg_size();
2928
2929 Check(&Context == &F.getContext(),
2930 "Function context does not match Module context!", &F);
2931
2932 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2933 Check(FT->getNumParams() == NumArgs,
2934 "# formal arguments must match # of arguments for function type!", &F,
2935 FT);
2936 Check(F.getReturnType()->isFirstClassType() ||
2937 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2938 "Functions cannot return aggregate values!", &F);
2939
2940 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2941 "Invalid struct return type!", &F);
2942
2943 if (MaybeAlign A = F.getAlign()) {
2944 Check(A->value() <= Value::MaximumAlignment,
2945 "huge alignment values are unsupported", &F);
2946 }
2947
2948 AttributeList Attrs = F.getAttributes();
2949
2950 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2951 "Attribute after last parameter!", &F);
2952
2953 bool IsIntrinsic = F.isIntrinsic();
2954
2955 // Check function attributes.
2956 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2957
2958 // On function declarations/definitions, we do not support the builtin
2959 // attribute. We do not check this in VerifyFunctionAttrs since that is
2960 // checking for Attributes that can/can not ever be on functions.
2961 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2962 "Attribute 'builtin' can only be applied to a callsite.", &F);
2963
2964 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2965 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2966
2967 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2968 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2969
2970 if (Attrs.hasFnAttr(Attribute::Naked))
2971 for (const Argument &Arg : F.args())
2972 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2973
2974 // Check that this function meets the restrictions on this calling convention.
2975 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2976 // restrictions can be lifted.
2977 switch (F.getCallingConv()) {
2978 default:
2979 case CallingConv::C:
2980 break;
2981 case CallingConv::X86_INTR: {
2982 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2983 "Calling convention parameter requires byval", &F);
2984 break;
2985 }
2986 case CallingConv::AMDGPU_KERNEL:
2987 case CallingConv::SPIR_KERNEL:
2988 case CallingConv::AMDGPU_CS_Chain:
2989 case CallingConv::AMDGPU_CS_ChainPreserve:
2990 Check(F.getReturnType()->isVoidTy(),
2991 "Calling convention requires void return type", &F);
2992 [[fallthrough]];
2993 case CallingConv::AMDGPU_VS:
2994 case CallingConv::AMDGPU_HS:
2995 case CallingConv::AMDGPU_GS:
2996 case CallingConv::AMDGPU_PS:
2997 case CallingConv::AMDGPU_CS:
2998 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2999 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3000 const unsigned StackAS = DL.getAllocaAddrSpace();
3001 unsigned i = 0;
3002 for (const Argument &Arg : F.args()) {
3003 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3004 "Calling convention disallows byval", &F);
3005 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3006 "Calling convention disallows preallocated", &F);
3007 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3008 "Calling convention disallows inalloca", &F);
3009
3010 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3011 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3012 // value here.
3013 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3014 "Calling convention disallows stack byref", &F);
3015 }
3016
3017 ++i;
3018 }
3019 }
3020
3021 [[fallthrough]];
3022 case CallingConv::Fast:
3023 case CallingConv::Cold:
3024 case CallingConv::Intel_OCL_BI:
3025 case CallingConv::PTX_Kernel:
3026 case CallingConv::PTX_Device:
3027 Check(!F.isVarArg(),
3028 "Calling convention does not support varargs or "
3029 "perfect forwarding!",
3030 &F);
3031 break;
3032 case CallingConv::AMDGPU_Gfx_WholeWave:
3033 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3034 "Calling convention requires first argument to be i1", &F);
3035 Check(!F.arg_begin()->hasInRegAttr(),
3036 "Calling convention requires first argument to not be inreg", &F);
3037 Check(!F.isVarArg(),
3038 "Calling convention does not support varargs or "
3039 "perfect forwarding!",
3040 &F);
3041 break;
3042 }
3043
3044 // Check that the argument values match the function type for this function...
3045 unsigned i = 0;
3046 for (const Argument &Arg : F.args()) {
3047 Check(Arg.getType() == FT->getParamType(i),
3048 "Argument value does not match function argument type!", &Arg,
3049 FT->getParamType(i));
3050 Check(Arg.getType()->isFirstClassType(),
3051 "Function arguments must have first-class types!", &Arg);
3052 if (!IsIntrinsic) {
3053 Check(!Arg.getType()->isMetadataTy(),
3054 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3055 Check(!Arg.getType()->isTokenLikeTy(),
3056 "Function takes token but isn't an intrinsic", &Arg, &F);
3057 Check(!Arg.getType()->isX86_AMXTy(),
3058 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3059 }
3060
3061 // Check that swifterror argument is only used by loads and stores.
3062 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3063 verifySwiftErrorValue(&Arg);
3064 }
3065 ++i;
3066 }
3067
3068 if (!IsIntrinsic) {
3069 Check(!F.getReturnType()->isTokenLikeTy(),
3070 "Function returns a token but isn't an intrinsic", &F);
3071 Check(!F.getReturnType()->isX86_AMXTy(),
3072 "Function returns a x86_amx but isn't an intrinsic", &F);
3073 }
3074
3075 // Get the function metadata attachments.
3077 F.getAllMetadata(MDs);
3078 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3079 verifyFunctionMetadata(MDs);
3080
3081 // Check validity of the personality function
3082 if (F.hasPersonalityFn()) {
3083 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3084 if (Per)
3085 Check(Per->getParent() == F.getParent(),
3086 "Referencing personality function in another module!", &F,
3087 F.getParent(), Per, Per->getParent());
3088 }
3089
3090 // EH funclet coloring can be expensive, recompute on-demand
3091 BlockEHFuncletColors.clear();
3092
3093 if (F.isMaterializable()) {
3094 // Function has a body somewhere we can't see.
3095 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3096 MDs.empty() ? nullptr : MDs.front().second);
3097 } else if (F.isDeclaration()) {
3098 for (const auto &I : MDs) {
3099 // This is used for call site debug information.
3100 CheckDI(I.first != LLVMContext::MD_dbg ||
3101 !cast<DISubprogram>(I.second)->isDistinct(),
3102 "function declaration may only have a unique !dbg attachment",
3103 &F);
3104 Check(I.first != LLVMContext::MD_prof,
3105 "function declaration may not have a !prof attachment", &F);
3106
3107 // Verify the metadata itself.
3108 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3109 }
3110 Check(!F.hasPersonalityFn(),
3111 "Function declaration shouldn't have a personality routine", &F);
3112 } else {
3113 // Verify that this function (which has a body) is not named "llvm.*". It
3114 // is not legal to define intrinsics.
3115 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3116
3117 // Check the entry node
3118 const BasicBlock *Entry = &F.getEntryBlock();
3119 Check(pred_empty(Entry),
3120 "Entry block to function must not have predecessors!", Entry);
3121
3122 // The address of the entry block cannot be taken, unless it is dead.
3123 if (Entry->hasAddressTaken()) {
3124 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3125 "blockaddress may not be used with the entry block!", Entry);
3126 }
3127
3128 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3129 NumKCFIAttachments = 0;
3130 // Visit metadata attachments.
3131 for (const auto &I : MDs) {
3132 // Verify that the attachment is legal.
3133 auto AllowLocs = AreDebugLocsAllowed::No;
3134 switch (I.first) {
3135 default:
3136 break;
3137 case LLVMContext::MD_dbg: {
3138 ++NumDebugAttachments;
3139 CheckDI(NumDebugAttachments == 1,
3140 "function must have a single !dbg attachment", &F, I.second);
3141 CheckDI(isa<DISubprogram>(I.second),
3142 "function !dbg attachment must be a subprogram", &F, I.second);
3143 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3144 "function definition may only have a distinct !dbg attachment",
3145 &F);
3146
3147 auto *SP = cast<DISubprogram>(I.second);
3148 const Function *&AttachedTo = DISubprogramAttachments[SP];
3149 CheckDI(!AttachedTo || AttachedTo == &F,
3150 "DISubprogram attached to more than one function", SP, &F);
3151 AttachedTo = &F;
3152 AllowLocs = AreDebugLocsAllowed::Yes;
3153 break;
3154 }
3155 case LLVMContext::MD_prof:
3156 ++NumProfAttachments;
3157 Check(NumProfAttachments == 1,
3158 "function must have a single !prof attachment", &F, I.second);
3159 break;
3160 case LLVMContext::MD_kcfi_type:
3161 ++NumKCFIAttachments;
3162 Check(NumKCFIAttachments == 1,
3163 "function must have a single !kcfi_type attachment", &F,
3164 I.second);
3165 break;
3166 }
3167
3168 // Verify the metadata itself.
3169 visitMDNode(*I.second, AllowLocs);
3170 }
3171 }
3172
3173 // If this function is actually an intrinsic, verify that it is only used in
3174 // direct call/invokes, never having its "address taken".
3175 // Only do this if the module is materialized, otherwise we don't have all the
3176 // uses.
3177 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3178 const User *U;
3179 if (F.hasAddressTaken(&U, false, true, false,
3180 /*IgnoreARCAttachedCall=*/true))
3181 Check(false, "Invalid user of intrinsic instruction!", U);
3182 }
3183
3184 // Check intrinsics' signatures.
3185 switch (F.getIntrinsicID()) {
3186 case Intrinsic::experimental_gc_get_pointer_base: {
3187 FunctionType *FT = F.getFunctionType();
3188 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3189 Check(isa<PointerType>(F.getReturnType()),
3190 "gc.get.pointer.base must return a pointer", F);
3191 Check(FT->getParamType(0) == F.getReturnType(),
3192 "gc.get.pointer.base operand and result must be of the same type", F);
3193 break;
3194 }
3195 case Intrinsic::experimental_gc_get_pointer_offset: {
3196 FunctionType *FT = F.getFunctionType();
3197 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3198 Check(isa<PointerType>(FT->getParamType(0)),
3199 "gc.get.pointer.offset operand must be a pointer", F);
3200 Check(F.getReturnType()->isIntegerTy(),
3201 "gc.get.pointer.offset must return integer", F);
3202 break;
3203 }
3204 }
3205
3206 auto *N = F.getSubprogram();
3207 HasDebugInfo = (N != nullptr);
3208 if (!HasDebugInfo)
3209 return;
3210
3211 // Check that all !dbg attachments lead to back to N.
3212 //
3213 // FIXME: Check this incrementally while visiting !dbg attachments.
3214 // FIXME: Only check when N is the canonical subprogram for F.
3215 SmallPtrSet<const MDNode *, 32> Seen;
3216 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3217 // Be careful about using DILocation here since we might be dealing with
3218 // broken code (this is the Verifier after all).
3219 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3220 if (!DL)
3221 return;
3222 if (!Seen.insert(DL).second)
3223 return;
3224
3225 Metadata *Parent = DL->getRawScope();
3226 CheckDI(Parent && isa<DILocalScope>(Parent),
3227 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3228
3229 DILocalScope *Scope = DL->getInlinedAtScope();
3230 Check(Scope, "Failed to find DILocalScope", DL);
3231
3232 if (!Seen.insert(Scope).second)
3233 return;
3234
3235 DISubprogram *SP = Scope->getSubprogram();
3236
3237 // Scope and SP could be the same MDNode and we don't want to skip
3238 // validation in that case
3239 if ((Scope != SP) && !Seen.insert(SP).second)
3240 return;
3241
3242 CheckDI(SP->describes(&F),
3243 "!dbg attachment points at wrong subprogram for function", N, &F,
3244 &I, DL, Scope, SP);
3245 };
3246 for (auto &BB : F)
3247 for (auto &I : BB) {
3248 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3249 // The llvm.loop annotations also contain two DILocations.
3250 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3251 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3252 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3253 if (BrokenDebugInfo)
3254 return;
3255 }
3256}
3257
3258// verifyBasicBlock - Verify that a basic block is well formed...
3259//
3260void Verifier::visitBasicBlock(BasicBlock &BB) {
3261 InstsInThisBlock.clear();
3262 ConvergenceVerifyHelper.visit(BB);
3263
3264 // Ensure that basic blocks have terminators!
3265 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3266
3267 // Check constraints that this basic block imposes on all of the PHI nodes in
3268 // it.
3269 if (isa<PHINode>(BB.front())) {
3270 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3272 llvm::sort(Preds);
3273 for (const PHINode &PN : BB.phis()) {
3274 Check(PN.getNumIncomingValues() == Preds.size(),
3275 "PHINode should have one entry for each predecessor of its "
3276 "parent basic block!",
3277 &PN);
3278
3279 // Get and sort all incoming values in the PHI node...
3280 Values.clear();
3281 Values.reserve(PN.getNumIncomingValues());
3282 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3283 Values.push_back(
3284 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3285 llvm::sort(Values);
3286
3287 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3288 // Check to make sure that if there is more than one entry for a
3289 // particular basic block in this PHI node, that the incoming values are
3290 // all identical.
3291 //
3292 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3293 Values[i].second == Values[i - 1].second,
3294 "PHI node has multiple entries for the same basic block with "
3295 "different incoming values!",
3296 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3297
3298 // Check to make sure that the predecessors and PHI node entries are
3299 // matched up.
3300 Check(Values[i].first == Preds[i],
3301 "PHI node entries do not match predecessors!", &PN,
3302 Values[i].first, Preds[i]);
3303 }
3304 }
3305 }
3306
3307 // Check that all instructions have their parent pointers set up correctly.
3308 for (auto &I : BB)
3309 {
3310 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3311 }
3312
3313 // Confirm that no issues arise from the debug program.
3314 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3315 &BB);
3316}
3317
3318void Verifier::visitTerminator(Instruction &I) {
3319 // Ensure that terminators only exist at the end of the basic block.
3320 Check(&I == I.getParent()->getTerminator(),
3321 "Terminator found in the middle of a basic block!", I.getParent());
3322 visitInstruction(I);
3323}
3324
3325void Verifier::visitBranchInst(BranchInst &BI) {
3326 if (BI.isConditional()) {
3328 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3329 }
3330 visitTerminator(BI);
3331}
3332
3333void Verifier::visitReturnInst(ReturnInst &RI) {
3334 Function *F = RI.getParent()->getParent();
3335 unsigned N = RI.getNumOperands();
3336 if (F->getReturnType()->isVoidTy())
3337 Check(N == 0,
3338 "Found return instr that returns non-void in Function of void "
3339 "return type!",
3340 &RI, F->getReturnType());
3341 else
3342 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3343 "Function return type does not match operand "
3344 "type of return inst!",
3345 &RI, F->getReturnType());
3346
3347 // Check to make sure that the return value has necessary properties for
3348 // terminators...
3349 visitTerminator(RI);
3350}
3351
3352void Verifier::visitSwitchInst(SwitchInst &SI) {
3353 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3354 // Check to make sure that all of the constants in the switch instruction
3355 // have the same type as the switched-on value.
3356 Type *SwitchTy = SI.getCondition()->getType();
3357 SmallPtrSet<ConstantInt*, 32> Constants;
3358 for (auto &Case : SI.cases()) {
3359 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3360 "Case value is not a constant integer.", &SI);
3361 Check(Case.getCaseValue()->getType() == SwitchTy,
3362 "Switch constants must all be same type as switch value!", &SI);
3363 Check(Constants.insert(Case.getCaseValue()).second,
3364 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3365 }
3366
3367 visitTerminator(SI);
3368}
3369
3370void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3372 "Indirectbr operand must have pointer type!", &BI);
3373 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3375 "Indirectbr destinations must all have pointer type!", &BI);
3376
3377 visitTerminator(BI);
3378}
3379
3380void Verifier::visitCallBrInst(CallBrInst &CBI) {
3381 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3382 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3383 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3384
3385 verifyInlineAsmCall(CBI);
3386 visitTerminator(CBI);
3387}
3388
3389void Verifier::visitSelectInst(SelectInst &SI) {
3390 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3391 SI.getOperand(2)),
3392 "Invalid operands for select instruction!", &SI);
3393
3394 Check(SI.getTrueValue()->getType() == SI.getType(),
3395 "Select values must have same type as select instruction!", &SI);
3396 visitInstruction(SI);
3397}
3398
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // Unconditionally reject: encountering a UserOp1 in finished IR means a
  // pass leaked its temporary placeholder instruction.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3405
3406void Verifier::visitTruncInst(TruncInst &I) {
3407 // Get the source and destination types
3408 Type *SrcTy = I.getOperand(0)->getType();
3409 Type *DestTy = I.getType();
3410
3411 // Get the size of the types in bits, we'll need this later
3412 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3413 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3414
3415 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3416 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3417 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3418 "trunc source and destination must both be a vector or neither", &I);
3419 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3420
3421 visitInstruction(I);
3422}
3423
3424void Verifier::visitZExtInst(ZExtInst &I) {
3425 // Get the source and destination types
3426 Type *SrcTy = I.getOperand(0)->getType();
3427 Type *DestTy = I.getType();
3428
3429 // Get the size of the types in bits, we'll need this later
3430 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3431 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3432 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3433 "zext source and destination must both be a vector or neither", &I);
3434 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3435 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3436
3437 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3438
3439 visitInstruction(I);
3440}
3441
3442void Verifier::visitSExtInst(SExtInst &I) {
3443 // Get the source and destination types
3444 Type *SrcTy = I.getOperand(0)->getType();
3445 Type *DestTy = I.getType();
3446
3447 // Get the size of the types in bits, we'll need this later
3448 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3449 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3450
3451 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3452 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3453 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3454 "sext source and destination must both be a vector or neither", &I);
3455 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3456
3457 visitInstruction(I);
3458}
3459
3460void Verifier::visitFPTruncInst(FPTruncInst &I) {
3461 // Get the source and destination types
3462 Type *SrcTy = I.getOperand(0)->getType();
3463 Type *DestTy = I.getType();
3464 // Get the size of the types in bits, we'll need this later
3465 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3466 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3467
3468 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3469 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3470 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3471 "fptrunc source and destination must both be a vector or neither", &I);
3472 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3473
3474 visitInstruction(I);
3475}
3476
3477void Verifier::visitFPExtInst(FPExtInst &I) {
3478 // Get the source and destination types
3479 Type *SrcTy = I.getOperand(0)->getType();
3480 Type *DestTy = I.getType();
3481
3482 // Get the size of the types in bits, we'll need this later
3483 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3484 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3485
3486 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3487 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3488 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3489 "fpext source and destination must both be a vector or neither", &I);
3490 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3491
3492 visitInstruction(I);
3493}
3494
3495void Verifier::visitUIToFPInst(UIToFPInst &I) {
3496 // Get the source and destination types
3497 Type *SrcTy = I.getOperand(0)->getType();
3498 Type *DestTy = I.getType();
3499
3500 bool SrcVec = SrcTy->isVectorTy();
3501 bool DstVec = DestTy->isVectorTy();
3502
3503 Check(SrcVec == DstVec,
3504 "UIToFP source and dest must both be vector or scalar", &I);
3505 Check(SrcTy->isIntOrIntVectorTy(),
3506 "UIToFP source must be integer or integer vector", &I);
3507 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3508 &I);
3509
3510 if (SrcVec && DstVec)
3511 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3512 cast<VectorType>(DestTy)->getElementCount(),
3513 "UIToFP source and dest vector length mismatch", &I);
3514
3515 visitInstruction(I);
3516}
3517
3518void Verifier::visitSIToFPInst(SIToFPInst &I) {
3519 // Get the source and destination types
3520 Type *SrcTy = I.getOperand(0)->getType();
3521 Type *DestTy = I.getType();
3522
3523 bool SrcVec = SrcTy->isVectorTy();
3524 bool DstVec = DestTy->isVectorTy();
3525
3526 Check(SrcVec == DstVec,
3527 "SIToFP source and dest must both be vector or scalar", &I);
3528 Check(SrcTy->isIntOrIntVectorTy(),
3529 "SIToFP source must be integer or integer vector", &I);
3530 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3531 &I);
3532
3533 if (SrcVec && DstVec)
3534 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3535 cast<VectorType>(DestTy)->getElementCount(),
3536 "SIToFP source and dest vector length mismatch", &I);
3537
3538 visitInstruction(I);
3539}
3540
3541void Verifier::visitFPToUIInst(FPToUIInst &I) {
3542 // Get the source and destination types
3543 Type *SrcTy = I.getOperand(0)->getType();
3544 Type *DestTy = I.getType();
3545
3546 bool SrcVec = SrcTy->isVectorTy();
3547 bool DstVec = DestTy->isVectorTy();
3548
3549 Check(SrcVec == DstVec,
3550 "FPToUI source and dest must both be vector or scalar", &I);
3551 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3552 Check(DestTy->isIntOrIntVectorTy(),
3553 "FPToUI result must be integer or integer vector", &I);
3554
3555 if (SrcVec && DstVec)
3556 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3557 cast<VectorType>(DestTy)->getElementCount(),
3558 "FPToUI source and dest vector length mismatch", &I);
3559
3560 visitInstruction(I);
3561}
3562
3563void Verifier::visitFPToSIInst(FPToSIInst &I) {
3564 // Get the source and destination types
3565 Type *SrcTy = I.getOperand(0)->getType();
3566 Type *DestTy = I.getType();
3567
3568 bool SrcVec = SrcTy->isVectorTy();
3569 bool DstVec = DestTy->isVectorTy();
3570
3571 Check(SrcVec == DstVec,
3572 "FPToSI source and dest must both be vector or scalar", &I);
3573 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3574 Check(DestTy->isIntOrIntVectorTy(),
3575 "FPToSI result must be integer or integer vector", &I);
3576
3577 if (SrcVec && DstVec)
3578 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3579 cast<VectorType>(DestTy)->getElementCount(),
3580 "FPToSI source and dest vector length mismatch", &I);
3581
3582 visitInstruction(I);
3583}
3584
3585void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3586 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3587 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3588 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3589 V);
3590
3591 if (SrcTy->isVectorTy()) {
3592 auto *VSrc = cast<VectorType>(SrcTy);
3593 auto *VDest = cast<VectorType>(DestTy);
3594 Check(VSrc->getElementCount() == VDest->getElementCount(),
3595 "PtrToAddr vector length mismatch", V);
3596 }
3597
3598 Type *AddrTy = DL.getAddressType(SrcTy);
3599 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3600}
3601
void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
  // Delegate the type/width checks to checkPtrToAddr, which is shared with
  // the constant-expression form of this cast.
  checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
  visitInstruction(I);
}
3606
3607void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3608 // Get the source and destination types
3609 Type *SrcTy = I.getOperand(0)->getType();
3610 Type *DestTy = I.getType();
3611
3612 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3613
3614 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3615 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3616 &I);
3617
3618 if (SrcTy->isVectorTy()) {
3619 auto *VSrc = cast<VectorType>(SrcTy);
3620 auto *VDest = cast<VectorType>(DestTy);
3621 Check(VSrc->getElementCount() == VDest->getElementCount(),
3622 "PtrToInt Vector length mismatch", &I);
3623 }
3624
3625 visitInstruction(I);
3626}
3627
3628void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3629 // Get the source and destination types
3630 Type *SrcTy = I.getOperand(0)->getType();
3631 Type *DestTy = I.getType();
3632
3633 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3634 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3635
3636 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3637 &I);
3638 if (SrcTy->isVectorTy()) {
3639 auto *VSrc = cast<VectorType>(SrcTy);
3640 auto *VDest = cast<VectorType>(DestTy);
3641 Check(VSrc->getElementCount() == VDest->getElementCount(),
3642 "IntToPtr Vector length mismatch", &I);
3643 }
3644 visitInstruction(I);
3645}
3646
void Verifier::visitBitCastInst(BitCastInst &I) {
  // All of the legality rules for bitcast (matching bit widths, no
  // pointer<->non-pointer casts, etc.) are centralized in
  // CastInst::castIsValid; simply delegate to it.
  Check(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}
3653
3654void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3655 Type *SrcTy = I.getOperand(0)->getType();
3656 Type *DestTy = I.getType();
3657
3658 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3659 &I);
3660 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3661 &I);
3663 "AddrSpaceCast must be between different address spaces", &I);
3664 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3665 Check(SrcVTy->getElementCount() ==
3666 cast<VectorType>(DestTy)->getElementCount(),
3667 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3668 visitInstruction(I);
3669}
3670
3671/// visitPHINode - Ensure that a PHI node is well formed.
3672///
3673void Verifier::visitPHINode(PHINode &PN) {
3674 // Ensure that the PHI nodes are all grouped together at the top of the block.
3675 // This can be tested by checking whether the instruction before this is
3676 // either nonexistent (because this is begin()) or is a PHI node. If not,
3677 // then there is some other instruction before a PHI.
3678 Check(&PN == &PN.getParent()->front() ||
3680 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3681
3682 // Check that a PHI doesn't yield a Token.
3683 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3684
3685 // Check that all of the values of the PHI node have the same type as the
3686 // result.
3687 for (Value *IncValue : PN.incoming_values()) {
3688 Check(PN.getType() == IncValue->getType(),
3689 "PHI node operands are not the same type as the result!", &PN);
3690 }
3691
3692 // All other PHI node constraints are checked in the visitBasicBlock method.
3693
3694 visitInstruction(PN);
3695}
3696
3697void Verifier::visitCallBase(CallBase &Call) {
3699 "Called function must be a pointer!", Call);
3700 FunctionType *FTy = Call.getFunctionType();
3701
3702 // Verify that the correct number of arguments are being passed
3703 if (FTy->isVarArg())
3704 Check(Call.arg_size() >= FTy->getNumParams(),
3705 "Called function requires more parameters than were provided!", Call);
3706 else
3707 Check(Call.arg_size() == FTy->getNumParams(),
3708 "Incorrect number of arguments passed to called function!", Call);
3709
3710 // Verify that all arguments to the call match the function type.
3711 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3712 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3713 "Call parameter type does not match function signature!",
3714 Call.getArgOperand(i), FTy->getParamType(i), Call);
3715
3716 AttributeList Attrs = Call.getAttributes();
3717
3718 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3719 "Attribute after last parameter!", Call);
3720
3721 Function *Callee =
3723 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3724 if (IsIntrinsic)
3725 Check(Callee->getValueType() == FTy,
3726 "Intrinsic called with incompatible signature", Call);
3727
3728 // Verify if the calling convention of the callee is callable.
3730 "calling convention does not permit calls", Call);
3731
3732 // Disallow passing/returning values with alignment higher than we can
3733 // represent.
3734 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3735 // necessary.
3736 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3737 if (!Ty->isSized())
3738 return;
3739 Align ABIAlign = DL.getABITypeAlign(Ty);
3740 Check(ABIAlign.value() <= Value::MaximumAlignment,
3741 "Incorrect alignment of " + Message + " to called function!", Call);
3742 };
3743
3744 if (!IsIntrinsic) {
3745 VerifyTypeAlign(FTy->getReturnType(), "return type");
3746 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3747 Type *Ty = FTy->getParamType(i);
3748 VerifyTypeAlign(Ty, "argument passed");
3749 }
3750 }
3751
3752 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3753 // Don't allow speculatable on call sites, unless the underlying function
3754 // declaration is also speculatable.
3755 Check(Callee && Callee->isSpeculatable(),
3756 "speculatable attribute may not apply to call sites", Call);
3757 }
3758
3759 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3760 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3761 "preallocated as a call site attribute can only be on "
3762 "llvm.call.preallocated.arg");
3763 }
3764
3765 // Verify call attributes.
3766 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3767
3768 // Conservatively check the inalloca argument.
3769 // We have a bug if we can find that there is an underlying alloca without
3770 // inalloca.
3771 if (Call.hasInAllocaArgument()) {
3772 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3773 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3774 Check(AI->isUsedWithInAlloca(),
3775 "inalloca argument for call has mismatched alloca", AI, Call);
3776 }
3777
3778 // For each argument of the callsite, if it has the swifterror argument,
3779 // make sure the underlying alloca/parameter it comes from has a swifterror as
3780 // well.
3781 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3782 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3783 Value *SwiftErrorArg = Call.getArgOperand(i);
3784 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3785 Check(AI->isSwiftError(),
3786 "swifterror argument for call has mismatched alloca", AI, Call);
3787 continue;
3788 }
3789 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3790 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3791 SwiftErrorArg, Call);
3792 Check(ArgI->hasSwiftErrorAttr(),
3793 "swifterror argument for call has mismatched parameter", ArgI,
3794 Call);
3795 }
3796
3797 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3798 // Don't allow immarg on call sites, unless the underlying declaration
3799 // also has the matching immarg.
3800 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3801 "immarg may not apply only to call sites", Call.getArgOperand(i),
3802 Call);
3803 }
3804
3805 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3806 Value *ArgVal = Call.getArgOperand(i);
3807 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3808 "immarg operand has non-immediate parameter", ArgVal, Call);
3809
3810 // If the imm-arg is an integer and also has a range attached,
3811 // check if the given value is within the range.
3812 if (Call.paramHasAttr(i, Attribute::Range)) {
3813 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3814 const ConstantRange &CR =
3815 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3816 Check(CR.contains(CI->getValue()),
3817 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3818 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3819 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3820 Call);
3821 }
3822 }
3823 }
3824
3825 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3826 Value *ArgVal = Call.getArgOperand(i);
3827 bool hasOB =
3829 bool isMustTail = Call.isMustTailCall();
3830 Check(hasOB != isMustTail,
3831 "preallocated operand either requires a preallocated bundle or "
3832 "the call to be musttail (but not both)",
3833 ArgVal, Call);
3834 }
3835 }
3836
3837 if (FTy->isVarArg()) {
3838 // FIXME? is 'nest' even legal here?
3839 bool SawNest = false;
3840 bool SawReturned = false;
3841
3842 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3843 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3844 SawNest = true;
3845 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3846 SawReturned = true;
3847 }
3848
3849 // Check attributes on the varargs part.
3850 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3851 Type *Ty = Call.getArgOperand(Idx)->getType();
3852 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3853 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3854
3855 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3856 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3857 SawNest = true;
3858 }
3859
3860 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3861 Check(!SawReturned, "More than one parameter has attribute returned!",
3862 Call);
3863 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3864 "Incompatible argument and return types for 'returned' "
3865 "attribute",
3866 Call);
3867 SawReturned = true;
3868 }
3869
3870 // Statepoint intrinsic is vararg but the wrapped function may be not.
3871 // Allow sret here and check the wrapped function in verifyStatepoint.
3872 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3873 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3874 "Attribute 'sret' cannot be used for vararg call arguments!",
3875 Call);
3876
3877 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3878 Check(Idx == Call.arg_size() - 1,
3879 "inalloca isn't on the last argument!", Call);
3880 }
3881 }
3882
3883 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3884 if (!IsIntrinsic) {
3885 for (Type *ParamTy : FTy->params()) {
3886 Check(!ParamTy->isMetadataTy(),
3887 "Function has metadata parameter but isn't an intrinsic", Call);
3888 Check(!ParamTy->isTokenLikeTy(),
3889 "Function has token parameter but isn't an intrinsic", Call);
3890 }
3891 }
3892
3893 // Verify that indirect calls don't return tokens.
3894 if (!Call.getCalledFunction()) {
3895 Check(!FTy->getReturnType()->isTokenLikeTy(),
3896 "Return type cannot be token for indirect call!");
3897 Check(!FTy->getReturnType()->isX86_AMXTy(),
3898 "Return type cannot be x86_amx for indirect call!");
3899 }
3900
3902 visitIntrinsicCall(ID, Call);
3903
3904 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3905 // most one "gc-transition", at most one "cfguardtarget", at most one
3906 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3907 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3908 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3909 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3910 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3911 FoundAttachedCallBundle = false;
3912 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3913 OperandBundleUse BU = Call.getOperandBundleAt(i);
3914 uint32_t Tag = BU.getTagID();
3915 if (Tag == LLVMContext::OB_deopt) {
3916 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3917 FoundDeoptBundle = true;
3918 } else if (Tag == LLVMContext::OB_gc_transition) {
3919 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3920 Call);
3921 FoundGCTransitionBundle = true;
3922 } else if (Tag == LLVMContext::OB_funclet) {
3923 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3924 FoundFuncletBundle = true;
3925 Check(BU.Inputs.size() == 1,
3926 "Expected exactly one funclet bundle operand", Call);
3927 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3928 "Funclet bundle operands should correspond to a FuncletPadInst",
3929 Call);
3930 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3931 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3932 Call);
3933 FoundCFGuardTargetBundle = true;
3934 Check(BU.Inputs.size() == 1,
3935 "Expected exactly one cfguardtarget bundle operand", Call);
3936 } else if (Tag == LLVMContext::OB_ptrauth) {
3937 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3938 FoundPtrauthBundle = true;
3939 Check(BU.Inputs.size() == 2,
3940 "Expected exactly two ptrauth bundle operands", Call);
3941 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3942 BU.Inputs[0]->getType()->isIntegerTy(32),
3943 "Ptrauth bundle key operand must be an i32 constant", Call);
3944 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3945 "Ptrauth bundle discriminator operand must be an i64", Call);
3946 } else if (Tag == LLVMContext::OB_kcfi) {
3947 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3948 FoundKCFIBundle = true;
3949 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3950 Call);
3951 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3952 BU.Inputs[0]->getType()->isIntegerTy(32),
3953 "Kcfi bundle operand must be an i32 constant", Call);
3954 } else if (Tag == LLVMContext::OB_preallocated) {
3955 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3956 Call);
3957 FoundPreallocatedBundle = true;
3958 Check(BU.Inputs.size() == 1,
3959 "Expected exactly one preallocated bundle operand", Call);
3960 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3961 Check(Input &&
3962 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3963 "\"preallocated\" argument must be a token from "
3964 "llvm.call.preallocated.setup",
3965 Call);
3966 } else if (Tag == LLVMContext::OB_gc_live) {
3967 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3968 FoundGCLiveBundle = true;
3970 Check(!FoundAttachedCallBundle,
3971 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3972 FoundAttachedCallBundle = true;
3973 verifyAttachedCallBundle(Call, BU);
3974 }
3975 }
3976
3977 // Verify that callee and callsite agree on whether to use pointer auth.
3978 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3979 "Direct call cannot have a ptrauth bundle", Call);
3980
3981 // Verify that each inlinable callsite of a debug-info-bearing function in a
3982 // debug-info-bearing function has a debug location attached to it. Failure to
3983 // do so causes assertion failures when the inliner sets up inline scope info
3984 // (Interposable functions are not inlinable, neither are functions without
3985 // definitions.)
3991 "inlinable function call in a function with "
3992 "debug info must have a !dbg location",
3993 Call);
3994
3995 if (Call.isInlineAsm())
3996 verifyInlineAsmCall(Call);
3997
3998 ConvergenceVerifyHelper.visit(Call);
3999
4000 visitInstruction(Call);
4001}
4002
4003void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4004 StringRef Context) {
4005 Check(!Attrs.contains(Attribute::InAlloca),
4006 Twine("inalloca attribute not allowed in ") + Context);
4007 Check(!Attrs.contains(Attribute::InReg),
4008 Twine("inreg attribute not allowed in ") + Context);
4009 Check(!Attrs.contains(Attribute::SwiftError),
4010 Twine("swifterror attribute not allowed in ") + Context);
4011 Check(!Attrs.contains(Attribute::Preallocated),
4012 Twine("preallocated attribute not allowed in ") + Context);
4013 Check(!Attrs.contains(Attribute::ByRef),
4014 Twine("byref attribute not allowed in ") + Context);
4015}
4016
4017/// Two types are "congruent" if they are identical, or if they are both pointer
4018/// types with different pointee types and the same address space.
4019static bool isTypeCongruent(Type *L, Type *R) {
4020 if (L == R)
4021 return true;
4024 if (!PL || !PR)
4025 return false;
4026 return PL->getAddressSpace() == PR->getAddressSpace();
4027}
4028
4029static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4030 static const Attribute::AttrKind ABIAttrs[] = {
4031 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4032 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4033 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4034 Attribute::ByRef};
4035 AttrBuilder Copy(C);
4036 for (auto AK : ABIAttrs) {
4037 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4038 if (Attr.isValid())
4039 Copy.addAttribute(Attr);
4040 }
4041
4042 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4043 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4044 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4045 Attrs.hasParamAttr(I, Attribute::ByRef)))
4046 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4047 return Copy;
4048}
4049
4050void Verifier::verifyMustTailCall(CallInst &CI) {
4051 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4052
4053 Function *F = CI.getParent()->getParent();
4054 FunctionType *CallerTy = F->getFunctionType();
4055 FunctionType *CalleeTy = CI.getFunctionType();
4056 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4057 "cannot guarantee tail call due to mismatched varargs", &CI);
4058 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4059 "cannot guarantee tail call due to mismatched return types", &CI);
4060
4061 // - The calling conventions of the caller and callee must match.
4062 Check(F->getCallingConv() == CI.getCallingConv(),
4063 "cannot guarantee tail call due to mismatched calling conv", &CI);
4064
4065 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4066 // or a pointer bitcast followed by a ret instruction.
4067 // - The ret instruction must return the (possibly bitcasted) value
4068 // produced by the call or void.
4069 Value *RetVal = &CI;
4071
4072 // Handle the optional bitcast.
4073 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4074 Check(BI->getOperand(0) == RetVal,
4075 "bitcast following musttail call must use the call", BI);
4076 RetVal = BI;
4077 Next = BI->getNextNode();
4078 }
4079
4080 // Check the return.
4081 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4082 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4083 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4084 isa<UndefValue>(Ret->getReturnValue()),
4085 "musttail call result must be returned", Ret);
4086
4087 AttributeList CallerAttrs = F->getAttributes();
4088 AttributeList CalleeAttrs = CI.getAttributes();
4089 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4090 CI.getCallingConv() == CallingConv::Tail) {
4091 StringRef CCName =
4092 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4093
4094 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4095 // are allowed in swifttailcc call
4096 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4097 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4098 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4099 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4100 }
4101 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4102 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4103 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4104 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4105 }
4106 // - Varargs functions are not allowed
4107 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4108 " tail call for varargs function");
4109 return;
4110 }
4111
4112 // - The caller and callee prototypes must match. Pointer types of
4113 // parameters or return types may differ in pointee type, but not
4114 // address space.
4115 if (!CI.getIntrinsicID()) {
4116 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4117 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4118 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4119 Check(
4120 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4121 "cannot guarantee tail call due to mismatched parameter types", &CI);
4122 }
4123 }
4124
4125 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4126 // returned, preallocated, and inalloca, must match.
4127 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4128 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4129 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4130 Check(CallerABIAttrs == CalleeABIAttrs,
4131 "cannot guarantee tail call due to mismatched ABI impacting "
4132 "function attributes",
4133 &CI, CI.getOperand(I));
4134 }
4135}
4136
void Verifier::visitCallInst(CallInst &CI) {
  // Run the call-site checks shared with invoke/callbr.
  visitCallBase(CI);

  // musttail calls carry extra structural constraints (must precede a ret,
  // matching prototypes and ABI attributes, ...).
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4143
void Verifier::visitInvokeInst(InvokeInst &II) {
  // Run the call-site checks shared with call/callbr.
  visitCallBase(II);

  // Verify that the first non-PHI instruction of the unwind destination is an
  // exception handling instruction.
  Check(
      II.getUnwindDest()->isEHPad(),
      "The unwind destination does not have an exception handling instruction!",
      &II);

  // An invoke is also a block terminator; apply the terminator checks.
  visitTerminator(II);
}
4156
4157/// visitUnaryOperator - Check the argument to the unary operator.
4158///
4159void Verifier::visitUnaryOperator(UnaryOperator &U) {
4160 Check(U.getType() == U.getOperand(0)->getType(),
4161 "Unary operators must have same type for"
4162 "operands and result!",
4163 &U);
4164
4165 switch (U.getOpcode()) {
4166 // Check that floating-point arithmetic operators are only used with
4167 // floating-point operands.
4168 case Instruction::FNeg:
4169 Check(U.getType()->isFPOrFPVectorTy(),
4170 "FNeg operator only works with float types!", &U);
4171 break;
4172 default:
4173 llvm_unreachable("Unknown UnaryOperator opcode!");
4174 }
4175
4176 visitInstruction(U);
4177}
4178
4179/// visitBinaryOperator - Check that both arguments to the binary operator are
4180/// of the same type!
4181///
4182void Verifier::visitBinaryOperator(BinaryOperator &B) {
4183 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4184 "Both operands to a binary operator are not of the same type!", &B);
4185
4186 switch (B.getOpcode()) {
4187 // Check that integer arithmetic operators are only used with
4188 // integral operands.
4189 case Instruction::Add:
4190 case Instruction::Sub:
4191 case Instruction::Mul:
4192 case Instruction::SDiv:
4193 case Instruction::UDiv:
4194 case Instruction::SRem:
4195 case Instruction::URem:
4196 Check(B.getType()->isIntOrIntVectorTy(),
4197 "Integer arithmetic operators only work with integral types!", &B);
4198 Check(B.getType() == B.getOperand(0)->getType(),
4199 "Integer arithmetic operators must have same type "
4200 "for operands and result!",
4201 &B);
4202 break;
4203 // Check that floating-point arithmetic operators are only used with
4204 // floating-point operands.
4205 case Instruction::FAdd:
4206 case Instruction::FSub:
4207 case Instruction::FMul:
4208 case Instruction::FDiv:
4209 case Instruction::FRem:
4210 Check(B.getType()->isFPOrFPVectorTy(),
4211 "Floating-point arithmetic operators only work with "
4212 "floating-point types!",
4213 &B);
4214 Check(B.getType() == B.getOperand(0)->getType(),
4215 "Floating-point arithmetic operators must have same type "
4216 "for operands and result!",
4217 &B);
4218 break;
4219 // Check that logical operators are only used with integral operands.
4220 case Instruction::And:
4221 case Instruction::Or:
4222 case Instruction::Xor:
4223 Check(B.getType()->isIntOrIntVectorTy(),
4224 "Logical operators only work with integral types!", &B);
4225 Check(B.getType() == B.getOperand(0)->getType(),
4226 "Logical operators must have same type for operands and result!", &B);
4227 break;
4228 case Instruction::Shl:
4229 case Instruction::LShr:
4230 case Instruction::AShr:
4231 Check(B.getType()->isIntOrIntVectorTy(),
4232 "Shifts only work with integral types!", &B);
4233 Check(B.getType() == B.getOperand(0)->getType(),
4234 "Shift return type must be same as operands!", &B);
4235 break;
4236 default:
4237 llvm_unreachable("Unknown BinaryOperator opcode!");
4238 }
4239
4240 visitInstruction(B);
4241}
4242
4243void Verifier::visitICmpInst(ICmpInst &IC) {
4244 // Check that the operands are the same type
4245 Type *Op0Ty = IC.getOperand(0)->getType();
4246 Type *Op1Ty = IC.getOperand(1)->getType();
4247 Check(Op0Ty == Op1Ty,
4248 "Both operands to ICmp instruction are not of the same type!", &IC);
4249 // Check that the operands are the right type
4250 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4251 "Invalid operand types for ICmp instruction", &IC);
4252 // Check that the predicate is valid.
4253 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4254
4255 visitInstruction(IC);
4256}
4257
4258void Verifier::visitFCmpInst(FCmpInst &FC) {
4259 // Check that the operands are the same type
4260 Type *Op0Ty = FC.getOperand(0)->getType();
4261 Type *Op1Ty = FC.getOperand(1)->getType();
4262 Check(Op0Ty == Op1Ty,
4263 "Both operands to FCmp instruction are not of the same type!", &FC);
4264 // Check that the operands are the right type
4265 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4266 &FC);
4267 // Check that the predicate is valid.
4268 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4269
4270 visitInstruction(FC);
4271}
4272
4273void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4275 "Invalid extractelement operands!", &EI);
4276 visitInstruction(EI);
4277}
4278
4279void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4280 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4281 IE.getOperand(2)),
4282 "Invalid insertelement operands!", &IE);
4283 visitInstruction(IE);
4284}
4285
4286void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4288 SV.getShuffleMask()),
4289 "Invalid shufflevector operands!", &SV);
4290 visitInstruction(SV);
4291}
4292
4293void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4294 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4295
4296 Check(isa<PointerType>(TargetTy),
4297 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4298 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4299
4300 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4301 Check(!STy->isScalableTy(),
4302 "getelementptr cannot target structure that contains scalable vector"
4303 "type",
4304 &GEP);
4305 }
4306
4307 SmallVector<Value *, 16> Idxs(GEP.indices());
4308 Check(
4309 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4310 "GEP indexes must be integers", &GEP);
4311 Type *ElTy =
4312 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4313 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4314
4315 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4316
4317 Check(PtrTy && GEP.getResultElementType() == ElTy,
4318 "GEP is not of right type for indices!", &GEP, ElTy);
4319
4320 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4321 // Additional checks for vector GEPs.
4322 ElementCount GEPWidth = GEPVTy->getElementCount();
4323 if (GEP.getPointerOperandType()->isVectorTy())
4324 Check(
4325 GEPWidth ==
4326 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4327 "Vector GEP result width doesn't match operand's", &GEP);
4328 for (Value *Idx : Idxs) {
4329 Type *IndexTy = Idx->getType();
4330 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4331 ElementCount IndexWidth = IndexVTy->getElementCount();
4332 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4333 }
4334 Check(IndexTy->isIntOrIntVectorTy(),
4335 "All GEP indices should be of integer type");
4336 }
4337 }
4338
4339 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4340 "GEP address space doesn't match type", &GEP);
4341
4342 visitInstruction(GEP);
4343}
4344
4345static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4346 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4347}
4348
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata is a flat list of (low, high) constant-integer pairs. Each
/// pair must be a non-empty half-open range, pairs must be sorted, and
/// adjacent pairs may neither overlap nor touch (touching pairs should have
/// been merged into one).
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  // Operands come in (low, high) pairs, so the count must be even.
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address-space numbers are always expressed as i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol may use the full-set encoding (low == high at
    // the min/max value); the other kinds must be proper subsets.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      // Touching intervals should have been merged into one pair.
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges the list can wrap around the integer space, so
  // the last pair must also be checked against the first.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4413
/// Verify a !range attachment on instruction I against the value type Ty.
/// Caller guarantees Range is exactly I's MD_range attachment.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4419
/// Verify a !noalias.addrspace attachment on instruction I. Caller guarantees
/// Range is exactly I's MD_noalias_addrspace attachment; the shared range
/// verifier additionally enforces i32 pair types for this kind.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4427
4428void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4429 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4430 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4431 Check(!(Size & (Size - 1)),
4432 "atomic memory access' operand must have a power-of-two size", Ty, I);
4433}
4434
4435void Verifier::visitLoadInst(LoadInst &LI) {
4437 Check(PTy, "Load operand must be a pointer.", &LI);
4438 Type *ElTy = LI.getType();
4439 if (MaybeAlign A = LI.getAlign()) {
4440 Check(A->value() <= Value::MaximumAlignment,
4441 "huge alignment values are unsupported", &LI);
4442 }
4443 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4444 if (LI.isAtomic()) {
4445 Check(LI.getOrdering() != AtomicOrdering::Release &&
4446 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4447 "Load cannot have Release ordering", &LI);
4448 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4449 "atomic load operand must have integer, pointer, or floating point "
4450 "type!",
4451 ElTy, &LI);
4452 checkAtomicMemAccessSize(ElTy, &LI);
4453 } else {
4455 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4456 }
4457
4458 visitInstruction(LI);
4459}
4460
4461void Verifier::visitStoreInst(StoreInst &SI) {
4462 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4463 Check(PTy, "Store operand must be a pointer.", &SI);
4464 Type *ElTy = SI.getOperand(0)->getType();
4465 if (MaybeAlign A = SI.getAlign()) {
4466 Check(A->value() <= Value::MaximumAlignment,
4467 "huge alignment values are unsupported", &SI);
4468 }
4469 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4470 if (SI.isAtomic()) {
4471 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4472 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4473 "Store cannot have Acquire ordering", &SI);
4474 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4475 "atomic store operand must have integer, pointer, or floating point "
4476 "type!",
4477 ElTy, &SI);
4478 checkAtomicMemAccessSize(ElTy, &SI);
4479 } else {
4480 Check(SI.getSyncScopeID() == SyncScope::System,
4481 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4482 }
4483 visitInstruction(SI);
4484}
4485
4486/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4487void Verifier::verifySwiftErrorCall(CallBase &Call,
4488 const Value *SwiftErrorVal) {
4489 for (const auto &I : llvm::enumerate(Call.args())) {
4490 if (I.value() == SwiftErrorVal) {
4491 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4492 "swifterror value when used in a callsite should be marked "
4493 "with swifterror attribute",
4494 SwiftErrorVal, Call);
4495 }
4496 }
4497}
4498
4499void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4500 // Check that swifterror value is only used by loads, stores, or as
4501 // a swifterror argument.
4502 for (const User *U : SwiftErrorVal->users()) {
4504 isa<InvokeInst>(U),
4505 "swifterror value can only be loaded and stored from, or "
4506 "as a swifterror argument!",
4507 SwiftErrorVal, U);
4508 // If it is used by a store, check it is the second operand.
4509 if (auto StoreI = dyn_cast<StoreInst>(U))
4510 Check(StoreI->getOperand(1) == SwiftErrorVal,
4511 "swifterror value should be the second operand when used "
4512 "by stores",
4513 SwiftErrorVal, U);
4514 if (auto *Call = dyn_cast<CallBase>(U))
4515 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4516 }
4517}
4518
4519void Verifier::visitAllocaInst(AllocaInst &AI) {
4520 Type *Ty = AI.getAllocatedType();
4521 SmallPtrSet<Type*, 4> Visited;
4522 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4523 // Check if it's a target extension type that disallows being used on the
4524 // stack.
4526 "Alloca has illegal target extension type", &AI);
4528 "Alloca array size must have integer type", &AI);
4529 if (MaybeAlign A = AI.getAlign()) {
4530 Check(A->value() <= Value::MaximumAlignment,
4531 "huge alignment values are unsupported", &AI);
4532 }
4533
4534 if (AI.isSwiftError()) {
4535 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4537 "swifterror alloca must not be array allocation", &AI);
4538 verifySwiftErrorValue(&AI);
4539 }
4540
4541 if (TT.isAMDGPU()) {
4543 "alloca on amdgpu must be in addrspace(5)", &AI);
4544 }
4545
4546 visitInstruction(AI);
4547}
4548
4549void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4550 Type *ElTy = CXI.getOperand(1)->getType();
4551 Check(ElTy->isIntOrPtrTy(),
4552 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4553 checkAtomicMemAccessSize(ElTy, &CXI);
4554 visitInstruction(CXI);
4555}
4556
4557void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4558 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4559 "atomicrmw instructions cannot be unordered.", &RMWI);
4560 auto Op = RMWI.getOperation();
4561 Type *ElTy = RMWI.getOperand(1)->getType();
4562 if (Op == AtomicRMWInst::Xchg) {
4563 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4564 ElTy->isPointerTy(),
4565 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4566 " operand must have integer or floating point type!",
4567 &RMWI, ElTy);
4568 } else if (AtomicRMWInst::isFPOperation(Op)) {
4570 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4571 " operand must have floating-point or fixed vector of floating-point "
4572 "type!",
4573 &RMWI, ElTy);
4574 } else {
4575 Check(ElTy->isIntegerTy(),
4576 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4577 " operand must have integer type!",
4578 &RMWI, ElTy);
4579 }
4580 checkAtomicMemAccessSize(ElTy, &RMWI);
4582 "Invalid binary operation!", &RMWI);
4583 visitInstruction(RMWI);
4584}
4585
4586void Verifier::visitFenceInst(FenceInst &FI) {
4587 const AtomicOrdering Ordering = FI.getOrdering();
4588 Check(Ordering == AtomicOrdering::Acquire ||
4589 Ordering == AtomicOrdering::Release ||
4590 Ordering == AtomicOrdering::AcquireRelease ||
4591 Ordering == AtomicOrdering::SequentiallyConsistent,
4592 "fence instructions may only have acquire, release, acq_rel, or "
4593 "seq_cst ordering.",
4594 &FI);
4595 visitInstruction(FI);
4596}
4597
4598void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4600 EVI.getIndices()) == EVI.getType(),
4601 "Invalid ExtractValueInst operands!", &EVI);
4602
4603 visitInstruction(EVI);
4604}
4605
4606void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4608 IVI.getIndices()) ==
4609 IVI.getOperand(1)->getType(),
4610 "Invalid InsertValueInst operands!", &IVI);
4611
4612 visitInstruction(IVI);
4613}
4614
4615static Value *getParentPad(Value *EHPad) {
4616 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4617 return FPI->getParentPad();
4618
4619 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4620}
4621
/// Verify that every predecessor of an EH pad's block reaches it through a
/// legal unwind edge, and that the pad nesting implied by those edges is
/// consistent (no self-handling, no cycles, one pad entered per edge).
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  // The entry block has an implicit predecessor (function entry) that can
  // never be an unwind edge.
  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad may only be entered from its owning catchswitch, and that
    // catchswitch must not also unwind to it.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Non-throwing intrinsic invokes can't actually unwind here; skip them
      // (unless the intrinsic may be lowered to a real call).
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      // The funclet bundle, if present, names the pad the invoke executes in.
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      // Check(false) reports the failure and returns from the verifier method.
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4704
4705void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4706 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4707 // isn't a cleanup.
4708 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4709 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4710
4711 visitEHPadPredecessors(LPI);
4712
4713 if (!LandingPadResultTy)
4714 LandingPadResultTy = LPI.getType();
4715 else
4716 Check(LandingPadResultTy == LPI.getType(),
4717 "The landingpad instruction should have a consistent result type "
4718 "inside a function.",
4719 &LPI);
4720
4721 Function *F = LPI.getParent()->getParent();
4722 Check(F->hasPersonalityFn(),
4723 "LandingPadInst needs to be in a function with a personality.", &LPI);
4724
4725 // The landingpad instruction must be the first non-PHI instruction in the
4726 // block.
4727 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4728 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4729
4730 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4731 Constant *Clause = LPI.getClause(i);
4732 if (LPI.isCatch(i)) {
4733 Check(isa<PointerType>(Clause->getType()),
4734 "Catch operand does not have pointer type!", &LPI);
4735 } else {
4736 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4738 "Filter operand is not an array of constants!", &LPI);
4739 }
4740 }
4741
4742 visitInstruction(LPI);
4743}
4744
4745void Verifier::visitResumeInst(ResumeInst &RI) {
4747 "ResumeInst needs to be in a function with a personality.", &RI);
4748
4749 if (!LandingPadResultTy)
4750 LandingPadResultTy = RI.getValue()->getType();
4751 else
4752 Check(LandingPadResultTy == RI.getValue()->getType(),
4753 "The resume instruction should have a consistent result type "
4754 "inside a function.",
4755 &RI);
4756
4757 visitTerminator(RI);
4758}
4759
4760void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4761 BasicBlock *BB = CPI.getParent();
4762
4763 Function *F = BB->getParent();
4764 Check(F->hasPersonalityFn(),
4765 "CatchPadInst needs to be in a function with a personality.", &CPI);
4766
4768 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4769 CPI.getParentPad());
4770
4771 // The catchpad instruction must be the first non-PHI instruction in the
4772 // block.
4773 Check(&*BB->getFirstNonPHIIt() == &CPI,
4774 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4775
4776 visitEHPadPredecessors(CPI);
4777 visitFuncletPadInst(CPI);
4778}
4779
4780void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4781 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4782 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4783 CatchReturn.getOperand(0));
4784
4785 visitTerminator(CatchReturn);
4786}
4787
4788void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4789 BasicBlock *BB = CPI.getParent();
4790
4791 Function *F = BB->getParent();
4792 Check(F->hasPersonalityFn(),
4793 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4794
4795 // The cleanuppad instruction must be the first non-PHI instruction in the
4796 // block.
4797 Check(&*BB->getFirstNonPHIIt() == &CPI,
4798 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4799
4800 auto *ParentPad = CPI.getParentPad();
4801 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4802 "CleanupPadInst has an invalid parent.", &CPI);
4803
4804 visitEHPadPredecessors(CPI);
4805 visitFuncletPadInst(CPI);
4806}
4807
/// Verify unwind-destination consistency for a funclet pad (catchpad or
/// cleanuppad): every unwind edge that exits FPI must lead to the same place,
/// and a catchpad's exits must agree with its parent catchswitch. Nested
/// cleanuppads are explored via a worklist because a cleanup's unwind
/// destination is only discoverable through its uses.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found to exit FPI, and the pad (or 'none' token) it
  // unwinds to; all later exiting edges must match it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    // First ancestor of CurrentPad whose unwind destination is still unknown
    // after processing this pad's users (used to prune the worklist below).
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's exiting unwind edges must agree with the unwind destination
  // of the catchswitch that owns it.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4967
/// Verify a catchswitch: personality on the function, first non-PHI position,
/// legal parent pad, a non-landingpad EH-pad unwind destination (if any), and
/// a non-empty handler list of catchpads.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // The parent pad token is either 'none' or a funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    // WinEH pads and landingpads cannot mix within one unwind edge.
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    // Each handler block must begin with the catchpad it represents.
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
5009
5010void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5012 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5013 CRI.getOperand(0));
5014
5015 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5016 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5017 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5018 "CleanupReturnInst must unwind to an EH block which is not a "
5019 "landingpad.",
5020 &CRI);
5021 }
5022
5023 visitTerminator(CRI);
5024}
5025
5026void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5027 Instruction *Op = cast<Instruction>(I.getOperand(i));
5028 // If the we have an invalid invoke, don't try to compute the dominance.
5029 // We already reject it in the invoke specific checks and the dominance
5030 // computation doesn't handle multiple edges.
5031 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5032 if (II->getNormalDest() == II->getUnwindDest())
5033 return;
5034 }
5035
5036 // Quick check whether the def has already been encountered in the same block.
5037 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5038 // uses are defined to happen on the incoming edge, not at the instruction.
5039 //
5040 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5041 // wrapping an SSA value, assert that we've already encountered it. See
5042 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5043 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5044 return;
5045
5046 const Use &U = I.getOperandUse(i);
5047 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5048}
5049
5050void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5051 Check(I.getType()->isPointerTy(),
5052 "dereferenceable, dereferenceable_or_null "
5053 "apply only to pointer types",
5054 &I);
5056 "dereferenceable, dereferenceable_or_null apply only to load"
5057 " and inttoptr instructions, use attributes for calls or invokes",
5058 &I);
5059 Check(MD->getNumOperands() == 1,
5060 "dereferenceable, dereferenceable_or_null "
5061 "take one operand!",
5062 &I);
5063 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5064 Check(CI && CI->getType()->isIntegerTy(64),
5065 "dereferenceable, "
5066 "dereferenceable_or_null metadata value must be an i64!",
5067 &I);
5068}
5069
5070void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5071 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5072 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5073 &I);
5074 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5075}
5076
5077void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5078 auto GetBranchingTerminatorNumOperands = [&]() {
5079 unsigned ExpectedNumOperands = 0;
5080 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5081 ExpectedNumOperands = BI->getNumSuccessors();
5082 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5083 ExpectedNumOperands = SI->getNumSuccessors();
5084 else if (isa<CallInst>(&I))
5085 ExpectedNumOperands = 1;
5086 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5087 ExpectedNumOperands = IBI->getNumDestinations();
5088 else if (isa<SelectInst>(&I))
5089 ExpectedNumOperands = 2;
5090 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5091 ExpectedNumOperands = CI->getNumSuccessors();
5092 return ExpectedNumOperands;
5093 };
5094 Check(MD->getNumOperands() >= 1,
5095 "!prof annotations should have at least 1 operand", MD);
5096 // Check first operand.
5097 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5099 "expected string with name of the !prof annotation", MD);
5100 MDString *MDS = cast<MDString>(MD->getOperand(0));
5101 StringRef ProfName = MDS->getString();
5102
5104 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5105 "'unknown' !prof should only appear on instructions on which "
5106 "'branch_weights' would",
5107 MD);
5108 verifyUnknownProfileMetadata(MD);
5109 return;
5110 }
5111
5112 Check(MD->getNumOperands() >= 2,
5113 "!prof annotations should have no less than 2 operands", MD);
5114
5115 // Check consistency of !prof branch_weights metadata.
5116 if (ProfName == MDProfLabels::BranchWeights) {
5117 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5118 if (isa<InvokeInst>(&I)) {
5119 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5120 "Wrong number of InvokeInst branch_weights operands", MD);
5121 } else {
5122 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5123 if (ExpectedNumOperands == 0)
5124 CheckFailed("!prof branch_weights are not allowed for this instruction",
5125 MD);
5126
5127 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5128 MD);
5129 }
5130 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5131 ++i) {
5132 auto &MDO = MD->getOperand(i);
5133 Check(MDO, "second operand should not be null", MD);
5135 "!prof brunch_weights operand is not a const int");
5136 }
5137 } else if (ProfName == MDProfLabels::ValueProfile) {
5138 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5139 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5140 Check(KindInt, "VP !prof missing kind argument", MD);
5141
5142 auto Kind = KindInt->getZExtValue();
5143 Check(Kind >= InstrProfValueKind::IPVK_First &&
5144 Kind <= InstrProfValueKind::IPVK_Last,
5145 "Invalid VP !prof kind", MD);
5146 Check(MD->getNumOperands() % 2 == 1,
5147 "VP !prof should have an even number "
5148 "of arguments after 'VP'",
5149 MD);
5150 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5151 Kind == InstrProfValueKind::IPVK_MemOPSize)
5153 "VP !prof indirect call or memop size expected to be applied to "
5154 "CallBase instructions only",
5155 MD);
5156 } else {
5157 CheckFailed("expected either branch_weights or VP profile name", MD);
5158 }
5159}
5160
// Verify a !DIAssignID attachment on \p I, and cross-check every user of the
// ID node: intrinsic-form users (llvm.dbg.assign) and DbgVariableRecord users
// must both be assign-kind and live in the same function as \p I.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  // The dispatcher only calls this when the attachment is present.
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
      "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
          MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Record-form debug assignments referencing this ID must also be
  // assign-kind and must not cross function boundaries.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5192
// Verify a !mmra (memory model relaxation annotation) attachment: it must be
// a single MMRA tag, or a tuple whose operands are all MMRA tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
  "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag: must be a tuple, and every member must be a tag.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5210
// Verify a memprof call stack node: a non-empty list whose operands represent
// hashed call-site locations. Shared by !memprof MIBs and !callsite.
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
        "call stack metadata operand should be constant integer", Op);
}
5221
5222void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5223 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5224 Check(MD->getNumOperands() >= 1,
5225 "!memprof annotations should have at least 1 metadata operand "
5226 "(MemInfoBlock)",
5227 MD);
5228
5229 // Check each MIB
5230 for (auto &MIBOp : MD->operands()) {
5231 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5232 // The first operand of an MIB should be the call stack metadata.
5233 // There rest of the operands should be MDString tags, and there should be
5234 // at least one.
5235 Check(MIB->getNumOperands() >= 2,
5236 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5237
5238 // Check call stack metadata (first operand).
5239 Check(MIB->getOperand(0) != nullptr,
5240 "!memprof MemInfoBlock first operand should not be null", MIB);
5241 Check(isa<MDNode>(MIB->getOperand(0)),
5242 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5243 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5244 visitCallStackMetadata(StackMD);
5245
5246 // The next set of 1 or more operands should be MDString.
5247 unsigned I = 1;
5248 for (; I < MIB->getNumOperands(); ++I) {
5249 if (!isa<MDString>(MIB->getOperand(I))) {
5250 Check(I > 1,
5251 "!memprof MemInfoBlock second operand should be an MDString",
5252 MIB);
5253 break;
5254 }
5255 }
5256
5257 // Any remaining should be MDNode that are pairs of integers
5258 for (; I < MIB->getNumOperands(); ++I) {
5259 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5260 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5261 MIB);
5262 Check(OpNode->getNumOperands() == 2,
5263 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5264 "operands",
5265 MIB);
5266 // Check that all of Op's operands are ConstantInt.
5267 Check(llvm::all_of(OpNode->operands(),
5268 [](const MDOperand &Op) {
5269 return mdconst::hasa<ConstantInt>(Op);
5270 }),
5271 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5272 "ConstantInt operands",
5273 MIB);
5274 }
5275 }
5276}
5277
5278void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5279 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5280 // Verify the partial callstack annotated from memprof profiles. This callsite
5281 // is a part of a profiled allocation callstack.
5282 visitCallStackMetadata(MD);
5283}
5284
5285static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5286 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5287 return isa<ConstantInt>(VAL->getValue());
5288 return false;
5289}
5290
// Verify a !callee_type attachment: a list of generalized type metadata
// nodes, each shaped like function !type metadata (a zero offset followed by
// a generalized mangled-name string).
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // Function type metadata always carries offset 0 as its first operand.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5311
5312void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5313 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5314 Check(Annotation->getNumOperands() >= 1,
5315 "annotation must have at least one operand");
5316 for (const MDOperand &Op : Annotation->operands()) {
5317 bool TupleOfStrings =
5318 isa<MDTuple>(Op.get()) &&
5319 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5320 return isa<MDString>(Annotation.get());
5321 });
5322 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5323 "operands must be a string or a tuple of strings");
5324 }
5325}
5326
// Verify a single alias scope node: !{self-or-string, domain [, name]} where
// the domain is itself !{self-or-string [, name]}.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // Operand 0 is conventionally a self-reference (to make the node distinct);
  // a string is also accepted.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // The domain node mirrors the scope's shape: self-or-string head plus an
  // optional name string.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5350
5351void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5352 for (const MDOperand &Op : MD->operands()) {
5353 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5354 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5355 visitAliasScopeMetadata(OpMD);
5356 }
5357}
5358
5359void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5360 auto IsValidAccessScope = [](const MDNode *MD) {
5361 return MD->getNumOperands() == 0 && MD->isDistinct();
5362 };
5363
5364 // It must be either an access scope itself...
5365 if (IsValidAccessScope(MD))
5366 return;
5367
5368 // ...or a list of access scopes.
5369 for (const MDOperand &Op : MD->operands()) {
5370 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5371 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5372 Check(IsValidAccessScope(OpMD),
5373 "Access scope list contains invalid access scope", MD);
5374 }
5375}
5376
5377void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5378 static const char *ValidArgs[] = {"address_is_null", "address",
5379 "read_provenance", "provenance"};
5380
5381 auto *SI = dyn_cast<StoreInst>(&I);
5382 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5383 Check(SI->getValueOperand()->getType()->isPointerTy(),
5384 "!captures metadata can only be applied to store with value operand of "
5385 "pointer type",
5386 &I);
5387 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5388 &I);
5389
5390 for (Metadata *Op : Captures->operands()) {
5391 auto *Str = dyn_cast<MDString>(Op);
5392 Check(Str, "!captures metadata must be a list of strings", &I);
5393 Check(is_contained(ValidArgs, Str->getString()),
5394 "invalid entry in !captures metadata", &I, Str);
5395 }
5396}
5397
/// verifyInstruction - Verify that an instruction is well formed.
/// Checks generic structural properties (parent block, self-reference, use
/// lists, operand kinds and cross-function/module references) and then
/// dispatches each metadata attachment to its per-kind verifier.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated in unreachable code.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may appear on non-CallInst
      // call sites (e.g. invoked from an invoke/callbr).
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only be the callee of this very call site.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // From here on: validate each known metadata attachment kind in turn.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // The accuracy operand is required to be a finite, positive float.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
        "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
        "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
        "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
        "nonnull applies only to load instructions, use attributes"
        " for calls or invokes",
        &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  // !noalias and !alias.scope share the same scope-list shape.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
        "align applies only to load instructions, "
        "use attributes for calls or invokes",
        &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Generic pass over every attachment: debug locations are only permitted
  // inside !dbg and !loop attachments.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  InstsInThisBlock.insert(&I);
}
5655
5656/// Allow intrinsics to be verified in different ways.
5657void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5659 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5660 IF);
5661
5662 // Verify that the intrinsic prototype lines up with what the .td files
5663 // describe.
5664 FunctionType *IFTy = IF->getFunctionType();
5665 bool IsVarArg = IFTy->isVarArg();
5666
5670
5671 // Walk the descriptors to extract overloaded types.
5676 "Intrinsic has incorrect return type!", IF);
5678 "Intrinsic has incorrect argument type!", IF);
5679
5680 // Verify if the intrinsic call matches the vararg property.
5681 if (IsVarArg)
5683 "Intrinsic was not defined with variable arguments!", IF);
5684 else
5686 "Callsite was not defined with variable arguments!", IF);
5687
5688 // All descriptors should be absorbed by now.
5689 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5690
5691 // Now that we have the intrinsic ID and the actual argument types (and we
5692 // know they are legal for the intrinsic!) get the intrinsic name through the
5693 // usual means. This allows us to verify the mangling of argument types into
5694 // the name.
5695 const std::string ExpectedName =
5696 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5697 Check(ExpectedName == IF->getName(),
5698 "Intrinsic name not mangled correctly for type arguments! "
5699 "Should be: " +
5700 ExpectedName,
5701 IF);
5702
5703 // If the intrinsic takes MDNode arguments, verify that they are either global
5704 // or are local to *this* function.
5705 for (Value *V : Call.args()) {
5706 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5707 visitMetadataAsValue(*MD, Call.getCaller());
5708 if (auto *Const = dyn_cast<Constant>(V))
5709 Check(!Const->getType()->isX86_AMXTy(),
5710 "const x86_amx is not allowed in argument!");
5711 }
5712
5713 switch (ID) {
5714 default:
5715 break;
5716 case Intrinsic::assume: {
5717 if (Call.hasOperandBundles()) {
5719 Check(Cond && Cond->isOne(),
5720 "assume with operand bundles must have i1 true condition", Call);
5721 }
5722 for (auto &Elem : Call.bundle_op_infos()) {
5723 unsigned ArgCount = Elem.End - Elem.Begin;
5724 // Separate storage assumptions are special insofar as they're the only
5725 // operand bundles allowed on assumes that aren't parameter attributes.
5726 if (Elem.Tag->getKey() == "separate_storage") {
5727 Check(ArgCount == 2,
5728 "separate_storage assumptions should have 2 arguments", Call);
5729 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5730 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5731 "arguments to separate_storage assumptions should be pointers",
5732 Call);
5733 continue;
5734 }
5735 Check(Elem.Tag->getKey() == "ignore" ||
5736 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5737 "tags must be valid attribute names", Call);
5738 Attribute::AttrKind Kind =
5739 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5740 if (Kind == Attribute::Alignment) {
5741 Check(ArgCount <= 3 && ArgCount >= 2,
5742 "alignment assumptions should have 2 or 3 arguments", Call);
5743 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5744 "first argument should be a pointer", Call);
5745 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5746 "second argument should be an integer", Call);
5747 if (ArgCount == 3)
5748 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5749 "third argument should be an integer if present", Call);
5750 continue;
5751 }
5752 if (Kind == Attribute::Dereferenceable) {
5753 Check(ArgCount == 2,
5754 "dereferenceable assumptions should have 2 arguments", Call);
5755 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5756 "first argument should be a pointer", Call);
5757 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5758 "second argument should be an integer", Call);
5759 continue;
5760 }
5761 Check(ArgCount <= 2, "too many arguments", Call);
5762 if (Kind == Attribute::None)
5763 break;
5764 if (Attribute::isIntAttrKind(Kind)) {
5765 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5766 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5767 "the second argument should be a constant integral value", Call);
5768 } else if (Attribute::canUseAsParamAttr(Kind)) {
5769 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5770 } else if (Attribute::canUseAsFnAttr(Kind)) {
5771 Check((ArgCount) == 0, "this attribute has no argument", Call);
5772 }
5773 }
5774 break;
5775 }
5776 case Intrinsic::ucmp:
5777 case Intrinsic::scmp: {
5778 Type *SrcTy = Call.getOperand(0)->getType();
5779 Type *DestTy = Call.getType();
5780
5781 Check(DestTy->getScalarSizeInBits() >= 2,
5782 "result type must be at least 2 bits wide", Call);
5783
5784 bool IsDestTypeVector = DestTy->isVectorTy();
5785 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5786 "ucmp/scmp argument and result types must both be either vector or "
5787 "scalar types",
5788 Call);
5789 if (IsDestTypeVector) {
5790 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5791 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5792 Check(SrcVecLen == DestVecLen,
5793 "return type and arguments must have the same number of "
5794 "elements",
5795 Call);
5796 }
5797 break;
5798 }
5799 case Intrinsic::coro_id: {
5800 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5801 if (isa<ConstantPointerNull>(InfoArg))
5802 break;
5803 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5804 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5805 "info argument of llvm.coro.id must refer to an initialized "
5806 "constant");
5807 Constant *Init = GV->getInitializer();
5809 "info argument of llvm.coro.id must refer to either a struct or "
5810 "an array");
5811 break;
5812 }
5813 case Intrinsic::is_fpclass: {
5814 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5815 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5816 "unsupported bits for llvm.is.fpclass test mask");
5817 break;
5818 }
5819 case Intrinsic::fptrunc_round: {
5820 // Check the rounding mode
5821 Metadata *MD = nullptr;
5823 if (MAV)
5824 MD = MAV->getMetadata();
5825
5826 Check(MD != nullptr, "missing rounding mode argument", Call);
5827
5828 Check(isa<MDString>(MD),
5829 ("invalid value for llvm.fptrunc.round metadata operand"
5830 " (the operand should be a string)"),
5831 MD);
5832
5833 std::optional<RoundingMode> RoundMode =
5834 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5835 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5836 "unsupported rounding mode argument", Call);
5837 break;
5838 }
5839#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5840#include "llvm/IR/VPIntrinsics.def"
5841#undef BEGIN_REGISTER_VP_INTRINSIC
5842 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5843 break;
5844#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5845 case Intrinsic::INTRINSIC:
5846#include "llvm/IR/ConstrainedOps.def"
5847#undef INSTRUCTION
5848 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5849 break;
5850 case Intrinsic::dbg_declare: // llvm.dbg.declare
5851 case Intrinsic::dbg_value: // llvm.dbg.value
5852 case Intrinsic::dbg_assign: // llvm.dbg.assign
5853 case Intrinsic::dbg_label: // llvm.dbg.label
5854 // We no longer interpret debug intrinsics (the old variable-location
5855 // design). They're meaningless as far as LLVM is concerned we could make
5856 // it an error for them to appear, but it's possible we'll have users
5857 // converting back to intrinsics for the forseeable future (such as DXIL),
5858 // so tolerate their existance.
5859 break;
5860 case Intrinsic::memcpy:
5861 case Intrinsic::memcpy_inline:
5862 case Intrinsic::memmove:
5863 case Intrinsic::memset:
5864 case Intrinsic::memset_inline:
5865 break;
5866 case Intrinsic::experimental_memset_pattern: {
5867 const auto Memset = cast<MemSetPatternInst>(&Call);
5868 Check(Memset->getValue()->getType()->isSized(),
5869 "unsized types cannot be used as memset patterns", Call);
5870 break;
5871 }
5872 case Intrinsic::memcpy_element_unordered_atomic:
5873 case Intrinsic::memmove_element_unordered_atomic:
5874 case Intrinsic::memset_element_unordered_atomic: {
5875 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5876
5877 ConstantInt *ElementSizeCI =
5878 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5879 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5880 Check(ElementSizeVal.isPowerOf2(),
5881 "element size of the element-wise atomic memory intrinsic "
5882 "must be a power of 2",
5883 Call);
5884
5885 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5886 return Alignment && ElementSizeVal.ule(Alignment->value());
5887 };
5888 Check(IsValidAlignment(AMI->getDestAlign()),
5889 "incorrect alignment of the destination argument", Call);
5890 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5891 Check(IsValidAlignment(AMT->getSourceAlign()),
5892 "incorrect alignment of the source argument", Call);
5893 }
5894 break;
5895 }
5896 case Intrinsic::call_preallocated_setup: {
5897 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5898 bool FoundCall = false;
5899 for (User *U : Call.users()) {
5900 auto *UseCall = dyn_cast<CallBase>(U);
5901 Check(UseCall != nullptr,
5902 "Uses of llvm.call.preallocated.setup must be calls");
5903 Intrinsic::ID IID = UseCall->getIntrinsicID();
5904 if (IID == Intrinsic::call_preallocated_arg) {
5905 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5906 Check(AllocArgIndex != nullptr,
5907 "llvm.call.preallocated.alloc arg index must be a constant");
5908 auto AllocArgIndexInt = AllocArgIndex->getValue();
5909 Check(AllocArgIndexInt.sge(0) &&
5910 AllocArgIndexInt.slt(NumArgs->getValue()),
5911 "llvm.call.preallocated.alloc arg index must be between 0 and "
5912 "corresponding "
5913 "llvm.call.preallocated.setup's argument count");
5914 } else if (IID == Intrinsic::call_preallocated_teardown) {
5915 // nothing to do
5916 } else {
5917 Check(!FoundCall, "Can have at most one call corresponding to a "
5918 "llvm.call.preallocated.setup");
5919 FoundCall = true;
5920 size_t NumPreallocatedArgs = 0;
5921 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5922 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5923 ++NumPreallocatedArgs;
5924 }
5925 }
5926 Check(NumPreallocatedArgs != 0,
5927 "cannot use preallocated intrinsics on a call without "
5928 "preallocated arguments");
5929 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5930 "llvm.call.preallocated.setup arg size must be equal to number "
5931 "of preallocated arguments "
5932 "at call site",
5933 Call, *UseCall);
5934 // getOperandBundle() cannot be called if more than one of the operand
5935 // bundle exists. There is already a check elsewhere for this, so skip
5936 // here if we see more than one.
5937 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5938 1) {
5939 return;
5940 }
5941 auto PreallocatedBundle =
5942 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5943 Check(PreallocatedBundle,
5944 "Use of llvm.call.preallocated.setup outside intrinsics "
5945 "must be in \"preallocated\" operand bundle");
5946 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5947 "preallocated bundle must have token from corresponding "
5948 "llvm.call.preallocated.setup");
5949 }
5950 }
5951 break;
5952 }
5953 case Intrinsic::call_preallocated_arg: {
5954 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5955 Check(Token &&
5956 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5957 "llvm.call.preallocated.arg token argument must be a "
5958 "llvm.call.preallocated.setup");
5959 Check(Call.hasFnAttr(Attribute::Preallocated),
5960 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5961 "call site attribute");
5962 break;
5963 }
5964 case Intrinsic::call_preallocated_teardown: {
5965 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5966 Check(Token &&
5967 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5968 "llvm.call.preallocated.teardown token argument must be a "
5969 "llvm.call.preallocated.setup");
5970 break;
5971 }
5972 case Intrinsic::gcroot:
5973 case Intrinsic::gcwrite:
5974 case Intrinsic::gcread:
5975 if (ID == Intrinsic::gcroot) {
5976 AllocaInst *AI =
5978 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5980 "llvm.gcroot parameter #2 must be a constant.", Call);
5981 if (!AI->getAllocatedType()->isPointerTy()) {
5983 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5984 "or argument #2 must be a non-null constant.",
5985 Call);
5986 }
5987 }
5988
5989 Check(Call.getParent()->getParent()->hasGC(),
5990 "Enclosing function does not use GC.", Call);
5991 break;
5992 case Intrinsic::init_trampoline:
5994 "llvm.init_trampoline parameter #2 must resolve to a function.",
5995 Call);
5996 break;
5997 case Intrinsic::prefetch:
5998 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5999 "rw argument to llvm.prefetch must be 0-1", Call);
6000 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6001 "locality argument to llvm.prefetch must be 0-3", Call);
6002 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6003 "cache type argument to llvm.prefetch must be 0-1", Call);
6004 break;
6005 case Intrinsic::stackprotector:
6007 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6008 break;
6009 case Intrinsic::localescape: {
6010 BasicBlock *BB = Call.getParent();
6011 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6012 Call);
6013 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6014 Call);
6015 for (Value *Arg : Call.args()) {
6016 if (isa<ConstantPointerNull>(Arg))
6017 continue; // Null values are allowed as placeholders.
6018 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6019 Check(AI && AI->isStaticAlloca(),
6020 "llvm.localescape only accepts static allocas", Call);
6021 }
6022 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6023 SawFrameEscape = true;
6024 break;
6025 }
6026 case Intrinsic::localrecover: {
6028 Function *Fn = dyn_cast<Function>(FnArg);
6029 Check(Fn && !Fn->isDeclaration(),
6030 "llvm.localrecover first "
6031 "argument must be function defined in this module",
6032 Call);
6033 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6034 auto &Entry = FrameEscapeInfo[Fn];
6035 Entry.second = unsigned(
6036 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6037 break;
6038 }
6039
6040 case Intrinsic::experimental_gc_statepoint:
6041 if (auto *CI = dyn_cast<CallInst>(&Call))
6042 Check(!CI->isInlineAsm(),
6043 "gc.statepoint support for inline assembly unimplemented", CI);
6044 Check(Call.getParent()->getParent()->hasGC(),
6045 "Enclosing function does not use GC.", Call);
6046
6047 verifyStatepoint(Call);
6048 break;
6049 case Intrinsic::experimental_gc_result: {
6050 Check(Call.getParent()->getParent()->hasGC(),
6051 "Enclosing function does not use GC.", Call);
6052
6053 auto *Statepoint = Call.getArgOperand(0);
6054 if (isa<UndefValue>(Statepoint))
6055 break;
6056
6057 // Are we tied to a statepoint properly?
6058 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6059 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6060 Intrinsic::experimental_gc_statepoint,
6061 "gc.result operand #1 must be from a statepoint", Call,
6062 Call.getArgOperand(0));
6063
6064 // Check that result type matches wrapped callee.
6065 auto *TargetFuncType =
6066 cast<FunctionType>(StatepointCall->getParamElementType(2));
6067 Check(Call.getType() == TargetFuncType->getReturnType(),
6068 "gc.result result type does not match wrapped callee", Call);
6069 break;
6070 }
6071 case Intrinsic::experimental_gc_relocate: {
6072 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6073
6075 "gc.relocate must return a pointer or a vector of pointers", Call);
6076
6077 // Check that this relocate is correctly tied to the statepoint
6078
6079 // This is case for relocate on the unwinding path of an invoke statepoint
6080 if (LandingPadInst *LandingPad =
6082
6083 const BasicBlock *InvokeBB =
6084 LandingPad->getParent()->getUniquePredecessor();
6085
6086 // Landingpad relocates should have only one predecessor with invoke
6087 // statepoint terminator
6088 Check(InvokeBB, "safepoints should have unique landingpads",
6089 LandingPad->getParent());
6090 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6091 InvokeBB);
6093 "gc relocate should be linked to a statepoint", InvokeBB);
6094 } else {
6095 // In all other cases relocate should be tied to the statepoint directly.
6096 // This covers relocates on a normal return path of invoke statepoint and
6097 // relocates of a call statepoint.
6098 auto *Token = Call.getArgOperand(0);
6100 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6101 }
6102
6103 // Verify rest of the relocate arguments.
6104 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6105
6106 // Both the base and derived must be piped through the safepoint.
6109 "gc.relocate operand #2 must be integer offset", Call);
6110
6111 Value *Derived = Call.getArgOperand(2);
6112 Check(isa<ConstantInt>(Derived),
6113 "gc.relocate operand #3 must be integer offset", Call);
6114
6115 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6116 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6117
6118 // Check the bounds
6119 if (isa<UndefValue>(StatepointCall))
6120 break;
6121 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6122 .getOperandBundle(LLVMContext::OB_gc_live)) {
6123 Check(BaseIndex < Opt->Inputs.size(),
6124 "gc.relocate: statepoint base index out of bounds", Call);
6125 Check(DerivedIndex < Opt->Inputs.size(),
6126 "gc.relocate: statepoint derived index out of bounds", Call);
6127 }
6128
6129 // Relocated value must be either a pointer type or vector-of-pointer type,
6130 // but gc_relocate does not need to return the same pointer type as the
6131 // relocated pointer. It can be casted to the correct type later if it's
6132 // desired. However, they must have the same address space and 'vectorness'
6133 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6134 auto *ResultType = Call.getType();
6135 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6136 auto *BaseType = Relocate.getBasePtr()->getType();
6137
6138 Check(BaseType->isPtrOrPtrVectorTy(),
6139 "gc.relocate: relocated value must be a pointer", Call);
6140 Check(DerivedType->isPtrOrPtrVectorTy(),
6141 "gc.relocate: relocated value must be a pointer", Call);
6142
6143 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6144 "gc.relocate: vector relocates to vector and pointer to pointer",
6145 Call);
6146 Check(
6147 ResultType->getPointerAddressSpace() ==
6148 DerivedType->getPointerAddressSpace(),
6149 "gc.relocate: relocating a pointer shouldn't change its address space",
6150 Call);
6151
6152 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6153 Check(GC, "gc.relocate: calling function must have GCStrategy",
6154 Call.getFunction());
6155 if (GC) {
6156 auto isGCPtr = [&GC](Type *PTy) {
6157 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6158 };
6159 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6160 Check(isGCPtr(BaseType),
6161 "gc.relocate: relocated value must be a gc pointer", Call);
6162 Check(isGCPtr(DerivedType),
6163 "gc.relocate: relocated value must be a gc pointer", Call);
6164 }
6165 break;
6166 }
6167 case Intrinsic::experimental_patchpoint: {
6168 if (Call.getCallingConv() == CallingConv::AnyReg) {
6170 "patchpoint: invalid return type used with anyregcc", Call);
6171 }
6172 break;
6173 }
6174 case Intrinsic::eh_exceptioncode:
6175 case Intrinsic::eh_exceptionpointer: {
6177 "eh.exceptionpointer argument must be a catchpad", Call);
6178 break;
6179 }
6180 case Intrinsic::get_active_lane_mask: {
6182 "get_active_lane_mask: must return a "
6183 "vector",
6184 Call);
6185 auto *ElemTy = Call.getType()->getScalarType();
6186 Check(ElemTy->isIntegerTy(1),
6187 "get_active_lane_mask: element type is not "
6188 "i1",
6189 Call);
6190 break;
6191 }
6192 case Intrinsic::experimental_get_vector_length: {
6193 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6194 Check(!VF->isNegative() && !VF->isZero(),
6195 "get_vector_length: VF must be positive", Call);
6196 break;
6197 }
6198 case Intrinsic::masked_load: {
6199 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6200 Call);
6201
6202 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6204 Value *PassThru = Call.getArgOperand(3);
6205 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6206 Call);
6207 Check(Alignment->getValue().isPowerOf2(),
6208 "masked_load: alignment must be a power of 2", Call);
6209 Check(PassThru->getType() == Call.getType(),
6210 "masked_load: pass through and return type must match", Call);
6211 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6212 cast<VectorType>(Call.getType())->getElementCount(),
6213 "masked_load: vector mask must be same length as return", Call);
6214 break;
6215 }
6216 case Intrinsic::masked_store: {
6217 Value *Val = Call.getArgOperand(0);
6218 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6220 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6221 Call);
6222 Check(Alignment->getValue().isPowerOf2(),
6223 "masked_store: alignment must be a power of 2", Call);
6224 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6225 cast<VectorType>(Val->getType())->getElementCount(),
6226 "masked_store: vector mask must be same length as value", Call);
6227 break;
6228 }
6229
6230 case Intrinsic::masked_gather: {
6231 const APInt &Alignment =
6233 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6234 "masked_gather: alignment must be 0 or a power of 2", Call);
6235 break;
6236 }
6237 case Intrinsic::masked_scatter: {
6238 const APInt &Alignment =
6239 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6240 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6241 "masked_scatter: alignment must be 0 or a power of 2", Call);
6242 break;
6243 }
6244
6245 case Intrinsic::experimental_guard: {
6246 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6248 "experimental_guard must have exactly one "
6249 "\"deopt\" operand bundle");
6250 break;
6251 }
6252
6253 case Intrinsic::experimental_deoptimize: {
6254 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6255 Call);
6257 "experimental_deoptimize must have exactly one "
6258 "\"deopt\" operand bundle");
6260 "experimental_deoptimize return type must match caller return type");
6261
6262 if (isa<CallInst>(Call)) {
6264 Check(RI,
6265 "calls to experimental_deoptimize must be followed by a return");
6266
6267 if (!Call.getType()->isVoidTy() && RI)
6268 Check(RI->getReturnValue() == &Call,
6269 "calls to experimental_deoptimize must be followed by a return "
6270 "of the value computed by experimental_deoptimize");
6271 }
6272
6273 break;
6274 }
6275 case Intrinsic::vastart: {
6277 "va_start called in a non-varargs function");
6278 break;
6279 }
6280 case Intrinsic::get_dynamic_area_offset: {
6281 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6282 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6283 IntTy->getBitWidth(),
6284 "get_dynamic_area_offset result type must be scalar integer matching "
6285 "alloca address space width",
6286 Call);
6287 break;
6288 }
6289 case Intrinsic::vector_reduce_and:
6290 case Intrinsic::vector_reduce_or:
6291 case Intrinsic::vector_reduce_xor:
6292 case Intrinsic::vector_reduce_add:
6293 case Intrinsic::vector_reduce_mul:
6294 case Intrinsic::vector_reduce_smax:
6295 case Intrinsic::vector_reduce_smin:
6296 case Intrinsic::vector_reduce_umax:
6297 case Intrinsic::vector_reduce_umin: {
6298 Type *ArgTy = Call.getArgOperand(0)->getType();
6299 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6300 "Intrinsic has incorrect argument type!");
6301 break;
6302 }
6303 case Intrinsic::vector_reduce_fmax:
6304 case Intrinsic::vector_reduce_fmin: {
6305 Type *ArgTy = Call.getArgOperand(0)->getType();
6306 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6307 "Intrinsic has incorrect argument type!");
6308 break;
6309 }
6310 case Intrinsic::vector_reduce_fadd:
6311 case Intrinsic::vector_reduce_fmul: {
6312 // Unlike the other reductions, the first argument is a start value. The
6313 // second argument is the vector to be reduced.
6314 Type *ArgTy = Call.getArgOperand(1)->getType();
6315 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6316 "Intrinsic has incorrect argument type!");
6317 break;
6318 }
6319 case Intrinsic::smul_fix:
6320 case Intrinsic::smul_fix_sat:
6321 case Intrinsic::umul_fix:
6322 case Intrinsic::umul_fix_sat:
6323 case Intrinsic::sdiv_fix:
6324 case Intrinsic::sdiv_fix_sat:
6325 case Intrinsic::udiv_fix:
6326 case Intrinsic::udiv_fix_sat: {
6327 Value *Op1 = Call.getArgOperand(0);
6328 Value *Op2 = Call.getArgOperand(1);
6330 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6331 "vector of ints");
6333 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6334 "vector of ints");
6335
6336 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6337 Check(Op3->getType()->isIntegerTy(),
6338 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6339 Check(Op3->getBitWidth() <= 32,
6340 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6341
6342 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6343 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6344 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6345 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6346 "the operands");
6347 } else {
6348 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6349 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6350 "to the width of the operands");
6351 }
6352 break;
6353 }
6354 case Intrinsic::lrint:
6355 case Intrinsic::llrint:
6356 case Intrinsic::lround:
6357 case Intrinsic::llround: {
6358 Type *ValTy = Call.getArgOperand(0)->getType();
6359 Type *ResultTy = Call.getType();
6360 auto *VTy = dyn_cast<VectorType>(ValTy);
6361 auto *RTy = dyn_cast<VectorType>(ResultTy);
6362 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6363 ExpectedName + ": argument must be floating-point or vector "
6364 "of floating-points, and result must be integer or "
6365 "vector of integers",
6366 &Call);
6367 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6368 ExpectedName + ": argument and result disagree on vector use", &Call);
6369 if (VTy) {
6370 Check(VTy->getElementCount() == RTy->getElementCount(),
6371 ExpectedName + ": argument must be same length as result", &Call);
6372 }
6373 break;
6374 }
6375 case Intrinsic::bswap: {
6376 Type *Ty = Call.getType();
6377 unsigned Size = Ty->getScalarSizeInBits();
6378 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6379 break;
6380 }
6381 case Intrinsic::invariant_start: {
6382 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6383 Check(InvariantSize &&
6384 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6385 "invariant_start parameter must be -1, 0 or a positive number",
6386 &Call);
6387 break;
6388 }
6389 case Intrinsic::matrix_multiply:
6390 case Intrinsic::matrix_transpose:
6391 case Intrinsic::matrix_column_major_load:
6392 case Intrinsic::matrix_column_major_store: {
6394 ConstantInt *Stride = nullptr;
6395 ConstantInt *NumRows;
6396 ConstantInt *NumColumns;
6397 VectorType *ResultTy;
6398 Type *Op0ElemTy = nullptr;
6399 Type *Op1ElemTy = nullptr;
6400 switch (ID) {
6401 case Intrinsic::matrix_multiply: {
6402 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6403 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6404 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6406 ->getNumElements() ==
6407 NumRows->getZExtValue() * N->getZExtValue(),
6408 "First argument of a matrix operation does not match specified "
6409 "shape!");
6411 ->getNumElements() ==
6412 N->getZExtValue() * NumColumns->getZExtValue(),
6413 "Second argument of a matrix operation does not match specified "
6414 "shape!");
6415
6416 ResultTy = cast<VectorType>(Call.getType());
6417 Op0ElemTy =
6418 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6419 Op1ElemTy =
6420 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6421 break;
6422 }
6423 case Intrinsic::matrix_transpose:
6424 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6425 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6426 ResultTy = cast<VectorType>(Call.getType());
6427 Op0ElemTy =
6428 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6429 break;
6430 case Intrinsic::matrix_column_major_load: {
6432 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6433 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6434 ResultTy = cast<VectorType>(Call.getType());
6435 break;
6436 }
6437 case Intrinsic::matrix_column_major_store: {
6439 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6440 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6441 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6442 Op0ElemTy =
6443 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6444 break;
6445 }
6446 default:
6447 llvm_unreachable("unexpected intrinsic");
6448 }
6449
6450 Check(ResultTy->getElementType()->isIntegerTy() ||
6451 ResultTy->getElementType()->isFloatingPointTy(),
6452 "Result type must be an integer or floating-point type!", IF);
6453
6454 if (Op0ElemTy)
6455 Check(ResultTy->getElementType() == Op0ElemTy,
6456 "Vector element type mismatch of the result and first operand "
6457 "vector!",
6458 IF);
6459
6460 if (Op1ElemTy)
6461 Check(ResultTy->getElementType() == Op1ElemTy,
6462 "Vector element type mismatch of the result and second operand "
6463 "vector!",
6464 IF);
6465
6467 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6468 "Result of a matrix operation does not fit in the returned vector!");
6469
6470 if (Stride)
6471 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6472 "Stride must be greater or equal than the number of rows!", IF);
6473
6474 break;
6475 }
6476 case Intrinsic::vector_splice: {
6478 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6479 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6480 if (Call.getParent() && Call.getParent()->getParent()) {
6481 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6482 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6483 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6484 }
6485 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6486 (Idx >= 0 && Idx < KnownMinNumElements),
6487 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6488 "known minimum number of elements in the vector. For scalable "
6489 "vectors the minimum number of elements is determined from "
6490 "vscale_range.",
6491 &Call);
6492 break;
6493 }
6494 case Intrinsic::stepvector: {
6496 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6497 VecTy->getScalarSizeInBits() >= 8,
6498 "stepvector only supported for vectors of integers "
6499 "with a bitwidth of at least 8.",
6500 &Call);
6501 break;
6502 }
6503 case Intrinsic::experimental_vector_match: {
6504 Value *Op1 = Call.getArgOperand(0);
6505 Value *Op2 = Call.getArgOperand(1);
6507
6508 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6509 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6510 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6511
6512 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6514 "Second operand must be a fixed length vector.", &Call);
6515 Check(Op1Ty->getElementType()->isIntegerTy(),
6516 "First operand must be a vector of integers.", &Call);
6517 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6518 "First two operands must have the same element type.", &Call);
6519 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6520 "First operand and mask must have the same number of elements.",
6521 &Call);
6522 Check(MaskTy->getElementType()->isIntegerTy(1),
6523 "Mask must be a vector of i1's.", &Call);
6524 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6525 &Call);
6526 break;
6527 }
6528 case Intrinsic::vector_insert: {
6529 Value *Vec = Call.getArgOperand(0);
6530 Value *SubVec = Call.getArgOperand(1);
6531 Value *Idx = Call.getArgOperand(2);
6532 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6533
6534 VectorType *VecTy = cast<VectorType>(Vec->getType());
6535 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6536
6537 ElementCount VecEC = VecTy->getElementCount();
6538 ElementCount SubVecEC = SubVecTy->getElementCount();
6539 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6540 "vector_insert parameters must have the same element "
6541 "type.",
6542 &Call);
6543 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6544 "vector_insert index must be a constant multiple of "
6545 "the subvector's known minimum vector length.");
6546
6547 // If this insertion is not the 'mixed' case where a fixed vector is
6548 // inserted into a scalable vector, ensure that the insertion of the
6549 // subvector does not overrun the parent vector.
6550 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6551 Check(IdxN < VecEC.getKnownMinValue() &&
6552 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6553 "subvector operand of vector_insert would overrun the "
6554 "vector being inserted into.");
6555 }
6556 break;
6557 }
6558 case Intrinsic::vector_extract: {
6559 Value *Vec = Call.getArgOperand(0);
6560 Value *Idx = Call.getArgOperand(1);
6561 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6562
6563 VectorType *ResultTy = cast<VectorType>(Call.getType());
6564 VectorType *VecTy = cast<VectorType>(Vec->getType());
6565
6566 ElementCount VecEC = VecTy->getElementCount();
6567 ElementCount ResultEC = ResultTy->getElementCount();
6568
6569 Check(ResultTy->getElementType() == VecTy->getElementType(),
6570 "vector_extract result must have the same element "
6571 "type as the input vector.",
6572 &Call);
6573 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6574 "vector_extract index must be a constant multiple of "
6575 "the result type's known minimum vector length.");
6576
6577 // If this extraction is not the 'mixed' case where a fixed vector is
6578 // extracted from a scalable vector, ensure that the extraction does not
6579 // overrun the parent vector.
6580 if (VecEC.isScalable() == ResultEC.isScalable()) {
6581 Check(IdxN < VecEC.getKnownMinValue() &&
6582 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6583 "vector_extract would overrun.");
6584 }
6585 break;
6586 }
6587 case Intrinsic::vector_partial_reduce_add: {
6590
6591 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6592 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6593
6594 Check((VecWidth % AccWidth) == 0,
6595 "Invalid vector widths for partial "
6596 "reduction. The width of the input vector "
6597 "must be a positive integer multiple of "
6598 "the width of the accumulator vector.");
6599 break;
6600 }
6601 case Intrinsic::experimental_noalias_scope_decl: {
6602 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6603 break;
6604 }
6605 case Intrinsic::preserve_array_access_index:
6606 case Intrinsic::preserve_struct_access_index:
6607 case Intrinsic::aarch64_ldaxr:
6608 case Intrinsic::aarch64_ldxr:
6609 case Intrinsic::arm_ldaex:
6610 case Intrinsic::arm_ldrex: {
6611 Type *ElemTy = Call.getParamElementType(0);
6612 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6613 &Call);
6614 break;
6615 }
6616 case Intrinsic::aarch64_stlxr:
6617 case Intrinsic::aarch64_stxr:
6618 case Intrinsic::arm_stlex:
6619 case Intrinsic::arm_strex: {
6620 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6621 Check(ElemTy,
6622 "Intrinsic requires elementtype attribute on second argument.",
6623 &Call);
6624 break;
6625 }
6626 case Intrinsic::aarch64_prefetch: {
6627 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6628 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6629 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6630 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6631 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6632 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6633 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6634 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6635 break;
6636 }
6637 case Intrinsic::callbr_landingpad: {
6638 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6639 Check(CBR, "intrinstic requires callbr operand", &Call);
6640 if (!CBR)
6641 break;
6642
6643 const BasicBlock *LandingPadBB = Call.getParent();
6644 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6645 if (!PredBB) {
6646 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6647 break;
6648 }
6649 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6650 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6651 &Call);
6652 break;
6653 }
6654 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6655 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6656 "block in indirect destination list",
6657 &Call);
6658 const Instruction &First = *LandingPadBB->begin();
6659 Check(&First == &Call, "No other instructions may proceed intrinsic",
6660 &Call);
6661 break;
6662 }
6663 case Intrinsic::amdgcn_cs_chain: {
6664 auto CallerCC = Call.getCaller()->getCallingConv();
6665 switch (CallerCC) {
6666 case CallingConv::AMDGPU_CS:
6667 case CallingConv::AMDGPU_CS_Chain:
6668 case CallingConv::AMDGPU_CS_ChainPreserve:
6669 break;
6670 default:
6671 CheckFailed("Intrinsic can only be used from functions with the "
6672 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6673 "calling conventions",
6674 &Call);
6675 break;
6676 }
6677
6678 Check(Call.paramHasAttr(2, Attribute::InReg),
6679 "SGPR arguments must have the `inreg` attribute", &Call);
6680 Check(!Call.paramHasAttr(3, Attribute::InReg),
6681 "VGPR arguments must not have the `inreg` attribute", &Call);
6682
6683 auto *Next = Call.getNextNode();
6684 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6685 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6686 Intrinsic::amdgcn_unreachable;
6687 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6688 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6689 break;
6690 }
6691 case Intrinsic::amdgcn_init_exec_from_input: {
6692 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6693 Check(Arg && Arg->hasInRegAttr(),
6694 "only inreg arguments to the parent function are valid as inputs to "
6695 "this intrinsic",
6696 &Call);
6697 break;
6698 }
6699 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6700 auto CallerCC = Call.getCaller()->getCallingConv();
6701 switch (CallerCC) {
6702 case CallingConv::AMDGPU_CS_Chain:
6703 case CallingConv::AMDGPU_CS_ChainPreserve:
6704 break;
6705 default:
6706 CheckFailed("Intrinsic can only be used from functions with the "
6707 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6708 "calling conventions",
6709 &Call);
6710 break;
6711 }
6712
6713 unsigned InactiveIdx = 1;
6714 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6715 "Value for inactive lanes must not have the `inreg` attribute",
6716 &Call);
6717 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6718 "Value for inactive lanes must be a function argument", &Call);
6719 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6720 "Value for inactive lanes must be a VGPR function argument", &Call);
6721 break;
6722 }
6723 case Intrinsic::amdgcn_call_whole_wave: {
6725 Check(F, "Indirect whole wave calls are not allowed", &Call);
6726
6727 CallingConv::ID CC = F->getCallingConv();
6728 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6729 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6730 &Call);
6731
6732 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6733
6734 Check(Call.arg_size() == F->arg_size(),
6735 "Call argument count must match callee argument count", &Call);
6736
6737 // The first argument of the call is the callee, and the first argument of
6738 // the callee is the active mask. The rest of the arguments must match.
6739 Check(F->arg_begin()->getType()->isIntegerTy(1),
6740 "Callee must have i1 as its first argument", &Call);
6741 for (auto [CallArg, FuncArg] :
6742 drop_begin(zip_equal(Call.args(), F->args()))) {
6743 Check(CallArg->getType() == FuncArg.getType(),
6744 "Argument types must match", &Call);
6745
6746 // Check that inreg attributes match between call site and function
6747 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6748 FuncArg.hasInRegAttr(),
6749 "Argument inreg attributes must match", &Call);
6750 }
6751 break;
6752 }
6753 case Intrinsic::amdgcn_s_prefetch_data: {
6754 Check(
6757 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6758 break;
6759 }
6760 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6761 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6762 Value *Src0 = Call.getArgOperand(0);
6763 Value *Src1 = Call.getArgOperand(1);
6764
6765 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6766 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6767 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6768 Call.getArgOperand(3));
6769 Check(BLGP <= 4, "invalid value for blgp format", Call,
6770 Call.getArgOperand(4));
6771
6772 // AMDGPU::MFMAScaleFormats values
6773 auto getFormatNumRegs = [](unsigned FormatVal) {
6774 switch (FormatVal) {
6775 case 0:
6776 case 1:
6777 return 8u;
6778 case 2:
6779 case 3:
6780 return 6u;
6781 case 4:
6782 return 4u;
6783 default:
6784 llvm_unreachable("invalid format value");
6785 }
6786 };
6787
6788 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6789 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6790 return false;
6791 unsigned NumElts = Ty->getNumElements();
6792 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6793 };
6794
6795 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6796 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6797 Check(isValidSrcASrcBVector(Src0Ty),
6798 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6799 Check(isValidSrcASrcBVector(Src1Ty),
6800 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6801
6802 // Permit excess registers for the format.
6803 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6804 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6805 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6806 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6807 break;
6808 }
6809 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6810 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6811 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6812 Value *Src0 = Call.getArgOperand(1);
6813 Value *Src1 = Call.getArgOperand(3);
6814
6815 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6816 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6817 Check(FmtA <= 4, "invalid value for matrix format", Call,
6818 Call.getArgOperand(0));
6819 Check(FmtB <= 4, "invalid value for matrix format", Call,
6820 Call.getArgOperand(2));
6821
6822 // AMDGPU::MatrixFMT values
6823 auto getFormatNumRegs = [](unsigned FormatVal) {
6824 switch (FormatVal) {
6825 case 0:
6826 case 1:
6827 return 16u;
6828 case 2:
6829 case 3:
6830 return 12u;
6831 case 4:
6832 return 8u;
6833 default:
6834 llvm_unreachable("invalid format value");
6835 }
6836 };
6837
6838 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6839 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6840 return false;
6841 unsigned NumElts = Ty->getNumElements();
6842 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6843 };
6844
6845 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6846 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6847 Check(isValidSrcASrcBVector(Src0Ty),
6848 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6849 Check(isValidSrcASrcBVector(Src1Ty),
6850 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6851
6852 // Permit excess registers for the format.
6853 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6854 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6855 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6856 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6857 break;
6858 }
6859 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6860 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6861 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6862 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6863 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6864 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6865 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6866 Value *PtrArg = Call.getArgOperand(0);
6867 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6869 "cooperative atomic intrinsics require a generic or global pointer",
6870 &Call, PtrArg);
6871
6872 // Last argument must be a MD string
6874 MDNode *MD = cast<MDNode>(Op->getMetadata());
6875 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6876 "cooperative atomic intrinsics require that the last argument is a "
6877 "metadata string",
6878 &Call, Op);
6879 break;
6880 }
6881 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6882 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6883 Value *V = Call.getArgOperand(0);
6884 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6885 Check(RegCount % 8 == 0,
6886 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6887 break;
6888 }
6889 case Intrinsic::experimental_convergence_entry:
6890 case Intrinsic::experimental_convergence_anchor:
6891 break;
6892 case Intrinsic::experimental_convergence_loop:
6893 break;
6894 case Intrinsic::ptrmask: {
6895 Type *Ty0 = Call.getArgOperand(0)->getType();
6896 Type *Ty1 = Call.getArgOperand(1)->getType();
6898 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6899 "of pointers",
6900 &Call);
6901 Check(
6902 Ty0->isVectorTy() == Ty1->isVectorTy(),
6903 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6904 &Call);
6905 if (Ty0->isVectorTy())
6906 Check(cast<VectorType>(Ty0)->getElementCount() ==
6907 cast<VectorType>(Ty1)->getElementCount(),
6908 "llvm.ptrmask intrinsic arguments must have the same number of "
6909 "elements",
6910 &Call);
6911 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6912 "llvm.ptrmask intrinsic second argument bitwidth must match "
6913 "pointer index type size of first argument",
6914 &Call);
6915 break;
6916 }
6917 case Intrinsic::thread_pointer: {
6919 DL.getDefaultGlobalsAddressSpace(),
6920 "llvm.thread.pointer intrinsic return type must be for the globals "
6921 "address space",
6922 &Call);
6923 break;
6924 }
6925 case Intrinsic::threadlocal_address: {
6926 const Value &Arg0 = *Call.getArgOperand(0);
6927 Check(isa<GlobalValue>(Arg0),
6928 "llvm.threadlocal.address first argument must be a GlobalValue");
6929 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6930 "llvm.threadlocal.address operand isThreadLocal() must be true");
6931 break;
6932 }
6933 case Intrinsic::lifetime_start:
6934 case Intrinsic::lifetime_end: {
6937 "llvm.lifetime.start/end can only be used on alloca or poison",
6938 &Call);
6939 break;
6940 }
6941 };
6942
6943 // Verify that there aren't any unmediated control transfers between funclets.
6945 Function *F = Call.getParent()->getParent();
6946 if (F->hasPersonalityFn() &&
6947 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6948 // Run EH funclet coloring on-demand and cache results for other intrinsic
6949 // calls in this function
6950 if (BlockEHFuncletColors.empty())
6951 BlockEHFuncletColors = colorEHFunclets(*F);
6952
6953 // Check for catch-/cleanup-pad in first funclet block
6954 bool InEHFunclet = false;
6955 BasicBlock *CallBB = Call.getParent();
6956 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6957 assert(CV.size() > 0 && "Uncolored block");
6958 for (BasicBlock *ColorFirstBB : CV)
6959 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6960 It != ColorFirstBB->end())
6962 InEHFunclet = true;
6963
6964 // Check for funclet operand bundle
6965 bool HasToken = false;
6966 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6968 HasToken = true;
6969
6970 // This would cause silent code truncation in WinEHPrepare
6971 if (InEHFunclet)
6972 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6973 }
6974 }
6975}
6976
6977/// Carefully grab the subprogram from a local scope.
6978///
6979/// This carefully grabs the subprogram from a local scope, avoiding the
6980/// built-in assertions that would typically fire.
6982 if (!LocalScope)
6983 return nullptr;
6984
6985 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6986 return SP;
6987
6988 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6989 return getSubprogram(LB->getRawScope());
6990
6991 // Just return null; broken scope chains are checked elsewhere.
6992 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6993 return nullptr;
6994}
6995
6996void Verifier::visit(DbgLabelRecord &DLR) {
6998 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6999
7000 // Ignore broken !dbg attachments; they're checked elsewhere.
7001 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7002 if (!isa<DILocation>(N))
7003 return;
7004
7005 BasicBlock *BB = DLR.getParent();
7006 Function *F = BB ? BB->getParent() : nullptr;
7007
7008 // The scopes for variables and !dbg attachments must agree.
7009 DILabel *Label = DLR.getLabel();
7010 DILocation *Loc = DLR.getDebugLoc();
7011 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7012
7013 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7014 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7015 if (!LabelSP || !LocSP)
7016 return;
7017
7018 CheckDI(LabelSP == LocSP,
7019 "mismatched subprogram between #dbg_label label and !dbg attachment",
7020 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7021 Loc->getScope()->getSubprogram());
7022}
7023
7024void Verifier::visit(DbgVariableRecord &DVR) {
7025 BasicBlock *BB = DVR.getParent();
7026 Function *F = BB->getParent();
7027
7028 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7029 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7030 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7031 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7032
7033 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7034 // DIArgList, or an empty MDNode (which is a legacy representation for an
7035 // "undef" location).
7036 auto *MD = DVR.getRawLocation();
7037 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7038 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7039 "invalid #dbg record address/value", &DVR, MD, BB, F);
7040 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7041 visitValueAsMetadata(*VAM, F);
7042 if (DVR.isDbgDeclare()) {
7043 // Allow integers here to support inttoptr salvage.
7044 Type *Ty = VAM->getValue()->getType();
7045 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7046 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7047 F);
7048 }
7049 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7050 visitDIArgList(*AL, F);
7051 }
7052
7054 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7055 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7056
7058 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7059 F);
7060 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7061
7062 if (DVR.isDbgAssign()) {
7064 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7065 F);
7066 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7067 AreDebugLocsAllowed::No);
7068
7069 const auto *RawAddr = DVR.getRawAddress();
7070 // Similarly to the location above, the address for an assign
7071 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7072 // represents an undef address.
7073 CheckDI(
7074 isa<ValueAsMetadata>(RawAddr) ||
7075 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7076 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7077 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7078 visitValueAsMetadata(*VAM, F);
7079
7081 "invalid #dbg_assign address expression", &DVR,
7082 DVR.getRawAddressExpression(), BB, F);
7083 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7084
7085 // All of the linked instructions should be in the same function as DVR.
7086 for (Instruction *I : at::getAssignmentInsts(&DVR))
7087 CheckDI(DVR.getFunction() == I->getFunction(),
7088 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7089 }
7090
7091 // This check is redundant with one in visitLocalVariable().
7092 DILocalVariable *Var = DVR.getVariable();
7093 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7094 BB, F);
7095
7096 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7097 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7098 &DVR, DLNode, BB, F);
7099 DILocation *Loc = DVR.getDebugLoc();
7100
7101 // The scopes for variables and !dbg attachments must agree.
7102 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7103 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7104 if (!VarSP || !LocSP)
7105 return; // Broken scope chains are checked elsewhere.
7106
7107 CheckDI(VarSP == LocSP,
7108 "mismatched subprogram between #dbg record variable and DILocation",
7109 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7110 Loc->getScope()->getSubprogram(), BB, F);
7111
7112 verifyFnArgs(DVR);
7113}
7114
7115void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7116 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7117 auto *RetTy = cast<VectorType>(VPCast->getType());
7118 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7119 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7120 "VP cast intrinsic first argument and result vector lengths must be "
7121 "equal",
7122 *VPCast);
7123
7124 switch (VPCast->getIntrinsicID()) {
7125 default:
7126 llvm_unreachable("Unknown VP cast intrinsic");
7127 case Intrinsic::vp_trunc:
7128 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7129 "llvm.vp.trunc intrinsic first argument and result element type "
7130 "must be integer",
7131 *VPCast);
7132 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7133 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7134 "larger than the bit size of the return type",
7135 *VPCast);
7136 break;
7137 case Intrinsic::vp_zext:
7138 case Intrinsic::vp_sext:
7139 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7140 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7141 "element type must be integer",
7142 *VPCast);
7143 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7144 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7145 "argument must be smaller than the bit size of the return type",
7146 *VPCast);
7147 break;
7148 case Intrinsic::vp_fptoui:
7149 case Intrinsic::vp_fptosi:
7150 case Intrinsic::vp_lrint:
7151 case Intrinsic::vp_llrint:
7152 Check(
7153 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7154 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7155 "type must be floating-point and result element type must be integer",
7156 *VPCast);
7157 break;
7158 case Intrinsic::vp_uitofp:
7159 case Intrinsic::vp_sitofp:
7160 Check(
7161 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7162 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7163 "type must be integer and result element type must be floating-point",
7164 *VPCast);
7165 break;
7166 case Intrinsic::vp_fptrunc:
7167 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7168 "llvm.vp.fptrunc intrinsic first argument and result element type "
7169 "must be floating-point",
7170 *VPCast);
7171 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7172 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7173 "larger than the bit size of the return type",
7174 *VPCast);
7175 break;
7176 case Intrinsic::vp_fpext:
7177 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7178 "llvm.vp.fpext intrinsic first argument and result element type "
7179 "must be floating-point",
7180 *VPCast);
7181 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7182 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7183 "smaller than the bit size of the return type",
7184 *VPCast);
7185 break;
7186 case Intrinsic::vp_ptrtoint:
7187 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7188 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7189 "pointer and result element type must be integer",
7190 *VPCast);
7191 break;
7192 case Intrinsic::vp_inttoptr:
7193 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7194 "llvm.vp.inttoptr intrinsic first argument element type must be "
7195 "integer and result element type must be pointer",
7196 *VPCast);
7197 break;
7198 }
7199 }
7200
7201 switch (VPI.getIntrinsicID()) {
7202 case Intrinsic::vp_fcmp: {
7203 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7205 "invalid predicate for VP FP comparison intrinsic", &VPI);
7206 break;
7207 }
7208 case Intrinsic::vp_icmp: {
7209 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7211 "invalid predicate for VP integer comparison intrinsic", &VPI);
7212 break;
7213 }
7214 case Intrinsic::vp_is_fpclass: {
7215 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7216 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7217 "unsupported bits for llvm.vp.is.fpclass test mask");
7218 break;
7219 }
7220 case Intrinsic::experimental_vp_splice: {
7221 VectorType *VecTy = cast<VectorType>(VPI.getType());
7222 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7223 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7224 if (VPI.getParent() && VPI.getParent()->getParent()) {
7225 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7226 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7227 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7228 }
7229 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7230 (Idx >= 0 && Idx < KnownMinNumElements),
7231 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7232 "known minimum number of elements in the vector. For scalable "
7233 "vectors the minimum number of elements is determined from "
7234 "vscale_range.",
7235 &VPI);
7236 break;
7237 }
7238 }
7239}
7240
7241void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7242 unsigned NumOperands = FPI.getNonMetadataArgCount();
7243 bool HasRoundingMD =
7245
7246 // Add the expected number of metadata operands.
7247 NumOperands += (1 + HasRoundingMD);
7248
7249 // Compare intrinsics carry an extra predicate metadata operand.
7251 NumOperands += 1;
7252 Check((FPI.arg_size() == NumOperands),
7253 "invalid arguments for constrained FP intrinsic", &FPI);
7254
7255 switch (FPI.getIntrinsicID()) {
7256 case Intrinsic::experimental_constrained_lrint:
7257 case Intrinsic::experimental_constrained_llrint: {
7258 Type *ValTy = FPI.getArgOperand(0)->getType();
7259 Type *ResultTy = FPI.getType();
7260 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7261 "Intrinsic does not support vectors", &FPI);
7262 break;
7263 }
7264
7265 case Intrinsic::experimental_constrained_lround:
7266 case Intrinsic::experimental_constrained_llround: {
7267 Type *ValTy = FPI.getArgOperand(0)->getType();
7268 Type *ResultTy = FPI.getType();
7269 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7270 "Intrinsic does not support vectors", &FPI);
7271 break;
7272 }
7273
7274 case Intrinsic::experimental_constrained_fcmp:
7275 case Intrinsic::experimental_constrained_fcmps: {
7276 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7278 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7279 break;
7280 }
7281
7282 case Intrinsic::experimental_constrained_fptosi:
7283 case Intrinsic::experimental_constrained_fptoui: {
7284 Value *Operand = FPI.getArgOperand(0);
7285 ElementCount SrcEC;
7286 Check(Operand->getType()->isFPOrFPVectorTy(),
7287 "Intrinsic first argument must be floating point", &FPI);
7288 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7289 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7290 }
7291
7292 Operand = &FPI;
7293 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7294 "Intrinsic first argument and result disagree on vector use", &FPI);
7295 Check(Operand->getType()->isIntOrIntVectorTy(),
7296 "Intrinsic result must be an integer", &FPI);
7297 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7298 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7299 "Intrinsic first argument and result vector lengths must be equal",
7300 &FPI);
7301 }
7302 break;
7303 }
7304
7305 case Intrinsic::experimental_constrained_sitofp:
7306 case Intrinsic::experimental_constrained_uitofp: {
7307 Value *Operand = FPI.getArgOperand(0);
7308 ElementCount SrcEC;
7309 Check(Operand->getType()->isIntOrIntVectorTy(),
7310 "Intrinsic first argument must be integer", &FPI);
7311 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7312 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7313 }
7314
7315 Operand = &FPI;
7316 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7317 "Intrinsic first argument and result disagree on vector use", &FPI);
7318 Check(Operand->getType()->isFPOrFPVectorTy(),
7319 "Intrinsic result must be a floating point", &FPI);
7320 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7321 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7322 "Intrinsic first argument and result vector lengths must be equal",
7323 &FPI);
7324 }
7325 break;
7326 }
7327
7328 case Intrinsic::experimental_constrained_fptrunc:
7329 case Intrinsic::experimental_constrained_fpext: {
7330 Value *Operand = FPI.getArgOperand(0);
7331 Type *OperandTy = Operand->getType();
7332 Value *Result = &FPI;
7333 Type *ResultTy = Result->getType();
7334 Check(OperandTy->isFPOrFPVectorTy(),
7335 "Intrinsic first argument must be FP or FP vector", &FPI);
7336 Check(ResultTy->isFPOrFPVectorTy(),
7337 "Intrinsic result must be FP or FP vector", &FPI);
7338 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7339 "Intrinsic first argument and result disagree on vector use", &FPI);
7340 if (OperandTy->isVectorTy()) {
7341 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7342 cast<VectorType>(ResultTy)->getElementCount(),
7343 "Intrinsic first argument and result vector lengths must be equal",
7344 &FPI);
7345 }
7346 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7347 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7348 "Intrinsic first argument's type must be larger than result type",
7349 &FPI);
7350 } else {
7351 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7352 "Intrinsic first argument's type must be smaller than result type",
7353 &FPI);
7354 }
7355 break;
7356 }
7357
7358 default:
7359 break;
7360 }
7361
7362 // If a non-metadata argument is passed in a metadata slot then the
7363 // error will be caught earlier when the incorrect argument doesn't
7364 // match the specification in the intrinsic call table. Thus, no
7365 // argument type check is needed here.
7366
7367 Check(FPI.getExceptionBehavior().has_value(),
7368 "invalid exception behavior argument", &FPI);
7369 if (HasRoundingMD) {
7370 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7371 &FPI);
7372 }
7373}
7374
7375void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7376 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7377 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7378
7379 // We don't know whether this intrinsic verified correctly.
7380 if (!V || !E || !E->isValid())
7381 return;
7382
7383 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7384 auto Fragment = E->getFragmentInfo();
7385 if (!Fragment)
7386 return;
7387
7388 // The frontend helps out GDB by emitting the members of local anonymous
7389 // unions as artificial local variables with shared storage. When SROA splits
7390 // the storage for artificial local variables that are smaller than the entire
7391 // union, the overhang piece will be outside of the allotted space for the
7392 // variable and this check fails.
7393 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7394 if (V->isArtificial())
7395 return;
7396
7397 verifyFragmentExpression(*V, *Fragment, &DVR);
7398}
7399
7400template <typename ValueOrMetadata>
7401void Verifier::verifyFragmentExpression(const DIVariable &V,
7403 ValueOrMetadata *Desc) {
7404 // If there's no size, the type is broken, but that should be checked
7405 // elsewhere.
7406 auto VarSize = V.getSizeInBits();
7407 if (!VarSize)
7408 return;
7409
7410 unsigned FragSize = Fragment.SizeInBits;
7411 unsigned FragOffset = Fragment.OffsetInBits;
7412 CheckDI(FragSize + FragOffset <= *VarSize,
7413 "fragment is larger than or outside of variable", Desc, &V);
7414 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7415}
7416
7417void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7418 // This function does not take the scope of noninlined function arguments into
7419 // account. Don't run it if current function is nodebug, because it may
7420 // contain inlined debug intrinsics.
7421 if (!HasDebugInfo)
7422 return;
7423
7424 // For performance reasons only check non-inlined ones.
7425 if (DVR.getDebugLoc()->getInlinedAt())
7426 return;
7427
7428 DILocalVariable *Var = DVR.getVariable();
7429 CheckDI(Var, "#dbg record without variable");
7430
7431 unsigned ArgNo = Var->getArg();
7432 if (!ArgNo)
7433 return;
7434
7435 // Verify there are no duplicate function argument debug info entries.
7436 // These will cause hard-to-debug assertions in the DWARF backend.
7437 if (DebugFnArgs.size() < ArgNo)
7438 DebugFnArgs.resize(ArgNo, nullptr);
7439
7440 auto *Prev = DebugFnArgs[ArgNo - 1];
7441 DebugFnArgs[ArgNo - 1] = Var;
7442 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7443 Prev, Var);
7444}
7445
7446void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7447 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7448
7449 // We don't know whether this intrinsic verified correctly.
7450 if (!E || !E->isValid())
7451 return;
7452
7454 Value *VarValue = DVR.getVariableLocationOp(0);
7455 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7456 return;
7457 // We allow EntryValues for swift async arguments, as they have an
7458 // ABI-guarantee to be turned into a specific register.
7459 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7460 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7461 return;
7462 }
7463
7464 CheckDI(!E->isEntryValue(),
7465 "Entry values are only allowed in MIR unless they target a "
7466 "swiftasync Argument",
7467 &DVR);
7468}
7469
7470void Verifier::verifyCompileUnits() {
7471 // When more than one Module is imported into the same context, such as during
7472 // an LTO build before linking the modules, ODR type uniquing may cause types
7473 // to point to a different CU. This check does not make sense in this case.
7474 if (M.getContext().isODRUniquingDebugTypes())
7475 return;
7476 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7477 SmallPtrSet<const Metadata *, 2> Listed;
7478 if (CUs)
7479 Listed.insert_range(CUs->operands());
7480 for (const auto *CU : CUVisited)
7481 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7482 CUVisited.clear();
7483}
7484
7485void Verifier::verifyDeoptimizeCallingConvs() {
7486 if (DeoptimizeDeclarations.empty())
7487 return;
7488
7489 const Function *First = DeoptimizeDeclarations[0];
7490 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7491 Check(First->getCallingConv() == F->getCallingConv(),
7492 "All llvm.experimental.deoptimize declarations must have the same "
7493 "calling convention",
7494 First, F);
7495 }
7496}
7497
7498void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7499 const OperandBundleUse &BU) {
7500 FunctionType *FTy = Call.getFunctionType();
7501
7502 Check((FTy->getReturnType()->isPointerTy() ||
7503 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7504 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7505 "function returning a pointer or a non-returning function that has a "
7506 "void return type",
7507 Call);
7508
7509 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7510 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7511 "an argument",
7512 Call);
7513
7514 auto *Fn = cast<Function>(BU.Inputs.front());
7515 Intrinsic::ID IID = Fn->getIntrinsicID();
7516
7517 if (IID) {
7518 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7519 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7520 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7521 "invalid function argument", Call);
7522 } else {
7523 StringRef FnName = Fn->getName();
7524 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7525 FnName == "objc_claimAutoreleasedReturnValue" ||
7526 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7527 "invalid function argument", Call);
7528 }
7529}
7530
7531void Verifier::verifyNoAliasScopeDecl() {
7532 if (NoAliasScopeDecls.empty())
7533 return;
7534
7535 // only a single scope must be declared at a time.
7536 for (auto *II : NoAliasScopeDecls) {
7537 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7538 "Not a llvm.experimental.noalias.scope.decl ?");
7539 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7541 Check(ScopeListMV != nullptr,
7542 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7543 "argument",
7544 II);
7545
7546 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7547 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7548 Check(ScopeListMD->getNumOperands() == 1,
7549 "!id.scope.list must point to a list with a single scope", II);
7550 visitAliasScopeListMetadata(ScopeListMD);
7551 }
7552
7553 // Only check the domination rule when requested. Once all passes have been
7554 // adapted this option can go away.
7556 return;
7557
7558 // Now sort the intrinsics based on the scope MDNode so that declarations of
7559 // the same scopes are next to each other.
7560 auto GetScope = [](IntrinsicInst *II) {
7561 const auto *ScopeListMV = cast<MetadataAsValue>(
7563 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7564 };
7565
7566 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7567 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7568 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7569 return GetScope(Lhs) < GetScope(Rhs);
7570 };
7571
7572 llvm::sort(NoAliasScopeDecls, Compare);
7573
7574 // Go over the intrinsics and check that for the same scope, they are not
7575 // dominating each other.
7576 auto ItCurrent = NoAliasScopeDecls.begin();
7577 while (ItCurrent != NoAliasScopeDecls.end()) {
7578 auto CurScope = GetScope(*ItCurrent);
7579 auto ItNext = ItCurrent;
7580 do {
7581 ++ItNext;
7582 } while (ItNext != NoAliasScopeDecls.end() &&
7583 GetScope(*ItNext) == CurScope);
7584
7585 // [ItCurrent, ItNext) represents the declarations for the same scope.
7586 // Ensure they are not dominating each other.. but only if it is not too
7587 // expensive.
7588 if (ItNext - ItCurrent < 32)
7589 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7590 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7591 if (I != J)
7592 Check(!DT.dominates(I, J),
7593 "llvm.experimental.noalias.scope.decl dominates another one "
7594 "with the same scope",
7595 I);
7596 ItCurrent = ItNext;
7597 }
7598}
7599
7600//===----------------------------------------------------------------------===//
7601// Implement the public interfaces to this file...
7602//===----------------------------------------------------------------------===//
7603
7605 Function &F = const_cast<Function &>(f);
7606
7607 // Don't use a raw_null_ostream. Printing IR is expensive.
7608 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7609
7610 // Note that this function's return value is inverted from what you would
7611 // expect of a function called "verify".
7612 return !V.verify(F);
7613}
7614
7616 bool *BrokenDebugInfo) {
7617 // Don't use a raw_null_ostream. Printing IR is expensive.
7618 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7619
7620 bool Broken = false;
7621 for (const Function &F : M)
7622 Broken |= !V.verify(F);
7623
7624 Broken |= !V.verify();
7625 if (BrokenDebugInfo)
7626 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7627 // Note that this function's return value is inverted from what you would
7628 // expect of a function called "verify".
7629 return Broken;
7630}
7631
7632namespace {
7633
7634struct VerifierLegacyPass : public FunctionPass {
7635 static char ID;
7636
7637 std::unique_ptr<Verifier> V;
7638 bool FatalErrors = true;
7639
7640 VerifierLegacyPass() : FunctionPass(ID) {
7642 }
7643 explicit VerifierLegacyPass(bool FatalErrors)
7644 : FunctionPass(ID),
7645 FatalErrors(FatalErrors) {
7647 }
7648
7649 bool doInitialization(Module &M) override {
7650 V = std::make_unique<Verifier>(
7651 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7652 return false;
7653 }
7654
7655 bool runOnFunction(Function &F) override {
7656 if (!V->verify(F) && FatalErrors) {
7657 errs() << "in function " << F.getName() << '\n';
7658 report_fatal_error("Broken function found, compilation aborted!");
7659 }
7660 return false;
7661 }
7662
7663 bool doFinalization(Module &M) override {
7664 bool HasErrors = false;
7665 for (Function &F : M)
7666 if (F.isDeclaration())
7667 HasErrors |= !V->verify(F);
7668
7669 HasErrors |= !V->verify();
7670 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7671 report_fatal_error("Broken module found, compilation aborted!");
7672 return false;
7673 }
7674
7675 void getAnalysisUsage(AnalysisUsage &AU) const override {
7676 AU.setPreservesAll();
7677 }
7678};
7679
7680} // end anonymous namespace
7681
7682/// Helper to issue failure from the TBAA verification
7683template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7684 if (Diagnostic)
7685 return Diagnostic->CheckFailed(Args...);
7686}
7687
// If condition \p C is false, report the remaining arguments through
// CheckFailed(...) and bail out of the enclosing function with `false`.
// (Comments cannot go inside the macro body without breaking the line
// continuations, hence this header note.)
#define CheckTBAA(C, ...) \
  do { \
    if (!(C)) { \
      CheckFailed(__VA_ARGS__); \
      return false; \
    } \
  } while (false)
7695
7696/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7697/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7698/// struct-type node describing an aggregate data structure (like a struct).
7699TBAAVerifier::TBAABaseNodeSummary
7700TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7701 bool IsNewFormat) {
7702 if (BaseNode->getNumOperands() < 2) {
7703 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7704 return {true, ~0u};
7705 }
7706
7707 auto Itr = TBAABaseNodes.find(BaseNode);
7708 if (Itr != TBAABaseNodes.end())
7709 return Itr->second;
7710
7711 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7712 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7713 (void)InsertResult;
7714 assert(InsertResult.second && "We just checked!");
7715 return Result;
7716}
7717
7718TBAAVerifier::TBAABaseNodeSummary
7719TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7720 const MDNode *BaseNode, bool IsNewFormat) {
7721 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7722
7723 if (BaseNode->getNumOperands() == 2) {
7724 // Scalar nodes can only be accessed at offset 0.
7725 return isValidScalarTBAANode(BaseNode)
7726 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7727 : InvalidNode;
7728 }
7729
7730 if (IsNewFormat) {
7731 if (BaseNode->getNumOperands() % 3 != 0) {
7732 CheckFailed("Access tag nodes must have the number of operands that is a "
7733 "multiple of 3!", BaseNode);
7734 return InvalidNode;
7735 }
7736 } else {
7737 if (BaseNode->getNumOperands() % 2 != 1) {
7738 CheckFailed("Struct tag nodes must have an odd number of operands!",
7739 BaseNode);
7740 return InvalidNode;
7741 }
7742 }
7743
7744 // Check the type size field.
7745 if (IsNewFormat) {
7746 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7747 BaseNode->getOperand(1));
7748 if (!TypeSizeNode) {
7749 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7750 return InvalidNode;
7751 }
7752 }
7753
7754 // Check the type name field. In the new format it can be anything.
7755 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7756 CheckFailed("Struct tag nodes have a string as their first operand",
7757 BaseNode);
7758 return InvalidNode;
7759 }
7760
7761 bool Failed = false;
7762
7763 std::optional<APInt> PrevOffset;
7764 unsigned BitWidth = ~0u;
7765
7766 // We've already checked that BaseNode is not a degenerate root node with one
7767 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7768 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7769 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7770 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7771 Idx += NumOpsPerField) {
7772 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7773 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7774 if (!isa<MDNode>(FieldTy)) {
7775 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7776 Failed = true;
7777 continue;
7778 }
7779
7780 auto *OffsetEntryCI =
7782 if (!OffsetEntryCI) {
7783 CheckFailed("Offset entries must be constants!", I, BaseNode);
7784 Failed = true;
7785 continue;
7786 }
7787
7788 if (BitWidth == ~0u)
7789 BitWidth = OffsetEntryCI->getBitWidth();
7790
7791 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7792 CheckFailed(
7793 "Bitwidth between the offsets and struct type entries must match", I,
7794 BaseNode);
7795 Failed = true;
7796 continue;
7797 }
7798
7799 // NB! As far as I can tell, we generate a non-strictly increasing offset
7800 // sequence only from structs that have zero size bit fields. When
7801 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7802 // pick the field lexically the latest in struct type metadata node. This
7803 // mirrors the actual behavior of the alias analysis implementation.
7804 bool IsAscending =
7805 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7806
7807 if (!IsAscending) {
7808 CheckFailed("Offsets must be increasing!", I, BaseNode);
7809 Failed = true;
7810 }
7811
7812 PrevOffset = OffsetEntryCI->getValue();
7813
7814 if (IsNewFormat) {
7815 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7816 BaseNode->getOperand(Idx + 2));
7817 if (!MemberSizeNode) {
7818 CheckFailed("Member size entries must be constants!", I, BaseNode);
7819 Failed = true;
7820 continue;
7821 }
7822 }
7823 }
7824
7825 return Failed ? InvalidNode
7826 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7827}
7828
7829static bool IsRootTBAANode(const MDNode *MD) {
7830 return MD->getNumOperands() < 2;
7831}
7832
7833static bool IsScalarTBAANodeImpl(const MDNode *MD,
7835 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7836 return false;
7837
7838 if (!isa<MDString>(MD->getOperand(0)))
7839 return false;
7840
7841 if (MD->getNumOperands() == 3) {
7843 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7844 return false;
7845 }
7846
7847 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7848 return Parent && Visited.insert(Parent).second &&
7849 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7850}
7851
7852bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7853 auto ResultIt = TBAAScalarNodes.find(MD);
7854 if (ResultIt != TBAAScalarNodes.end())
7855 return ResultIt->second;
7856
7857 SmallPtrSet<const MDNode *, 4> Visited;
7858 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7859 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7860 (void)InsertResult;
7861 assert(InsertResult.second && "Just checked!");
7862
7863 return Result;
7864}
7865
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields are (type, offset) pairs in the old format and
  // (type, offset, size) triples in the new format; offsets live at Idx + 1.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    // Find the first field whose offset is strictly greater than Offset; the
    // containing field is the one immediately preceding it.
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Offset lies below the first field's offset -- no field contains it.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      // Rebase Offset to be relative to the start of the selected field.
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Every field offset is <= Offset, so the last field is the container.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
7909
7911 if (!Type || Type->getNumOperands() < 3)
7912 return false;
7913
7914 // In the new format type nodes shall have a reference to the parent type as
7915 // its first operand.
7916 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7917}
7918
7920 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7921 MD);
7922
7923 if (I)
7927 "This instruction shall not have a TBAA access tag!", I);
7928
7929 bool IsStructPathTBAA =
7930 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7931
7932 CheckTBAA(IsStructPathTBAA,
7933 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7934 I);
7935
7936 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7937 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7938
7939 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7940
7941 if (IsNewFormat) {
7942 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7943 "Access tag metadata must have either 4 or 5 operands", I, MD);
7944 } else {
7945 CheckTBAA(MD->getNumOperands() < 5,
7946 "Struct tag metadata must have either 3 or 4 operands", I, MD);
7947 }
7948
7949 // Check the access size field.
7950 if (IsNewFormat) {
7951 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7952 MD->getOperand(3));
7953 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
7954 }
7955
7956 // Check the immutability flag.
7957 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7958 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7959 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7960 MD->getOperand(ImmutabilityFlagOpNo));
7961 CheckTBAA(IsImmutableCI,
7962 "Immutability tag on struct tag metadata must be a constant", I,
7963 MD);
7964 CheckTBAA(
7965 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7966 "Immutability part of the struct tag metadata must be either 0 or 1", I,
7967 MD);
7968 }
7969
7970 CheckTBAA(BaseNode && AccessType,
7971 "Malformed struct tag metadata: base and access-type "
7972 "should be non-null and point to Metadata nodes",
7973 I, MD, BaseNode, AccessType);
7974
7975 if (!IsNewFormat) {
7976 CheckTBAA(isValidScalarTBAANode(AccessType),
7977 "Access type node must be a valid scalar type", I, MD,
7978 AccessType);
7979 }
7980
7982 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
7983
7984 APInt Offset = OffsetCI->getValue();
7985 bool SeenAccessTypeInPath = false;
7986
7987 SmallPtrSet<MDNode *, 4> StructPath;
7988
7989 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7990 BaseNode =
7991 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
7992 if (!StructPath.insert(BaseNode).second) {
7993 CheckFailed("Cycle detected in struct path", I, MD);
7994 return false;
7995 }
7996
7997 bool Invalid;
7998 unsigned BaseNodeBitWidth;
7999 std::tie(Invalid, BaseNodeBitWidth) =
8000 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8001
8002 // If the base node is invalid in itself, then we've already printed all the
8003 // errors we wanted to print.
8004 if (Invalid)
8005 return false;
8006
8007 SeenAccessTypeInPath |= BaseNode == AccessType;
8008
8009 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8010 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8011 MD, &Offset);
8012
8013 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8014 (BaseNodeBitWidth == 0 && Offset == 0) ||
8015 (IsNewFormat && BaseNodeBitWidth == ~0u),
8016 "Access bit-width not the same as description bit-width", I, MD,
8017 BaseNodeBitWidth, Offset.getBitWidth());
8018
8019 if (IsNewFormat && SeenAccessTypeInPath)
8020 break;
8021 }
8022
8023 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8024 MD);
8025 return true;
8026}
8027
// Pass identification for the legacy pass manager, plus its registration
// under the command-line name "verify".
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8030
8032 return new VerifierLegacyPass(FatalErrors);
8033}
8034
8035AnalysisKey VerifierAnalysis::Key;
8042
8047
8049 auto Res = AM.getResult<VerifierAnalysis>(M);
8050 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8051 report_fatal_error("Broken module found, compilation aborted!");
8052
8053 return PreservedAnalyses::all();
8054}
8055
8057 auto res = AM.getResult<VerifierAnalysis>(F);
8058 if (res.IRBroken && FatalErrors)
8059 report_fatal_error("Broken function found, compilation aborted!");
8060
8061 return PreservedAnalyses::all();
8062}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:664
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast from unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:811
@ DW_MACINFO_start_file
Definition Dwarf.h:812
@ DW_MACINFO_define
Definition Dwarf.h:810
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2452
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:677
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * BranchWeights
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144