ThreadSafety.cpp
//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//
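//
// Illustrative sketch (adapted from the documentation linked above) of the
// kind of code this analysis checks:
//
//   class BankAccount {
//     Mutex Mu;
//     int Balance __attribute__((guarded_by(Mu)));
//
//     void depositImpl(int Amount) {
//       Balance += Amount;   // warning: writing Balance requires holding Mu
//     }
//     void deposit(int Amount) {
//       Mu.Lock();
//       Balance += Amount;   // OK: Mu is held
//       Mu.Unlock();
//     }
//   };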

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  llvm::BumpPtrAllocator &Alloc;
  std::vector<const FactEntry *> Facts;

public:
  FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  template <typename T, typename... ArgTypes>
  T *createFact(ArgTypes &&...Args) {
    static_assert(std::is_trivially_destructible_v<T>);
    return T::create(Alloc, std::forward<ArgTypes>(Args)...);
  }

  FactID newFact(const FactEntry *Entry) {
    Facts.push_back(Entry);
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, const FactEntry *Entry) {
    FactID F = FM.newFact(Entry);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    const FactEntry *Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(Entry);
    *It = F;
    return F;
  }

  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    const FactEntry *Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), Entry);
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};
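
// Note (illustrative): the lookup API above is deliberately pattern-based.
// findLock performs an exact `matches` comparison, findLockUniv also accepts
// a universal capability, and findPartialMatch accepts capabilities that only
// partially overlap the query; this is why the class comment above prefers a
// small vector with linear search over a hash-keyed container.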

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo *insertAttrExprs(const ValueDecl *Vd,
                              ThreadSafetyAnalyzer &Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl *Vd,
                        const FactSet &FSet,
                        ThreadSafetyAnalyzer &Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable
// DeclRefs with their SSA-names. Instead, we compute a Context for each point
// in the code, which maps local variables to the appropriate SSA-name. This
// map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
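//
// For example (illustrative; the worked examples in the traverseCFG comment
// below use the same notation):
//
//   int x = 0;   // Context: { x -> x1 },  VarDefinitions: x1 = 0
//   x = y + 1;   // Context: { x -> x2 },  VarDefinitions: x2 = y + 1, x1 = 0
//
// A later analysis that encounters a use of x after the second statement can
// look up x2 and interpret the expression `y + 1` in the context that was
// current at the point of the assignment.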
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition *lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr *lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].Ref;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap *VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map.
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}
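
// Illustrative example of why assignments are tracked (a sketch; TryLock
// stands for any function annotated as a try-acquire):
//
//   bool Ok = mu.TryLock();   // context maps Ok to the TryLock call
//   if (Ok) {                 // the branch condition is resolved through the
//     ...                     // map, so the lock is known to be held here
//   }
//
// A compound assignment such as `Ok |= f()` instead clears the definition,
// since the resulting value is no longer a single known expression.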

// Invalidates local variable definitions if the variable escapes.
void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
  const FunctionDecl *FD = CE->getDirectCallee();
  if (!FD)
    return;

  // Heuristic for likely-benign functions that pass by mutable reference. This
  // is needed to avoid a slew of false positives due to mutable reference
  // passing where the captured reference is usually passed on by-value.
  if (const IdentifierInfo *II = FD->getIdentifier()) {
    // Any kind of std::bind-like functions.
    if (II->isStr("bind") || II->isStr("bind_front"))
      return;
  }

  // Invalidate local variable definitions that are passed by non-const
  // reference or non-const pointer.
  for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
    if (Idx >= FD->getNumParams())
      break;

    const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
    const ParmVarDecl *PVD = FD->getParamDecl(Idx);
    QualType ParamType = PVD->getType();

    // Potential reassignment if passed by non-const reference / pointer.
    const ValueDecl *VDec = nullptr;
    if (ParamType->isReferenceType() &&
        !ParamType->getPointeeType().isConstQualified()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
        VDec = DRE->getDecl();
    } else if (ParamType->isPointerType() &&
               !ParamType->getPointeeType().isConstQualified()) {
      Arg = Arg->IgnoreParenCasts();
      if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
        if (UO->getOpcode() == UO_AddrOf) {
          const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
          if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
            VDec = DRE->getDecl();
        }
      }
    }

    if (VDec && Ctx.lookup(VDec)) {
      Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(CE, Ctx);
    }
  }
}
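
// For instance (illustrative; maybeFlip is a hypothetical function taking a
// bool*):
//
//   bool Ok = mu.TryLock();
//   maybeFlip(&Ok);     // Ok escapes via a non-const pointer, so its
//                       // definition is cleared at this call
//   if (Ok) { ... }     // Ok can no longer be traced back to the trylock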

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *I2 = C2.lookup(Dec);
    if (!I2) {
      // The variable doesn't exist on the second path.
      Result = removeDefinition(Dec, Result);
    } else if (getCanonicalDefinitionID(P.second) !=
               getCanonicalDefinitionID(*I2)) {
      // If the canonical definitions mismatch, the underlying definitions are
      // different; invalidate.
      Result = clearDefinition(Dec, Result);
    }
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    const unsigned I1 = P.second;
    VarDefinition *VDef = &VarDefinitions[I1];
    assert(VDef->isReference());

    const unsigned *I2 = C2.lookup(P.first);
    if (!I2) {
      // Variable does not exist at the end of the loop; invalidate.
      VDef->Ref = 0;
      continue;
    }

    // Compare the canonical IDs. This correctly handles chains of references
    // and determines if the variable is truly loop-invariant.
    if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) {
      VDef->Ref = 0; // Mark this variable as undefined.
    }
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1 (x is unknown)  | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
            intersectContexts(CurrBlockInfo->EntryContext,
                              PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
          createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block.
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array.
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, RFact);
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, RFact);
    } else if (!Cp.negative()) {
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability
  // reentrantly, nullptr otherwise.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrantly, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
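
// Illustrative sketch of the reentrancy handling above, for a capability
// declared reentrant (via the reentrant_capability attribute described in
// the documentation):
//
//   mu.Lock();     // fact acquired, depth 0
//   mu.Lock();     // tryReenter succeeds, depth 1 (no double-lock warning)
//   mu.Unlock();   // leaveReentrant, depth back to 0; fact still held
//   mu.Unlock();   // fact removed; negative capability !mu recorded
//
// For a non-reentrant capability, the second Lock() would instead trigger
// handleDoubleLock.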

enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};

struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};

class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  const unsigned ManagedCapacity;
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(ManagedSize);
  }

public:
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
                       alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedShared);
  }
  /// \}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
      }
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
                                                                  Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
        return;
      }

      FSet.replaceLock(
          FactMan, It,
          FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};
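
// Illustrative sketch: a scoped fact tracks the capabilities managed by an
// RAII object, e.g. a MutexLocker-style guard (attribute spellings per the
// documentation):
//
//   {
//     MutexLocker Guard(&mu);   // scoped fact for Guard; mu added via addLock
//     ...                       // mu is held here
//   }                           // destructor -> handleUnlock releases mu
//
// Capabilities recorded with UCK_ReleasedShared/UCK_ReleasedExclusive model
// guards that release a lock on construction, so destroying the guard
// re-acquires the underlying mutex instead of releasing it.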

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr *getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock *PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::SExpr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo *BeforeSet::insertAttrExprs(const ValueDecl *Vd,
    ThreadSafetyAnalyzer &Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
void BeforeSet::checkBeforeAfter(const ValueDecl *StartVd,
                                 const FactSet &FSet,
                                 ThreadSafetyAnalyzer &Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo *, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool(const ValueDecl *)> traverse = [&](const ValueDecl *Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.try_emplace(Vd, true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
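
// Illustrative sketch: given declarations such as
//
//   Mutex A;
//   Mutex B __attribute__((acquired_after(A)));
//
// acquiring A while B is already held triggers handleLockAcquiredBefore, and
// mutually recursive orderings (A after B, B after A) are reported once per
// declaration via handleBeforeAfterCycle.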

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // Look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints.
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, Entry);
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock.
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock.
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}
1567
1568static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1570 TCond = false;
1571 return true;
1572 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1573 TCond = BLE->getValue();
1574 return true;
1575 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1576 TCond = ILE->getValue().getBoolValue();
1577 return true;
1578 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1579 return getStaticBooleanValue(CE->getSubExpr(), TCond);
1580 return false;
1581}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it is set to true
// if the function call is negated, e.g. if (!mu.tryLock(...)).
const CallExpr *ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
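
// Illustrative examples of conditions the walk above can trace (sketch):
//
//   if (mu.TryLock()) ...                      // direct call
//   if (!mu.TryLock()) ...                     // UO_LNot flips Negate
//   if (mu.TryLock() == false) ...             // comparison with a constant
//   if (__builtin_expect(mu.TryLock(), true))  // looks through the builtin
//   bool Ok = mu.TryLock();
//   if (Ok) ...                                // resolved via LocalVariableMap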

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet &Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when the result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  auto Cleanup = llvm::make_scope_exit(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a trylock function, then grab the attributes.
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}
1707
1708namespace {
1709
1710/// We use this class to visit different types of expressions in
1711/// CFGBlocks, and build up the lockset.
1712/// An expression may cause us to add or remove locks from the lockset, or else
1713/// output error messages related to missing locks.
1714/// FIXME: In future, we may be able to not inherit from a visitor.
1715class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1716 friend class ThreadSafetyAnalyzer;
1717
1718 ThreadSafetyAnalyzer *Analyzer;
1719 FactSet FSet;
1720 // The fact set for the function on exit.
1721 const FactSet &FunctionExitFSet;
1722 LocalVariableMap::Context LVarCtx;
1723 unsigned CtxIndex;
1724
1725 // To update and adjust the context.
1726 void updateLocalVarMapCtx(const Stmt *S) {
1727 if (S)
1728 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
1729 if (!Analyzer->Handler.issueBetaWarnings())
1730 return;
1731 // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
1732 Analyzer->SxBuilder.setLookupLocalVarExpr(
1733 [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
1734 return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
1735 });
1736 }
1737
1738 // helper functions
1739
1740 void checkAccess(const Expr *Exp, AccessKind AK,
1741 ProtectedOperationKind POK = POK_VarAccess) {
1742 Analyzer->checkAccess(FSet, Exp, AK, POK);
1743 }
1744 void checkPtAccess(const Expr *Exp, AccessKind AK,
1745 ProtectedOperationKind POK = POK_VarAccess) {
1746 Analyzer->checkPtAccess(FSet, Exp, AK, POK);
1747 }
1748
1749 void handleCall(const Expr *Exp, const NamedDecl *D,
1750 til::SExpr *Self = nullptr,
1751 SourceLocation Loc = SourceLocation());
1752 void examineArguments(const FunctionDecl *FD,
1753 CallExpr::const_arg_iterator ArgBegin,
1754 CallExpr::const_arg_iterator ArgEnd,
1755 bool SkipFirstParam = false);
1756
1757public:
1758 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
1759 const FactSet &FunctionExitFSet)
1760 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1761 FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
1762 CtxIndex(Info.EntryIndex) {
1763 updateLocalVarMapCtx(nullptr);
1764 }
1765
1766 ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
1767
1768 void VisitUnaryOperator(const UnaryOperator *UO);
1769 void VisitBinaryOperator(const BinaryOperator *BO);
1770 void VisitCastExpr(const CastExpr *CE);
1771 void VisitCallExpr(const CallExpr *Exp);
1772 void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1773 void VisitDeclStmt(const DeclStmt *S);
1774 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1775 void VisitReturnStmt(const ReturnStmt *S);
1776};
1777
1778} // namespace
1779
1780/// Warn if the LSet does not contain a lock sufficient to protect access
1781/// of at least the passed in AccessKind.
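///
/// A rough sketch of what triggers this (hypothetical annotated code,
/// attribute macros as in the ThreadSafetyAnalysis docs):
///
///   Mutex mu;
///   int a GUARDED_BY(mu);
///   void f() { a = 1; }  // the write needs 'mu' held exclusively
///                        // (AK_Written maps to LK_Exclusive); a read
///                        // would only need LK_Shared.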
1782void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1783 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1784 Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
1785 SourceLocation Loc) {
1786 LockKind LK = getLockKindFromAccessKind(AK);
1787 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1788 if (Cp.isInvalid()) {
1789 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1790 return;
1791 } else if (Cp.shouldIgnore()) {
1792 return;
1793 }
1794
1795 if (Cp.negative()) {
1796 // Negative capabilities act like excluded locks.
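    // e.g. (hypothetical): given 'void f() REQUIRES(!mu);', calling f()
    // while 'mu' is in the lockset is reported just below via
    // handleFunExcludesLock.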
1797 const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1798 if (LDat) {
1799 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1800 (!Cp).toString(), Loc);
1801 return;
1802 }
1803
1804 // If this does not refer to a negative capability in the same class,
1805 // then stop here.
1806 if (!inCurrentScope(Cp))
1807 return;
1808
1809 // Otherwise the negative requirement must be propagated to the caller.
1810 LDat = FSet.findLock(FactMan, Cp);
1811 if (!LDat) {
1812 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1813 }
1814 return;
1815 }
1816
1817 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1818 bool NoError = true;
1819 if (!LDat) {
1820 // No exact match found. Look for a partial match.
1821 LDat = FSet.findPartialMatch(FactMan, Cp);
1822 if (LDat) {
1823 // Warn that there's no precise match.
1824 std::string PartMatchStr = LDat->toString();
1825 StringRef PartMatchName(PartMatchStr);
1826 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1827 &PartMatchName);
1828 } else {
1829 // Warn that there's no match at all.
1830 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1831 }
1832 NoError = false;
1833 }
1834 // Make sure the mutex we found is the right kind.
1835 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1836 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1837 }
1838}
1839
1840/// Warn if the LSet contains the given lock.
1841void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1842 const NamedDecl *D, const Expr *Exp,
1843 Expr *MutexExp, til::SExpr *Self,
1844 SourceLocation Loc) {
1845 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1846 if (Cp.isInvalid()) {
1847 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1848 return;
1849 } else if (Cp.shouldIgnore()) {
1850 return;
1851 }
1852
1853 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1854 if (LDat) {
1855 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1856 Cp.toString(), Loc);
1857 }
1858}
1859
1860/// Checks guarded_by and pt_guarded_by attributes.
1861/// Whenever we identify an access (read or write) to a DeclRefExpr that is
1862/// marked with guarded_by, we must ensure the appropriate mutexes are held.
1863/// Similarly, we check if the access is to an expression that dereferences
1864/// a pointer marked with pt_guarded_by.
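///
/// Minimal sketch of the checked patterns (hypothetical annotated code,
/// attribute macros as in the ThreadSafetyAnalysis docs):
///
///   Mutex mu;
///   int a GUARDED_BY(mu);
///   int *p PT_GUARDED_BY(mu);
///   void f() {
///     a = 0;   // checkAccess: requires 'mu'
///     *p = 0;  // the dereference delegates to checkPtAccess: requires 'mu'
///   }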
1865void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
1866 AccessKind AK,
1867 ProtectedOperationKind POK) {
1868 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1869
1870 SourceLocation Loc = Exp->getExprLoc();
1871
1872 // Local variables of reference type cannot be re-assigned;
1873 // map them to their initializer.
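  // e.g. (hypothetical): given 'int &r = a;', a later 'r = 1;' is checked
  // as a write to 'a'.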
1874 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1875 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1876 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1877 if (const auto *E = VD->getInit()) {
1878 // Guard against self-initialization. e.g., int &i = i;
1879 if (E == Exp)
1880 break;
1881 Exp = E->IgnoreImplicit()->IgnoreParenCasts();
1882 continue;
1883 }
1884 }
1885 break;
1886 }
1887
1888 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1889 // For dereferences
1890 if (UO->getOpcode() == UO_Deref)
1891 checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
1892 return;
1893 }
1894
1895 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1896 switch (BO->getOpcode()) {
1897 case BO_PtrMemD: // .*
1898 return checkAccess(FSet, BO->getLHS(), AK, POK);
1899 case BO_PtrMemI: // ->*
1900 return checkPtAccess(FSet, BO->getLHS(), AK, POK);
1901 default:
1902 return;
1903 }
1904 }
1905
1906 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1907 checkPtAccess(FSet, AE->getLHS(), AK, POK);
1908 return;
1909 }
1910
1911 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1912 if (ME->isArrow())
1913 checkPtAccess(FSet, ME->getBase(), AK, POK);
1914 else
1915 checkAccess(FSet, ME->getBase(), AK, POK);
1916 }
1917
1918 const ValueDecl *D = getValueDecl(Exp);
1919 if (!D || !D->hasAttrs())
1920 return;
1921
1922 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
1923 Handler.handleNoMutexHeld(D, POK, AK, Loc);
1924 }
1925
1926 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1927 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1928}
1929
1930/// Checks pt_guarded_by and pt_guarded_var attributes.
1931/// POK is the same operationKind that was passed to checkAccess.
1932void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
1933 AccessKind AK,
1934 ProtectedOperationKind POK) {
1935 // Strip off paren- and cast-expressions, checking if we encounter any other
1936 // operator that should be delegated to checkAccess() instead.
1937 while (true) {
1938 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1939 Exp = PE->getSubExpr();
1940 continue;
1941 }
1942 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1943 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1944 // If it's an actual array, and not a pointer, then its elements
1945 // are protected by GUARDED_BY, not PT_GUARDED_BY.
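        // e.g. (hypothetical): for 'int arr[8] GUARDED_BY(mu);' an access
        // 'arr[0] = 1;' decays the array to a pointer, but is still checked
        // against GUARDED_BY(mu).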
1946 checkAccess(FSet, CE->getSubExpr(), AK, POK);
1947 return;
1948 }
1949 Exp = CE->getSubExpr();
1950 continue;
1951 }
1952 break;
1953 }
1954
1955 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1956 if (UO->getOpcode() == UO_AddrOf) {
1957 // The pointer is the address of a variable, so the dereferenced
1958 // value is that variable itself, not actually a pointer.
1959 checkAccess(FSet, UO->getSubExpr(), AK, POK);
1960 return;
1961 }
1962 }
1963
1964 // Pass by reference/pointer warnings are under a different flag.
1965 ProtectedOperationKind PtPOK = POK_VarDereference;
1966 switch (POK) {
1967 case POK_PassByRef:
1968 PtPOK = POK_PtPassByRef;
1969 break;
1970 case POK_ReturnByRef:
1971 PtPOK = POK_PtReturnByRef;
1972 break;
1973 case POK_PassPointer:
1974 PtPOK = POK_PtPassPointer;
1975 break;
1976 case POK_ReturnPointer:
1977 PtPOK = POK_PtReturnPointer;
1978 break;
1979 default:
1980 break;
1981 }
1982
1983 const ValueDecl *D = getValueDecl(Exp);
1984 if (!D || !D->hasAttrs())
1985 return;
1986
1987 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
1988 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1989
1990 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1991 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
1992 Exp->getExprLoc());
1993}
1994
1995/// Process a function call, method call, constructor call,
1996/// or destructor call. This involves looking at the attributes on the
1997/// corresponding function/method/constructor/destructor, issuing warnings,
1998/// and updating the locksets accordingly.
1999///
2000/// FIXME: For classes annotated with one of the guarded annotations, we need
2001/// to treat const method calls as reads and non-const method calls as writes,
2002/// and check that the appropriate locks are held. Non-const method calls with
2003/// the same signature as const method calls can be also treated as reads.
2004///
2005/// \param Exp The call expression.
2006/// \param D The callee declaration.
2007/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
2008/// of an implicitly called cleanup function.
2009/// \param Loc If \p Exp = nullptr, the location.
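///
/// A sketch of the scoped-capability case handled below (hypothetical code,
/// macros as in the ThreadSafetyAnalysis docs):
///
///   class SCOPED_CAPABILITY MutexLocker {
///   public:
///     MutexLocker(Mutex *mu) ACQUIRE(mu);
///     ~MutexLocker() RELEASE();
///   };
///
///   void f() {
///     MutexLocker lock(&mu);  // ctor call: adds 'mu' plus a scoped fact
///     a = 1;                  // OK while the scoped fact manages 'mu'
///   }                         // dtor call: releases what the scope manages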
2010void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
2011 til::SExpr *Self, SourceLocation Loc) {
2012 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
2013 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
2014 CapExprSet ScopedReqsAndExcludes;
2015
2016 // Figure out if we're constructing an object of a scoped lockable class.
2017 CapabilityExpr Scp;
2018 if (Exp) {
2019 assert(!Self);
2020 const auto *TagT = Exp->getType()->getAs<TagType>();
2021 if (D->hasAttrs() && TagT && Exp->isPRValue()) {
2022 til::LiteralPtr *Placeholder =
2023 Analyzer->SxBuilder.createThisPlaceholder();
2024 [[maybe_unused]] auto inserted =
2025 Analyzer->ConstructedObjects.insert({Exp, Placeholder});
2026 assert(inserted.second && "Are we visiting the same expression again?");
2027 if (isa<CXXConstructExpr>(Exp))
2028 Self = Placeholder;
2029 if (TagT->getOriginalDecl()
2030 ->getMostRecentDecl()
2031 ->hasAttr<ScopedLockableAttr>())
2032 Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
2033 }
2034
2035 assert(Loc.isInvalid());
2036 Loc = Exp->getExprLoc();
2037 }
2038
2039 for (const Attr *At : D->attrs()) {
2040 switch (At->getKind()) {
2041 // When we encounter a lock function, we need to add the lock to our
2042 // lockset.
2043 case attr::AcquireCapability: {
2044 const auto *A = cast<AcquireCapabilityAttr>(At);
2045 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2046 : ExclusiveLocksToAdd,
2047 A, Exp, D, Self);
2048 break;
2049 }
2050
2051 // An assert will add a lock to the lockset, but will not generate
2052 // a warning if it is already there, and will not generate a warning
2053 // if it is not removed.
2054 case attr::AssertCapability: {
2055 const auto *A = cast<AssertCapabilityAttr>(At);
2056 CapExprSet AssertLocks;
2057 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
2058 for (const auto &AssertLock : AssertLocks)
2059 Analyzer->addLock(
2060 FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2061 AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
2062 Loc, FactEntry::Asserted));
2063 break;
2064 }
2065
2066 // When we encounter an unlock function, we need to remove unlocked
2067 // mutexes from the lockset, and flag a warning if they are not there.
2068 case attr::ReleaseCapability: {
2069 const auto *A = cast<ReleaseCapabilityAttr>(At);
2070 if (A->isGeneric())
2071 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2072 else if (A->isShared())
2073 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2074 else
2075 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2076 break;
2077 }
2078
2079 case attr::RequiresCapability: {
2080 const auto *A = cast<RequiresCapabilityAttr>(At);
2081 for (auto *Arg : A->args()) {
2082 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2083 A->isShared() ? AK_Read : AK_Written,
2084 Arg, POK_FunctionCall, Self, Loc);
2085 // use for adopting a lock
2086 if (!Scp.shouldIgnore())
2087 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2088 }
2089 break;
2090 }
2091
2092 case attr::LocksExcluded: {
2093 const auto *A = cast<LocksExcludedAttr>(At);
2094 for (auto *Arg : A->args()) {
2095 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2096 // use for deferring a lock
2097 if (!Scp.shouldIgnore())
2098 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2099 }
2100 break;
2101 }
2102
2103 // Ignore attributes unrelated to thread-safety
2104 default:
2105 break;
2106 }
2107 }
2108
2109 std::optional<CallExpr::const_arg_range> Args;
2110 if (Exp) {
2111 if (const auto *CE = dyn_cast<CallExpr>(Exp))
2112 Args = CE->arguments();
2113 else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
2114 Args = CE->arguments();
2115 else
2116 llvm_unreachable("Unknown call kind");
2117 }
2118 const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
2119 if (CalledFunction && Args.has_value()) {
2120 for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
2121 CapExprSet DeclaredLocks;
2122 for (const Attr *At : Param->attrs()) {
2123 switch (At->getKind()) {
2124 case attr::AcquireCapability: {
2125 const auto *A = cast<AcquireCapabilityAttr>(At);
2126 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2127 : ExclusiveLocksToAdd,
2128 A, Exp, D, Self);
2129 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2130 break;
2131 }
2132
2133 case attr::ReleaseCapability: {
2134 const auto *A = cast<ReleaseCapabilityAttr>(At);
2135 if (A->isGeneric())
2136 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2137 else if (A->isShared())
2138 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2139 else
2140 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2141 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2142 break;
2143 }
2144
2145 case attr::RequiresCapability: {
2146 const auto *A = cast<RequiresCapabilityAttr>(At);
2147 for (auto *Arg : A->args())
2148 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2149 A->isShared() ? AK_Read : AK_Written,
2150 Arg, POK_FunctionCall, Self, Loc);
2151 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2152 break;
2153 }
2154
2155 case attr::LocksExcluded: {
2156 const auto *A = cast<LocksExcludedAttr>(At);
2157 for (auto *Arg : A->args())
2158 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2159 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2160 break;
2161 }
2162
2163 default:
2164 break;
2165 }
2166 }
2167 if (DeclaredLocks.empty())
2168 continue;
2169 CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
2170 StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
2171 if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
2172 Cp.isInvalid() && CBTE) {
2173 if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
2174 Object != Analyzer->ConstructedObjects.end())
2175 Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
2176 /*Reentrant=*/false);
2177 }
2178 const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
2179 if (!Fact) {
2180 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
2181 Cp.toString(), LK_Exclusive,
2182 Exp->getExprLoc());
2183 continue;
2184 }
2185 const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
2186 for (const auto &[a, b] :
2187 zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
2188 if (!a.has_value()) {
2189 Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
2190 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2191 b.value().getKind(), b.value().toString());
2192 } else if (!b.has_value()) {
2193 Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
2194 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2195 a.value().getKind(), a.value().toString());
2196 } else if (!a.value().equals(b.value())) {
2197 Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
2198 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2199 a.value().getKind(), a.value().toString(), b.value().toString());
2200 break;
2201 }
2202 }
2203 }
2204 }
2205 // Remove locks first to allow lock upgrading/downgrading.
2206 // FIXME -- should only fully remove if the attribute refers to 'this'.
2207 bool Dtor = isa<CXXDestructorDecl>(D);
2208 for (const auto &M : ExclusiveLocksToRemove)
2209 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
2210 for (const auto &M : SharedLocksToRemove)
2211 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
2212 for (const auto &M : GenericLocksToRemove)
2213 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
2214
2215 // Add locks.
2216 FactEntry::SourceKind Source =
2217 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
2218 for (const auto &M : ExclusiveLocksToAdd)
2219 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2220 M, LK_Exclusive, Loc, Source));
2221 for (const auto &M : SharedLocksToAdd)
2222 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2223 M, LK_Shared, Loc, Source));
2224
2225 if (!Scp.shouldIgnore()) {
2226 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
2227 auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
2228 Scp, Loc, FactEntry::Acquired,
2229 ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
2230 ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
2231 SharedLocksToRemove.size());
2232 for (const auto &M : ExclusiveLocksToAdd)
2233 ScopedEntry->addLock(M);
2234 for (const auto &M : SharedLocksToAdd)
2235 ScopedEntry->addLock(M);
2236 for (const auto &M : ScopedReqsAndExcludes)
2237 ScopedEntry->addLock(M);
2238 for (const auto &M : ExclusiveLocksToRemove)
2239 ScopedEntry->addExclusiveUnlock(M);
2240 for (const auto &M : SharedLocksToRemove)
2241 ScopedEntry->addSharedUnlock(M);
2242 Analyzer->addLock(FSet, ScopedEntry);
2243 }
2244}
2245
2246/// For unary operations which read and write a variable, we need to
2247/// check whether we hold any required mutexes. Reads are checked in
2248/// VisitCastExpr.
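///
/// e.g. (hypothetical) 'a++' on 'int a GUARDED_BY(mu);' is checked here as a
/// write; the read half surfaces as an LValueToRValue cast in VisitCastExpr.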
2249void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2250 switch (UO->getOpcode()) {
2251 case UO_PostDec:
2252 case UO_PostInc:
2253 case UO_PreDec:
2254 case UO_PreInc:
2255 checkAccess(UO->getSubExpr(), AK_Written);
2256 break;
2257 default:
2258 break;
2259 }
2260}
2261
2262/// For binary operations which assign to a variable (writes), we need to check
2263/// whether we hold any required mutexes.
2264/// FIXME: Deal with non-primitive types.
2265void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2266 if (!BO->isAssignmentOp())
2267 return;
2268
2269 updateLocalVarMapCtx(BO);
2270 checkAccess(BO->getLHS(), AK_Written);
2271}
2272
2273/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2274/// need to ensure we hold any required mutexes.
2275/// FIXME: Deal with non-primitive types.
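///
/// e.g. (hypothetical) in 'int x = a;' the load of 'a' appears as an
/// LValueToRValue cast and is checked as AK_Read.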
2276void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2277 if (CE->getCastKind() != CK_LValueToRValue)
2278 return;
2279 checkAccess(CE->getSubExpr(), AK_Read);
2280}
2281
2282void BuildLockset::examineArguments(const FunctionDecl *FD,
2283 CallExpr::const_arg_iterator ArgBegin,
2284 CallExpr::const_arg_iterator ArgEnd,
2285 bool SkipFirstParam) {
2286 // Currently we can't do anything if we don't know the function declaration.
2287 if (!FD)
2288 return;
2289
2290 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2291 // only turns off checking within the body of a function, but we also
2292 // use it to turn off checking in arguments to the function. This
2293 // could result in some false negatives, but the alternative is to
2294 // create yet another attribute.
2295 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2296 return;
2297
2298 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2299 auto Param = Params.begin();
2300 if (SkipFirstParam)
2301 ++Param;
2302
2303 // There can be default arguments, so we stop when one iterator is at end().
2304 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2305 ++Param, ++Arg) {
2306 QualType Qt = (*Param)->getType();
2307 if (Qt->isReferenceType())
2308 checkAccess(*Arg, AK_Read, POK_PassByRef);
2309 else if (Qt->isPointerType())
2310 checkPtAccess(*Arg, AK_Read, POK_PassPointer);
2311 }
2312}
2313
2314void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2315 updateLocalVarMapCtx(Exp);
2316
2317 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2318 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2319 // ME can be null when calling a method pointer
2320 const CXXMethodDecl *MD = CE->getMethodDecl();
2321
2322 if (ME && MD) {
2323 if (ME->isArrow()) {
2324 // Should perhaps be AK_Written if !MD->isConst().
2325 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2326 } else {
2327 // Should perhaps be AK_Written if !MD->isConst().
2328 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2329 }
2330 }
2331
2332 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2333 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
2334 OverloadedOperatorKind OEop = OE->getOperator();
2335 switch (OEop) {
2336 case OO_Equal:
2337 case OO_PlusEqual:
2338 case OO_MinusEqual:
2339 case OO_StarEqual:
2340 case OO_SlashEqual:
2341 case OO_PercentEqual:
2342 case OO_CaretEqual:
2343 case OO_AmpEqual:
2344 case OO_PipeEqual:
2345 case OO_LessLessEqual:
2346 case OO_GreaterGreaterEqual:
2347 checkAccess(OE->getArg(1), AK_Read);
2348 [[fallthrough]];
2349 case OO_PlusPlus:
2350 case OO_MinusMinus:
2351 checkAccess(OE->getArg(0), AK_Written);
2352 break;
2353 case OO_Star:
2354 case OO_ArrowStar:
2355 case OO_Arrow:
2356 case OO_Subscript:
2357 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2358 // Grrr. operator* can be multiplication...
2359 checkPtAccess(OE->getArg(0), AK_Read);
2360 }
2361 [[fallthrough]];
2362 default: {
2363 // TODO: get rid of this, and rely on pass-by-ref instead.
2364 const Expr *Obj = OE->getArg(0);
2365 checkAccess(Obj, AK_Read);
2366 // Check the remaining arguments. For method operators, the first
2367 // argument is the implicit self argument, and doesn't appear in the
2368 // FunctionDecl, but for non-methods it does.
2369 const FunctionDecl *FD = OE->getDirectCallee();
2370 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2371 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2372 break;
2373 }
2374 }
2375 } else {
2376 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2377 }
2378
2379 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2380 if (!D)
2381 return;
2382 handleCall(Exp, D);
2383}
2384
2385void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2386 const CXXConstructorDecl *D = Exp->getConstructor();
2387 if (D && D->isCopyConstructor()) {
2388 const Expr* Source = Exp->getArg(0);
2389 checkAccess(Source, AK_Read);
2390 } else {
2391 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2392 }
2393 if (D && D->hasAttrs())
2394 handleCall(Exp, D);
2395}
2396
2397static const Expr *UnpackConstruction(const Expr *E) {
2398 if (auto *CE = dyn_cast<CastExpr>(E))
2399 if (CE->getCastKind() == CK_NoOp)
2400 E = CE->getSubExpr()->IgnoreParens();
2401 if (auto *CE = dyn_cast<CastExpr>(E))
2402 if (CE->getCastKind() == CK_ConstructorConversion ||
2403 CE->getCastKind() == CK_UserDefinedConversion)
2404 E = CE->getSubExpr();
2405 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2406 E = BTE->getSubExpr();
2407 return E;
2408}
2409
2410void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2411 updateLocalVarMapCtx(S);
2412
2413 for (auto *D : S->getDeclGroup()) {
2414 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2415 const Expr *E = VD->getInit();
2416 if (!E)
2417 continue;
2418 E = E->IgnoreParens();
2419
2420 // handle constructors that involve temporaries
2421 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2422 E = EWC->getSubExpr()->IgnoreParens();
2423 E = UnpackConstruction(E);
2424
2425 if (auto Object = Analyzer->ConstructedObjects.find(E);
2426 Object != Analyzer->ConstructedObjects.end()) {
2427 Object->second->setClangDecl(VD);
2428 Analyzer->ConstructedObjects.erase(Object);
2429 }
2430 }
2431 }
2432}
2433
2434void BuildLockset::VisitMaterializeTemporaryExpr(
2435 const MaterializeTemporaryExpr *Exp) {
2436 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2437 if (auto Object = Analyzer->ConstructedObjects.find(
2438 UnpackConstruction(Exp->getSubExpr()));
2439 Object != Analyzer->ConstructedObjects.end()) {
2440 Object->second->setClangDecl(ExtD);
2441 Analyzer->ConstructedObjects.erase(Object);
2442 }
2443 }
2444}
2445
2446void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
2447 if (Analyzer->CurrentFunction == nullptr)
2448 return;
2449 const Expr *RetVal = S->getRetValue();
2450 if (!RetVal)
2451 return;
2452
2453 // If returning by reference or pointer, check that the function requires the
2454 // appropriate capabilities.
2455 const QualType ReturnType =
2456 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2457 if (ReturnType->isLValueReferenceType()) {
2458 Analyzer->checkAccess(
2459 FunctionExitFSet, RetVal,
2460 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2461 POK_ReturnByRef);
2462 } else if (ReturnType->isPointerType()) {
2463 Analyzer->checkPtAccess(
2464 FunctionExitFSet, RetVal,
2465 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2466 POK_ReturnPointer);
2467 }
2468}
2469
2470/// Given two facts merging on a join point, possibly warn and decide whether to
2471/// keep or replace.
2472///
2473/// \return false if we should keep \p A, true if we should take \p B.
2474bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2475 SourceLocation JoinLoc,
2476 LockErrorKind EntryLEK) {
2477 // Whether we can replace \p A by \p B.
2478 const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
2479 unsigned int ReentrancyDepthA = 0;
2480 unsigned int ReentrancyDepthB = 0;
2481
2482 if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
2483 ReentrancyDepthA = LFE->getReentrancyDepth();
2484 if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
2485 ReentrancyDepthB = LFE->getReentrancyDepth();
2486
2487 if (ReentrancyDepthA != ReentrancyDepthB) {
2488 Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
2489 JoinLoc, EntryLEK,
2490 /*ReentrancyMismatch=*/true);
2491 // Pick the FactEntry with the greater reentrancy depth as the "good"
2492 // fact to reduce potential later warnings.
2493 return CanModify && ReentrancyDepthA < ReentrancyDepthB;
2494 } else if (A.kind() != B.kind()) {
2495 // For managed capabilities, the destructor should unlock in the right mode
2496 // anyway. For asserted capabilities no unlocking is needed.
2497 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2498 // The shared capability subsumes the exclusive capability, if possible.
2499 bool ShouldTakeB = B.kind() == LK_Shared;
2500 if (CanModify || !ShouldTakeB)
2501 return ShouldTakeB;
2502 }
2503 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2504 A.loc());
2505 // Take the exclusive capability to reduce further warnings.
2506 return CanModify && B.kind() == LK_Exclusive;
2507 } else {
2508 // The non-asserted capability is the one we want to track.
2509 return CanModify && A.asserted() && !B.asserted();
2510 }
2511}
2512
2513/// Compute the intersection of two locksets and issue warnings for any
2514/// locks in the symmetric difference.
2515///
2516/// This function is used at a merge point in the CFG when comparing the lockset
2517/// of each branch being merged. For example, given the following sequence:
2518/// A; if () then B; else C; D; we need to check that the lockset after B and C
2519/// are the same. In the event of a difference, we use the intersection of these
2520/// two locksets at the start of D.
2521///
2522/// \param EntrySet A lockset for entry into a (possibly new) block.
2523/// \param ExitSet The lockset on exiting a preceding block.
2524/// \param JoinLoc The location of the join point for error reporting
2525/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2526/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
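///
/// Concretely (hypothetical client code):
///
///   mu.Lock();      // A
///   if (c)
///     mu.Unlock();  // B exits with { }, C (the empty else) with { mu }
///   a = 1;          // D: 'mu' is held on some but not all predecessors,
///                   // so we warn and continue with the intersection { }.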
2527void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2528 const FactSet &ExitSet,
2529 SourceLocation JoinLoc,
2530 LockErrorKind EntryLEK,
2531 LockErrorKind ExitLEK) {
2532 FactSet EntrySetOrig = EntrySet;
2533
2534 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2535 for (const auto &Fact : ExitSet) {
2536 const FactEntry &ExitFact = FactMan[Fact];
2537
2538 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2539 if (EntryIt != EntrySet.end()) {
2540 if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
2541 *EntryIt = Fact;
2542 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2543 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2544 EntryLEK, Handler);
2545 }
2546 }
2547
2548 // Find locks in EntrySet that are not in ExitSet, and remove them.
2549 for (const auto &Fact : EntrySetOrig) {
2550 const FactEntry *EntryFact = &FactMan[Fact];
2551 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2552
2553 if (!ExitFact) {
2554 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
2555 ExitLEK == LEK_NotLockedAtEndOfFunction)
2556 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2557 ExitLEK, Handler);
2558 if (ExitLEK == LEK_LockedSomePredecessors)
2559 EntrySet.removeLock(FactMan, *EntryFact);
2560 }
2561 }
2562}
2563
2564// Return true if block B never continues to its successors.
2565static bool neverReturns(const CFGBlock *B) {
2566 if (B->hasNoReturnElement())
2567 return true;
2568 if (B->empty())
2569 return false;
2570
2571 CFGElement Last = B->back();
2572 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2573 if (isa<CXXThrowExpr>(S->getStmt()))
2574 return true;
2575 }
2576 return false;
2577}
2578
2579/// Check a function's CFG for thread-safety violations.
2580///
2581/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2582/// at the end of each block, and issue warnings for thread safety violations.
2583/// Each block in the CFG is traversed exactly once.
2584void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2585 // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
2586 // For now, we just use the walker to set things up.
2587 threadSafety::CFGWalker walker;
2588 if (!walker.init(AC))
2589 return;
2590
2591 // AC.dumpCFG(true);
2592 // threadSafety::printSCFG(walker);
2593
2594 CFG *CFGraph = walker.getGraph();
2595 const NamedDecl *D = walker.getDecl();
2596 CurrentFunction = dyn_cast<FunctionDecl>(D);
2597
2598 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2599 return;
2600
2601 // FIXME: Do something a bit more intelligent inside constructor and
2602 // destructor code. Constructors and destructors must assume unique access
2603 // to 'this', so checks on member variable access are disabled, but we should
2604 // still enable checks on other objects.
2605 if (isa<CXXConstructorDecl>(D))
2606 return; // Don't check inside constructors.
2607 if (isa<CXXDestructorDecl>(D))
2608 return; // Don't check inside destructors.
2609
2610 Handler.enterFunction(CurrentFunction);
2611
2612 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2613 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2614
2615 // We need to explore the CFG via a "topological" ordering.
2616 // That way, we will be guaranteed to have information about required
2617 // predecessor locksets when exploring a new block.
2618 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2619 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2620
2621 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
2622 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
2623
2624 // Mark entry block as reachable
2625 Initial.Reachable = true;
2626
2627 // Compute SSA names for local variables
2628 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2629
2630 // Fill in source locations for all CFGBlocks.
2631 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2632
2633 CapExprSet ExclusiveLocksAcquired;
2634 CapExprSet SharedLocksAcquired;
2635 CapExprSet LocksReleased;
2636
2637 // Add locks from exclusive_locks_required and shared_locks_required
2638 // to initial lockset. Also turn off checking for lock and unlock functions.
2639 // FIXME: is there a more intelligent way to check lock/unlock functions?
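  // e.g. (hypothetical): for 'void f() REQUIRES(mu);' the entry block starts
  // with 'mu' as a Declared fact, and 'mu' is also expected to be held on
  // exit; ACQUIRE/RELEASE annotations adjust that expectation further below.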
2640 if (!SortedGraph->empty()) {
2641 assert(*SortedGraph->begin() == &CFGraph->getEntry());
2642 FactSet &InitialLockset = Initial.EntrySet;
2643
2644 CapExprSet ExclusiveLocksToAdd;
2645 CapExprSet SharedLocksToAdd;
2646
2647 SourceLocation Loc = D->getLocation();
2648 for (const auto *Attr : D->attrs()) {
2649 Loc = Attr->getLocation();
2650 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2651 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2652 nullptr, D);
2653 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2654 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2655 // We must ignore such methods.
2656 if (A->args_size() == 0)
2657 return;
2658 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2659 nullptr, D);
2660 getMutexIDs(LocksReleased, A, nullptr, D);
2661 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2662 if (A->args_size() == 0)
2663 return;
2664 getMutexIDs(A->isShared() ? SharedLocksAcquired
2665 : ExclusiveLocksAcquired,
2666 A, nullptr, D);
2667 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2668 // Don't try to check trylock functions for now.
2669 return;
2670 }
2671 }
2672 ArrayRef<ParmVarDecl *> Params;
2673 if (CurrentFunction)
2674 Params = CurrentFunction->getCanonicalDecl()->parameters();
2675 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
2676 Params = CurrentMethod->getCanonicalDecl()->parameters();
2677 else
2678 llvm_unreachable("Unknown function kind");
2679 for (const ParmVarDecl *Param : Params) {
2680 CapExprSet UnderlyingLocks;
2681 for (const auto *Attr : Param->attrs()) {
2682 Loc = Attr->getLocation();
2683 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2684 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2685 nullptr, Param);
2686 getMutexIDs(LocksReleased, A, nullptr, Param);
2687 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2688 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2689 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2690 nullptr, Param);
2691 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2692 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2693 getMutexIDs(A->isShared() ? SharedLocksAcquired
2694 : ExclusiveLocksAcquired,
2695 A, nullptr, Param);
2696 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2697 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
2698 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2699 }
2700 }
2701 if (UnderlyingLocks.empty())
2702 continue;
2703 CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
2704 StringRef(),
2705 /*Neg=*/false, /*Reentrant=*/false);
2706 auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
2707 Cp, Param->getLocation(), FactEntry::Declared,
2708 UnderlyingLocks.size());
2709 for (const CapabilityExpr &M : UnderlyingLocks)
2710 ScopedEntry->addLock(M);
2711 addLock(InitialLockset, ScopedEntry, true);
2712 }
2713
2714 // FIXME -- Loc can be wrong here.
2715 for (const auto &Mu : ExclusiveLocksToAdd) {
2716 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2717 Mu, LK_Exclusive, Loc, FactEntry::Declared);
2718 addLock(InitialLockset, Entry, true);
2719 }
2720 for (const auto &Mu : SharedLocksToAdd) {
2721 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2722 Mu, LK_Shared, Loc, FactEntry::Declared);
2723 addLock(InitialLockset, Entry, true);
2724 }
2725 }
2726
2727 // Compute the expected exit set.
2728 // By default, we expect all locks held on entry to be held on exit.
2729 FactSet ExpectedFunctionExitSet = Initial.EntrySet;
2730
2731 // Adjust the expected exit set by adding or removing locks, as declared
2732 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2733 // issue the appropriate warning.
2734 // FIXME: the location here is not quite right.
2735 for (const auto &Lock : ExclusiveLocksAcquired)
2736 ExpectedFunctionExitSet.addLock(
2737 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
2738 D->getLocation()));
2739 for (const auto &Lock : SharedLocksAcquired)
2740 ExpectedFunctionExitSet.addLock(
2741 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
2742 D->getLocation()));
2743 for (const auto &Lock : LocksReleased)
2744 ExpectedFunctionExitSet.removeLock(FactMan, Lock);
2745
2746 for (const auto *CurrBlock : *SortedGraph) {
2747 unsigned CurrBlockID = CurrBlock->getBlockID();
2748 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2749
2750 // Use the default initial lockset in case there are no predecessors.
2751 VisitedBlocks.insert(CurrBlock);
2752
2753 // Iterate through the predecessor blocks and warn if the lockset for all
2754 // predecessors is not the same. We take the entry lockset of the current
2755 // block to be the intersection of all previous locksets.
2756 // FIXME: By keeping the intersection, we may output more errors in future
2757 // for a lock which is not in the intersection, but was in the union. We
2758 // may want to also keep the union in future. As an example, let's say
2759 // the intersection contains Mutex L, and the union contains L and M.
2760 // Later we unlock M. At this point, we would output an error because we
2761 // never locked M; although the real error is probably that we forgot to
2762 // lock M on all code paths. Conversely, let's say that later we lock M.
2763 // In this case, we should compare against the intersection instead of the
2764 // union because the real error is probably that we forgot to unlock M on
2765 // all code paths.
2766 bool LocksetInitialized = false;
2767 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2768 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2769 // if *PI -> CurrBlock is a back edge
2770 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2771 continue;
2772
2773 unsigned PrevBlockID = (*PI)->getBlockID();
2774 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2775
2776 // Ignore edges from blocks that can't return.
2777 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2778 continue;
2779
2780 // Okay, we can reach this block from the entry.
2781 CurrBlockInfo->Reachable = true;
2782
2783 FactSet PrevLockset;
2784 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2785
2786 if (!LocksetInitialized) {
2787 CurrBlockInfo->EntrySet = PrevLockset;
2788 LocksetInitialized = true;
2789 } else {
2790 // Surprisingly 'continue' doesn't always produce back edges, because
2791 // the CFG has empty "transition" blocks where they meet with the end
2792 // of the regular loop body. We still want to diagnose them as loop joins.
2793 intersectAndWarn(
2794 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2795 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2796 ? LEK_LockedSomeLoopIterations
2797 : LEK_LockedSomePredecessors);
2798 }
2799 }
2800
2801 // Skip rest of block if it's not reachable.
2802 if (!CurrBlockInfo->Reachable)
2803 continue;
2804
2805 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
2806
2807 // Visit all the statements in the basic block.
2808 for (const auto &BI : *CurrBlock) {
2809 switch (BI.getKind()) {
2810 case CFGElement::Statement: {
2811 CFGStmt CS = BI.castAs<CFGStmt>();
2812 LocksetBuilder.Visit(CS.getStmt());
2813 break;
2814 }
2815 // Ignore BaseDtor and MemberDtor for now.
2816 case CFGElement::AutomaticObjectDtor: {
2817 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2818 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2819 if (!DD->hasAttrs())
2820 break;
2821
2822 LocksetBuilder.handleCall(
2823 nullptr, DD,
2824 SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
2825 AD.getTriggerStmt()->getEndLoc());
2826 break;
2827 }
2828
2829 case CFGElement::CleanupFunction: {
2830 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
2831 LocksetBuilder.handleCall(
2832 /*Exp=*/nullptr, CF.getFunctionDecl(),
2833 SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
2834 CF.getVarDecl()->getLocation());
2835 break;
2836 }
2837
2838 case CFGElement::TemporaryDtor: {
2839 auto TD = BI.castAs<CFGTemporaryDtor>();
2840
2841 // Clean up constructed object even if there are no attributes to
2842 // keep the number of objects in limbo as small as possible.
2843 if (auto Object = ConstructedObjects.find(
2844 TD.getBindTemporaryExpr()->getSubExpr());
2845 Object != ConstructedObjects.end()) {
2846 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2847 if (DD->hasAttrs())
2848 // TODO: the location here isn't quite correct.
2849 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2850 TD.getBindTemporaryExpr()->getEndLoc());
2851 ConstructedObjects.erase(Object);
2852 }
2853 break;
2854 }
2855 default:
2856 break;
2857 }
2858 }
2859 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2860
2861 // For every back edge from CurrBlock (the end of the loop) to another block
2862 // (FirstLoopBlock) we need to check that the lockset of CurrBlock is equal to
2863 // the one held at the beginning of FirstLoopBlock. We can look up the
2864 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2865 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2866 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2867 // if CurrBlock -> *SI is *not* a back edge
2868 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2869 continue;
2870
2871 CFGBlock *FirstLoopBlock = *SI;
2872 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2873 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2874 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2875 LEK_LockedSomeLoopIterations);
2876 }
2877 }
2878
2879 // Skip the final check if the exit block is unreachable.
2880 if (!Final.Reachable)
2881 return;
2882
2883 // FIXME: Should we call this function for all blocks which exit the function?
2884 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
2885 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);
2886
2887 Handler.leaveFunction(CurrentFunction);
2888}
2889
2890/// Check a function's CFG for thread-safety violations.
2891///
2892/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2893/// at the end of each block, and issue warnings for thread safety violations.
2894/// Each block in the CFG is traversed exactly once.
2895void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2896 ThreadSafetyHandler &Handler,
2897 BeforeSet **BSet) {
2898 if (!*BSet)
2899 *BSet = new BeforeSet;
2900 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2901 Analyzer.runAnalysis(AC);
2902}
2903
2904void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2905
2906/// Helper function that returns a LockKind required for the given level
2907/// of access.
2908LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2909 switch (AK) {
2910 case AK_Read :
2911 return LK_Shared;
2912 case AK_Written :
2913 return LK_Exclusive;
2914 }
2915 llvm_unreachable("Unknown AccessKind");
2916}
This file defines AnalysisDeclContext, a class that manages the analysis context data for context sen...
Defines enum values for all the target-independent builtin functions.
static void dump(llvm::raw_ostream &OS, StringRef FunctionName, ArrayRef< CounterExpression > Expressions, ArrayRef< CounterMappingRegion > Regions)
static Decl::Kind getKind(const Decl *D)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Forward-declares and imports various common LLVM datatypes that clang wants to use unqualified.
Defines an enumeration for C++ overloaded operators.
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
Defines the clang::SourceLocation class and associated facilities.
Defines various enumerations that describe declaration and type specifiers.
static void warnInvalidLock(ThreadSafetyHandler &Handler, const Expr *MutexExp, const NamedDecl *D, const Expr *DeclExp, StringRef Kind)
Issue a warning about an invalid lock expression.
static bool getStaticBooleanValue(Expr *E, bool &TCond)
static bool neverReturns(const CFGBlock *B)
static void findBlockLocations(CFG *CFGraph, const PostOrderCFGView *SortedGraph, std::vector< CFGBlockInfo > &BlockInfo)
Find the appropriate source locations to use when producing diagnostics for each block in the CFG.
static const ValueDecl * getValueDecl(const Expr *Exp)
Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const Expr * UnpackConstruction(const Expr *E)
C Language Family Type Representation.
__device__ __2f16 b
AnalysisDeclContext contains the context data for the function, method or block under analysis.
ASTContext & getASTContext() const
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
static bool isAssignmentOp(Opcode Opc)
Definition Expr.h:4108
Opcode getOpcode() const
Definition Expr.h:4017
const VarDecl * getVarDecl() const
Definition CFG.h:423
const Stmt * getTriggerStmt() const
Definition CFG.h:428
Represents a single basic block in a source-level CFG.
Definition CFG.h:605
pred_iterator pred_end()
Definition CFG.h:973
succ_iterator succ_end()
Definition CFG.h:991
bool hasNoReturnElement() const
Definition CFG.h:1109
CFGElement back() const
Definition CFG.h:908
ElementList::const_reverse_iterator const_reverse_iterator
Definition CFG.h:903
bool empty() const
Definition CFG.h:953
succ_iterator succ_begin()
Definition CFG.h:990
Stmt * getTerminatorStmt()
Definition CFG.h:1087
AdjacentBlocks::const_iterator const_pred_iterator
Definition CFG.h:959
pred_iterator pred_begin()
Definition CFG.h:972
unsigned getBlockID() const
Definition CFG.h:1111
Stmt * getTerminatorCondition(bool StripParens=true)
Definition CFG.cpp:6378
AdjacentBlocks::const_iterator const_succ_iterator
Definition CFG.h:966
Represents a top-level expression in a basic block.
Definition CFG.h:55
@ CleanupFunction
Definition CFG.h:79
@ AutomaticObjectDtor
Definition CFG.h:72
const CXXDestructorDecl * getDestructorDecl(ASTContext &astContext) const
Definition CFG.cpp:5398
const Stmt * getStmt() const
Definition CFG.h:139
Represents a source-level, intra-procedural CFG that represents the control-flow of a Stmt.
Definition CFG.h:1222
CFGBlock & getExit()
Definition CFG.h:1332
CFGBlock & getEntry()
Definition CFG.h:1330
unsigned getNumBlockIDs() const
Returns the total number of BlockIDs allocated (which start at 0).
Definition CFG.h:1409
arg_iterator arg_begin()
Definition ExprCXX.h:1678
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
arg_iterator arg_end()
Definition ExprCXX.h:1679
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
bool isCopyConstructor(unsigned &TypeQuals) const
Whether this constructor is a copy constructor (C++ [class.copy]p2, which can be used to copy the cla...
Definition DeclCXX.cpp:3008
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
ConstExprIterator const_arg_iterator
Definition Expr.h:3125
arg_iterator arg_begin()
Definition Expr.h:3134
arg_iterator arg_end()
Definition Expr.h:3137
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
Decl * getCalleeDecl()
Definition Expr.h:3054
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
const DeclGroupRef getDeclGroup() const
Definition Stmt.h:1629
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:1637
bool hasAttrs() const
Definition DeclBase.h:518
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:559
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isDefinedOutsideFunctionOrMethod() const
isDefinedOutsideFunctionOrMethod - This predicate returns true if this scoped decl is defined outside...
Definition DeclBase.h:949
DeclContext * getDeclContext()
Definition DeclBase.h:448
attr_range attrs() const
Definition DeclBase.h:535
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3090
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3085
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3073
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isPRValue() const
Definition Expr.h:285
Expr * IgnoreCasts() LLVM_READONLY
Skip past any casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2794
QualType getReturnType() const
Definition Decl.h:2842
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2771
FunctionDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition Decl.cpp:3688
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Definition Decl.cpp:3767
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4931
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4964
This represents a decl that may have a name.
Definition Decl.h:273
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
Definition Decl.h:294
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:316
virtual void printName(raw_ostream &OS, const PrintingPolicy &Policy) const
Pretty-print the unqualified name of this declaration.
Definition Decl.cpp:1672
QualType getCanonicalType() const
Definition TypeBase.h:8337
bool isConstQualified() const
Determine whether this type is const-qualified.
Definition TypeBase.h:8358
Expr * getRetValue()
Definition Stmt.h:3187
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceLocation getEndLoc() const LLVM_READONLY
Definition Stmt.cpp:358
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
bool isPointerType() const
Definition TypeBase.h:8522
bool isReferenceType() const
Definition TypeBase.h:8546
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isLValueReferenceType() const
Definition TypeBase.h:8550
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9098
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:711
QualType getType() const
Definition Decl.h:722
void checkBeforeAfter(const ValueDecl *Vd, const FactSet &FSet, ThreadSafetyAnalyzer &Analyzer, SourceLocation Loc, StringRef CapKind)
Return true if any mutexes in FSet are in the acquired_before set of Vd.
BeforeInfo * insertAttrExprs(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
Process acquired_before and acquired_after attributes on Vd.
BeforeInfo * getBeforeInfoForDecl(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
const PostOrderCFGView * getSortedGraph() const
const NamedDecl * getDecl() const
bool init(AnalysisDeclContext &AC)
bool equals(const CapabilityExpr &other) const
CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D, const Expr *DeclExp, til::SExpr *Self=nullptr)
Translate a clang expression in an attribute to a til::SExpr.
void setLookupLocalVarExpr(std::function< const Expr *(const NamedDecl *)> F)
til::SExpr * translate(const Stmt *S, CallingContext *Ctx)
til::SExpr * translateVariable(const VarDecl *VD, CallingContext *Ctx)
Handler class for thread safety warnings.
virtual void handleExpectMoreUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected)
Warn when we get fewer underlying mutexes than expected.
virtual void handleInvalidLockExp(SourceLocation Loc)
Warn about lock expressions which fail to resolve to lockable objects.
virtual void handleUnmatchedUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected, Name Actual)
Warn when an actual underlying mutex of a scoped lockable does not match the expected.
virtual void handleExpectFewerUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Actual)
Warn when we get more underlying mutexes than expected.
virtual void enterFunction(const FunctionDecl *FD)
Called by the analysis when starting analysis of a function.
virtual void handleIncorrectUnlockKind(StringRef Kind, Name LockName, LockKind Expected, LockKind Received, SourceLocation LocLocked, SourceLocation LocUnlock)
Warn about an unlock function call that attempts to unlock a lock with the incorrect lock kind.
virtual void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocEndOfScope, LockErrorKind LEK, bool ReentrancyMismatch=false)
Warn about situations where a mutex is sometimes held and sometimes not.
virtual void leaveFunction(const FunctionDecl *FD)
Called by the analysis when finishing analysis of a function.
virtual void handleExclusiveAndShared(StringRef Kind, Name LockName, SourceLocation Loc1, SourceLocation Loc2)
Warn when a mutex is held exclusively and shared at the same point.
virtual void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, Name LockName, LockKind LK, SourceLocation Loc, Name *PossibleMatch=nullptr)
Warn when a protected operation occurs while the specific mutex protecting the operation is not locke...
virtual void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, SourceLocation Loc)
Warn when a function is called while an excluded mutex is locked.
virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, AccessKind AK, SourceLocation Loc)
Warn when a protected operation occurs while no locks are held.
virtual void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc, SourceLocation LocPreviousUnlock)
Warn about unlock function calls that do not have a prior matching lock expression.
virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, SourceLocation Loc)
Warn when acquiring a lock while the corresponding negative capability is not held.
virtual void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocDoubleLock)
Warn about lock function calls for locks which are already held.
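The callbacks above are virtual with do-nothing defaults, so a client overrides only the diagnostics it cares about. A minimal sketch of a custom handler (the class name is hypothetical):

// Hypothetical handler that only counts double-lock warnings; every other
// callback keeps its empty default implementation.
class CountingHandler : public threadSafety::ThreadSafetyHandler {
public:
  unsigned DoubleLocks = 0;
  void handleDoubleLock(StringRef Kind, Name LockName,
                        SourceLocation LocLocked,
                        SourceLocation LocDoubleLock) override {
    ++DoubleLocks;
  }
};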
bool matches(const til::SExpr *E1, const til::SExpr *E2)
LockKind getLockKindFromAccessKind(AccessKind AK)
Helper function that returns a LockKind required for the given level of access.
LockErrorKind
This enum distinguishes between different situations where we warn due to inconsistent locking.
@ LEK_NotLockedAtEndOfFunction
Expecting a capability to be held at the end of function.
@ LEK_LockedSomePredecessors
A capability is locked in some but not all predecessors of a CFGBlock.
@ LEK_LockedAtEndOfFunction
A capability is still locked at the end of a function.
@ LEK_LockedSomeLoopIterations
A capability is locked for some but not all loop iterations.
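Sketches of code that triggers these consistency warnings, reusing the hypothetical annotated Mutex from the earlier sketch:

void somePredecessors(Mutex &mu, bool c) {
  if (c)
    mu.Lock();
  // LEK_LockedSomePredecessors: mu is held on only one path into this point.
}

void endOfFunction(Mutex &mu) {
  mu.Lock();
}  // LEK_LockedAtEndOfFunction: mu is still held when the function exits.

void someIterations(Mutex &mu, int n) {
  for (int i = 0; i < n; ++i)
    mu.Lock();
  // LEK_LockedSomeLoopIterations: mu is held after the loop back-edge but
  // not on loop entry.
}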
void threadSafetyCleanup(BeforeSet *Cache)
AccessKind
This enum distinguishes between different ways to access (read or write) a variable.
@ AK_Written
Writing a variable.
@ AK_Read
Reading a variable.
LockKind
This enum distinguishes between different kinds of lock actions.
@ LK_Shared
Shared/reader lock of a mutex.
@ LK_Exclusive
Exclusive/writer lock of a mutex.
@ LK_Generic
Can be either Shared or Exclusive.
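getLockKindFromAccessKind (above) ties the two enums together: reading a guarded variable requires at least a shared lock, while writing it requires an exclusive one.

// The mapping encoded by getLockKindFromAccessKind:
//   AK_Read    -> LK_Shared     (a reader lock suffices)
//   AK_Written -> LK_Exclusive  (writes need the writer lock)
LockKind ForRead  = getLockKindFromAccessKind(AK_Read);    // LK_Shared
LockKind ForWrite = getLockKindFromAccessKind(AK_Written); // LK_Exclusive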
void runThreadSafetyAnalysis(AnalysisDeclContext &AC, ThreadSafetyHandler &Handler, BeforeSet **Bset)
Check a function's CFG for thread-safety violations.
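A sketch of driving the analysis for one declaration, in the style of AnalysisBasedWarnings (construction of the AnalysisDeclContext AC and the concrete handler is elided; MyHandler is a hypothetical ThreadSafetyHandler subclass):

// BSet is threaded through *Bset so the acquired_before/after graph is
// built once and reused across functions; release it with
// threadSafetyCleanup when analysis is finished.
threadSafety::BeforeSet *BSet = nullptr;
MyHandler Handler;
threadSafety::runThreadSafetyAnalysis(AC, Handler, &BSet);
// ... analyze further functions, reusing BSet ...
threadSafety::threadSafetyCleanup(BSet);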
ProtectedOperationKind
This enum distinguishes between different kinds of operations that may need to be protected by locks.
@ POK_PtPassByRef
Passing a pt-guarded variable by reference.
@ POK_PassPointer
Passing pointer to a guarded variable.
@ POK_VarDereference
Dereferencing a variable (e.g. p in *p = 5;)
@ POK_PassByRef
Passing a guarded variable by reference.
@ POK_ReturnByRef
Returning a guarded variable by reference.
@ POK_PtPassPointer
Passing a pt-guarded pointer.
@ POK_PtReturnPointer
Returning a pt-guarded pointer.
@ POK_VarAccess
Reading or writing a variable (e.g. x in x = 5;)
@ POK_FunctionCall
Making a function call (e.g. foo())
@ POK_ReturnPointer
Returning pointer to a guarded variable.
@ POK_PtReturnByRef
Returning a pt-guarded variable by reference.
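A few of these operation kinds in terms of annotated user code (Mutex is the hypothetical class from the earlier sketch; take_ref is likewise hypothetical):

Mutex mu;
int x __attribute__((guarded_by(mu)));
int *p __attribute__((pt_guarded_by(mu)));

void take_ref(int &r);

void uses() {
  x = 5;        // POK_VarAccess: writing the guarded variable itself
  *p = 5;       // POK_VarDereference: dereferencing the pt-guarded pointer
  take_ref(x);  // POK_PassByRef: passing a guarded variable by reference
}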
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.