#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");
// Limits on the number of instructions and blocks scanned, and on the global
// cache size. All three can be overridden from the command line.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned> BlockNumberLimit(
    "memdep-block-number-limit", cl::Hidden, cl::init(200),
    cl::desc("The number of blocks to scan during memory "
             "dependency analysis (default = 200)"));

static cl::opt<unsigned> CacheGlobalLimit(
    "memdep-cache-global-limit", cl::Hidden, cl::init(10000),
    cl::desc("The max number of entries allowed in a cache (default = 10000)"));
/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
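// Usage sketch (illustrative): when a dependee instruction goes away, each
// query instruction recorded against it is detached with a call like
//   RemoveFromReverseMap(ReverseLocalDeps, Dep, QueryInst);
// where Dep/QueryInst are hypothetical names for this example.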
/// If the given instruction references a specific memory location, fill in
/// Loc with the details, otherwise set Loc.Ptr to null.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    // ...
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    // ...
  }
  // ...
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // ...
    case Intrinsic::invariant_start:
      // ...
    case Intrinsic::invariant_end:
      // ...
    case Intrinsic::masked_load:
      // ...
    case Intrinsic::masked_store:
      // ...
    }
  }
  // ...
}
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  // ...
  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // ...
    // If the two calls do not interfere, look past the earlier one.
    if (isReadOnlyCall && !isModSet(MR) &&
        /* ... */)
      // ...
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);
      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant group dependency indicates a non-local Def, which
  // is better than a local clobber or an unknown result.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}
MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {
  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();
  // ...
  // The order of instructions in a use list is unpredictable; to get a
  // deterministic result, always pick the closest dominating dependency.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must call it with not null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };
  // ...
  for (const Use &Us : LoadOperand->uses()) {
    auto *U = dyn_cast<Instruction>(Us.getUser());
    if (!U || U == LI || !DT.dominates(U, LI))
      continue;
    // ...
    // A load or store of the same pointer carrying invariant.group metadata
    // is a candidate dependency.
    if (/* ... */
        U->hasMetadata(LLVMContext::MD_invariant_group))
      ClosestDependency = GetClosestDependency(ClosestDependency, U);
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // The dependency is non-local: record it in the non-local defs cache so a
  // later full query can pick it up.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}
static bool canSkipClobberingStore(const StoreInst *SI,
                                   const MemoryLocation &MemLoc,
                                   Align MemLocAlign, BatchAAResults &BatchAA,
                                   unsigned ScanLimit) {
  // ...
  if (std::min(MemLocAlign, SI->getAlign()).value() <
      MemLoc.Size.getValue().getKnownMinValue())
    return false;
  // ...
  if (!LI || LI->getParent() != SI->getParent())
    return false;
  // ...
  // Walk from the load to the store, giving up if the walk gets too long or
  // if anything in between may write to the location.
  unsigned NumVisitedInsts = 0;
  // ...
    if (++NumVisitedInsts > ScanLimit ||
        isModSet(BatchAA.getModRefInfo(I, MemLoc)))
      return false;

  return true;
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  // ...
  if (!Limit)
    Limit = &DefaultLimit;
  // ...
  if (isLoad && QueryInst)
    if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
      if (LI->hasMetadata(LLVMContext::MD_invariant_load))
        isInvariantLoad = true;
      MemLocAlign = LI->getAlign();
    }
  // ...
    return I->mayReadOrWriteMemory();
  // ...
  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // ...
      switch (II->getIntrinsicID()) {
      case Intrinsic::lifetime_start: {
        // ...
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        // ...
        if (ID == Intrinsic::masked_load)
          // ...
      }
      // ...
      }
    // ...
    if (LI->isVolatile()) {
      // ...
    }
    // ...
    ClobberOffsets[LI] = R.getOffset();
    // ...
    if (!SI->isUnordered() && SI->isAtomic()) {
      // ...
    }
    // ...
    if (SI->isVolatile())
      // ...
    if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
      // ...
  }
  // ...
}
MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  ClobberOffsets.clear();
  // ...
  // If we already have a cached result, return it without rescanning.
  if (!LocalCache.isDirty())
    return LocalCache;
  // ...
  isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
  // ...
  LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                        QueryParent, QueryInst, nullptr);
  // ...
  bool isReadOnly = AA.onlyReadsMemory(QueryCall);
  LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                     ScanPos->getIterator(), QueryParent);
  // ...
  // Remember the result and set up a reverse link from the dependee.
  ReverseLocalDeps[I].insert(QueryInst);
  // ...
  return LocalCache;
}
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
716 "getNonLocalCallDependency should only be used on calls with "
718 PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
726 if (!Cache.empty()) {
729 if (!CacheP.second) {
736 for (
auto &Entry : Cache)
737 if (Entry.getResult().isDirty())
743 ++NumCacheDirtyNonLocal;
748 ++NumUncacheNonLocal;
752 bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);
756 unsigned NumSortedEntries = Cache.
size();
760 while (!DirtyBlocks.
empty()) {
764 if (!Visited.
insert(DirtyBB).second)
770 NonLocalDepInfo::iterator Entry =
771 std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
773 if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
777 if (Entry != Cache.begin() + NumSortedEntries &&
778 Entry->getBB() == DirtyBB) {
781 if (!Entry->getResult().isDirty())
785 ExistingResult = &*Entry;
791 if (ExistingResult) {
803 if (ScanPos != DirtyBB->
begin()) {
804 Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
826 ReverseNonLocalDeps[Inst].insert(QueryCall);
void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  // ...
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  // ...
  // Check for a cached Def from invariant.group handling first.
  auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
  if (NonLocalDefIt != NonLocalDefsCache.end()) {
    Result.push_back(NonLocalDefIt->second);
    ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
        .erase(QueryInst);
    NonLocalDefsCache.erase(NonLocalDefIt);
    return;
  }

  // This routine does not expect to deal with volatile instructions.
  auto isOrdered = [](Instruction *I) {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      return !LI->isUnordered();
    if (StoreInst *SI = dyn_cast<StoreInst>(I))
      return !SI->isUnordered();
    return false;
  };
  // ...
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, /*SkipFirstBlock=*/true))
    return;
  // ...
}
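// Client-side sketch (illustrative, not part of this file): a GVN-style pass
// drives the full non-local query and then inspects each (block, result)
// pair; LI and MD are hypothetical names here.
//   SmallVector<NonLocalDepResult, 16> Deps;
//   MD.getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &D : Deps)
//     if (D.getResult().isDef())
//       ; // LI's value is produced in D.getBB() by D.getResult().getInst()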
MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
    BatchAAResults &BatchAA) {
  // ...
  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;
  // ...
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }
  // ...
  assert(ExistingResult->getResult().getInst()->getParent() == BB &&
         "Instruction invalidated?");
  ++NumCacheDirtyNonLocalPtr;
  // ...
  // Eliminating the dirty entry from 'Cache', so update the reverse info.
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  // ...
  ++NumUncacheNonLocalPtr;
  // ...
  MemDepResult Dep = getSimplePointerDependencyFrom(Loc, isLoad, ScanPos, BB,
                                                    QueryInst, nullptr, BatchAA);
  // ...
  Cache->push_back(NonLocalDepEntry(BB, Dep));
  // ...
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}
/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  // With zero or one entry there is nothing to sort.
  if (Cache.size() < 2)
    return;
  unsigned s = Cache.size() - NumSortedEntries;
  // ...
  // If nothing is sorted yet, do a full sort.
  if (NumSortedEntries == 0) {
    llvm::sort(Cache);
    return;
  }
  // If only a few entries are unsorted, insertion-sort each of them into the
  // already-sorted prefix; otherwise fall back to a full sort.
  if (s < Log2_32(Cache.size())) {
    // ...
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - s + 1, Val);
    Cache.insert(Entry, Val);
    // ...
  }
}
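// Rationale (as observed from the code above): with N cached entries and s
// unsorted stragglers, binary-search insertion costs roughly O(s * log N),
// which beats an O(N log N) re-sort exactly when s is small; Log2_32 is the
// cheap cutoff used to pick between the two strategies.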
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // this CacheKey, this value will be used as the insertion point.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;
  // ...
  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already exist.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do
  // some work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      // The query's Size differs from the cached one: throw out the cached
      // data and restart the query with the new size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
      // The cache was just cleared, so everything known about already
      // visited blocks is lost.
      IsIncomplete = true;
    }

    // If the query's AATags are inconsistent with the cached ones, blow away
    // the tags and restart the query without them.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // As above, the cleared cache means the walk is now incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock, IsIncomplete);
    }
  }
  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
  // ...
  // If we have valid cached information for exactly the block we are
  // starting from, just return it with no recomputation.
  if (!IsIncomplete &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // Before returning the fully cached result, verify that no block in the
    // result set was already visited with a different pointer.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;
        // We have a pointer mismatch in a block: report that something was
        // clobbered.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }
      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise this is either a new block or one whose cached pair is about
  // to be invalidated by adding more info than it covers.
  if (!IsIncomplete && Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();
  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted; entries we add are only sorted on demand.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  BatchAAResults BatchAA(AA, &EEA);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we process a very large number of blocks the query becomes very
    // expensive: bail out and treat the result as incomplete.
    if (Result.size() > NumResultsLimit) {
      // Sort whatever we have so recursive invocations see a sorted cache.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bailed out, the cache can't claim to cover the whole query.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in BB.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB, using cached information
      // where available.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = getNonLocalInfoForBlock(
          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to
    // do phi translation to change it into a value live in the predecessor
    // block. If not, just add the predecessors to the worklist and scan them
    // with the same Pointer.
    if (!Pointer.needsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at Pred.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (auto *NewBlock : NewBlocks)
            Visited.erase(NewBlock);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Clean up the Visited map.
        for (auto *NewBlock : NewBlocks)
          Visited.erase(NewBlock);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need phi translation; if we know ahead of time that translating
    // this value is impossible, don't even try.
    if (!Pointer.isPotentiallyPHITranslatable())
      goto PredTranslationFailure;
    // Sort the cache now (if needed) so recursive invocations only ever see
    // properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      Value *PredPtrVal =
          PredPointer.translateValue(BB, Pred, &DT, /*MustDominate=*/false);

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtrVal, we already did the
        // analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise the block was previously analyzed with a different
        // pointer: treat this as a phi translation failure, cleaning up the
        // Visited map first.
        for (const auto &Pred : PredList)
          Visited.erase(Pred.first);
        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to
    // avoid calling getNonLocalPointerDepFromBB for blocks we don't want to
    // return results from.
    for (auto &I : PredList) {
      BasicBlock *Pred = I.first;
      PHITransAddr &PredPointer = I.second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, the pointer must be assumed clobbered there.
      if (!PredPtrVal)
        CanTranslate = false;

      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // ...
        // A phi translation failure means the cache for CacheKey won't
        // include all of the entries we need to update, so invalidate it.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query, so the fast path is disabled for this key.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;
  PredTranslationFailure:
    // We can't produce a sane translation for this block.
    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }
    // A failed phi translation means the cache can't cover the whole query.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    // ...
    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
      if (I.getBB() != BB)
        continue;
      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT.isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      I.setResult(MemDepResult::getUnknown());
      break;
    }
  }
  (void)GotWorklistLimit;
  // If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}
void MemoryDependenceResults::removeCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {
  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }
  // ...
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map, updating the reverse map
  // for each instruction-valued entry.
  // ...
  for (const NonLocalDepEntry &DE : PInfo) {
    // ...
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
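// Client-side sketch (illustrative): a transform that replaces a pointer
// value would flush both cached flavors for it in one call:
//   MD.invalidateCachedPointerInfo(OldPtr);
// where MD/OldPtr are hypothetical names for this example.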
void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  EEA.removeInstruction(RemInst);

  // Walk through the non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
  if (NLDI != NonLocalDepsMap.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDepsMap.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
    LocalDeps.erase(LocalDepEntry);
  }

  // If the instruction is a pointer, remove it from both the load info and
  // the store info.
  if (RemInst->getType()->isPointerTy()) {
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a
    // load. Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }
  // ...
  // Anything that was locally dependent on RemInst becomes dirty on the
  // instruction right after it (null if RemInst is a terminator).
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
  // Loop over all of the things that depend on the instruction we're
  // removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be a terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set we are iterating.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDepsMap[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating it.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      auto &NLPD = NonLocalPointerDeps[P];
      NonLocalDepInfo &NLPDI = NLPD.NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NLPD.Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }
      // ...
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}
/// Verify that the specified instruction does not occur in our internal data
/// structures.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDepsMap) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");
    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}
1728 "Memory Dependence Analysis",
false,
true)
1753 FunctionAnalysisManager::Invalidator &Inv) {
1771 return DefaultBlockScanLimit;