LLVM 22.0.0git
LazyValueInfo.cpp
1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
25#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constants.h"
28#include "llvm/IR/DataLayout.h"
29#include "llvm/IR/Dominators.h"
30#include "llvm/IR/InstrTypes.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/LLVMContext.h"
35#include "llvm/IR/Module.h"
37#include "llvm/IR/ValueHandle.h"
39#include "llvm/Support/Debug.h"
43#include <optional>
44using namespace llvm;
45using namespace PatternMatch;
46
47#define DEBUG_TYPE "lazy-value-info"
48
49// This is the number of worklist items we will process to try to discover an
50// answer for a given value.
51static const unsigned MaxProcessedPerValue = 500;
52
56 "Lazy Value Information Analysis", false, true)
60 "Lazy Value Information Analysis", false, true)
61
62static cl::opt<bool> PerPredRanges(
63 "lvi-per-pred-ranges", cl::Hidden, cl::init(false),
64 cl::desc("Enable tracking of ranges for a value in a block for"
65 "each block predecessor (default = false)"));
66
67namespace llvm {
71} // namespace llvm
72
73AnalysisKey LazyValueAnalysis::Key;
74
75/// Returns true if this lattice value represents at most one possible value.
76/// This is as precise as any lattice value can get while still representing
77/// reachable code.
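/// For example, a lattice value holding the constant range [7, 8) represents
/// exactly one integer, while [0, 10) or an overdefined value does not.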
78static bool hasSingleValue(const ValueLatticeElement &Val) {
79 if (Val.isConstantRange() &&
80 Val.getConstantRange().isSingleElement())
81 // Integer constants are single element ranges
82 return true;
83 if (Val.isConstant())
84 // Non-integer constants
85 return true;
86 return false;
87}
88
89//===----------------------------------------------------------------------===//
90// LazyValueInfoCache Decl
91//===----------------------------------------------------------------------===//
92
93namespace {
94 /// A callback value handle updates the cache when values are erased.
95 class LazyValueInfoCache;
96 struct LVIValueHandle final : public CallbackVH {
97 LazyValueInfoCache *Parent;
98
99 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
100 : CallbackVH(V), Parent(P) { }
101
102 void deleted() override;
103 void allUsesReplacedWith(Value *V) override {
104 deleted();
105 }
106 };
107} // end anonymous namespace
108
109namespace {
110using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
111using BBLatticeElementMap =
113using PredecessorValueLatticeMap =
114 SmallDenseMap<AssertingVH<Value>, BBLatticeElementMap, 2>;
115
116/// This is the cache kept by LazyValueInfo which
117/// maintains information about queries across the clients' queries.
118class LazyValueInfoCache {
119 /// This is all of the cached information for one basic block. It contains
120 /// the per-value lattice elements, as well as a separate set for
121 /// overdefined values to reduce memory usage. Additionally pointers
122 /// dereferenced in the block are cached for nullability queries.
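/// A lookup for a value therefore first checks the cheap OverDefined set and
/// only then the LatticeElements map; see getCachedValueInfo below.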
123 struct BlockCacheEntry {
124 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
125 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
126 // std::nullopt indicates that the nonnull pointers for this basic block
127 // have not been computed yet.
128 std::optional<NonNullPointerSet> NonNullPointers;
129 // This is an extension of the above LatticeElements: for each Value it
130 // caches a ValueLatticeElement per predecessor of the BB tracked by this
131 // entry.
132 std::optional<PredecessorValueLatticeMap> PredecessorLatticeElements;
133 };
134
135 /// Cached information per basic block.
136 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
137 BlockCache;
138 /// Set of value handles used to erase values from the cache on deletion.
139 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
140
141 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
142 auto It = BlockCache.find_as(BB);
143 if (It == BlockCache.end())
144 return nullptr;
145 return It->second.get();
146 }
147
148 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
149 auto It = BlockCache.find_as(BB);
150 if (It == BlockCache.end()) {
151 std::unique_ptr<BlockCacheEntry> BCE =
152 std::make_unique<BlockCacheEntry>();
153 if (PerPredRanges)
154 BCE->PredecessorLatticeElements =
155 std::make_optional<PredecessorValueLatticeMap>();
156 It = BlockCache.insert({BB, std::move(BCE)}).first;
157 }
158
159 return It->second.get();
160 }
161
162 void addValueHandle(Value *Val) {
163 auto HandleIt = ValueHandles.find_as(Val);
164 if (HandleIt == ValueHandles.end())
165 ValueHandles.insert({Val, this});
166 }
167
168public:
169 void insertResult(Value *Val, BasicBlock *BB,
170 const ValueLatticeElement &Result) {
171 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
172
173 // Insert over-defined values into their own cache to reduce memory
174 // overhead.
175 if (Result.isOverdefined())
176 Entry->OverDefined.insert(Val);
177 else
178 Entry->LatticeElements.insert({Val, Result});
179
180 addValueHandle(Val);
181 }
182
183 void insertPredecessorResults(Value *Val, BasicBlock *BB,
184 BBLatticeElementMap &PredLatticeElements) {
185 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
186
187 Entry->PredecessorLatticeElements->insert({Val, PredLatticeElements});
188
189 addValueHandle(Val);
190 }
191
192 std::optional<BBLatticeElementMap>
193 getCachedPredecessorInfo(Value *V, BasicBlock *BB) const {
194 const BlockCacheEntry *Entry = getBlockEntry(BB);
195 if (!Entry)
196 return std::nullopt;
197
198 auto LatticeIt = Entry->PredecessorLatticeElements->find_as(V);
199 if (LatticeIt == Entry->PredecessorLatticeElements->end())
200 return std::nullopt;
201
202 return LatticeIt->second;
203 }
204
205 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
206 BasicBlock *BB) const {
207 const BlockCacheEntry *Entry = getBlockEntry(BB);
208 if (!Entry)
209 return std::nullopt;
210
211 if (Entry->OverDefined.count(V))
212 return ValueLatticeElement::getOverdefined();
213
214 auto LatticeIt = Entry->LatticeElements.find_as(V);
215 if (LatticeIt == Entry->LatticeElements.end())
216 return std::nullopt;
217
218 return LatticeIt->second;
219 }
220
221 bool
222 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
223 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
224 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
225 if (!Entry->NonNullPointers) {
226 Entry->NonNullPointers = InitFn(BB);
227 for (Value *V : *Entry->NonNullPointers)
228 addValueHandle(V);
229 }
230
231 return Entry->NonNullPointers->count(V);
232 }
233
234 /// clear - Empty the cache.
235 void clear() {
236 BlockCache.clear();
237 ValueHandles.clear();
238 }
239
240 /// Inform the cache that a given value has been deleted.
241 void eraseValue(Value *V);
242
243 /// This is part of the update interface to inform the cache
244 /// that a block has been deleted.
245 void eraseBlock(BasicBlock *BB);
246
247 /// Updates the cache to remove any influence an overdefined value in
248 /// OldSucc might have (unless also overdefined in NewSucc). This just
249 /// flushes elements from the cache and does not add any.
250 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
251};
252} // namespace
253
254void LazyValueInfoCache::eraseValue(Value *V) {
255 for (auto &Pair : BlockCache) {
256 Pair.second->LatticeElements.erase(V);
257 Pair.second->OverDefined.erase(V);
258 if (Pair.second->NonNullPointers)
259 Pair.second->NonNullPointers->erase(V);
260 if (PerPredRanges)
261 Pair.second->PredecessorLatticeElements->erase(V);
262 }
263
264 auto HandleIt = ValueHandles.find_as(V);
265 if (HandleIt != ValueHandles.end())
266 ValueHandles.erase(HandleIt);
267}
268
269void LVIValueHandle::deleted() {
270 // This erasure deallocates *this, so it MUST happen after we're done
271 // using any and all members of *this.
272 Parent->eraseValue(*this);
273}
274
275void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
276 // Conservatively drop all cached per-predecessor ranges when a BB is removed.
277 if (PerPredRanges)
278 for (auto &Pair : BlockCache)
279 Pair.second->PredecessorLatticeElements->clear();
280 BlockCache.erase(BB);
281}
282
283void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
284 BasicBlock *NewSucc) {
285 // When an edge in the graph has been threaded, values that we could not
286 // determine a value for before (i.e. were marked overdefined) may be
287 // possible to solve now. We do NOT try to proactively update these values.
288 // Instead, we clear their entries from the cache, and allow lazy updating to
289 // recompute them when needed.
290
291 // The updating process is fairly simple: we need to drop cached info
292 // for all values that were marked overdefined in OldSucc, and for those same
293 // values in any successor of OldSucc (except NewSucc) in which they were
294 // also marked overdefined.
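 // Illustrative example: if %x was cached as overdefined in OldSucc and in a
 // successor of OldSucc other than NewSucc, both stale entries are dropped
 // here so that a later query recomputes %x along the threaded CFG.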
295 std::vector<BasicBlock*> worklist;
296 worklist.push_back(OldSucc);
297
298 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
299 if (!Entry || Entry->OverDefined.empty())
300 return; // Nothing to process here.
301 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
302 Entry->OverDefined.end());
303
304 // Use a worklist to perform a depth-first search of OldSucc's successors.
305 // NOTE: We do not need a visited list since any blocks we have already
306 // visited will have had their overdefined markers cleared already, and we
307 // thus won't loop to their successors.
308 while (!worklist.empty()) {
309 BasicBlock *ToUpdate = worklist.back();
310 worklist.pop_back();
311
312 // Skip blocks only accessible through NewSucc.
313 if (ToUpdate == NewSucc) continue;
314
315 // If a value was marked overdefined in OldSucc, and is here too...
316 auto OI = BlockCache.find_as(ToUpdate);
317 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
318 continue;
319 auto &ValueSet = OI->second->OverDefined;
320
321 bool changed = false;
322 for (Value *V : ValsToClear) {
323 if (!ValueSet.erase(V))
324 continue;
325
326 // If we removed anything, then we potentially need to update the
327 // block's successors too.
328 changed = true;
329 }
330
331 if (!changed) continue;
332
333 llvm::append_range(worklist, successors(ToUpdate));
334 }
335}
336
337namespace llvm {
338namespace {
339/// An assembly annotator class to print LazyValueCache information in
340/// comments.
341class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
342 LazyValueInfoImpl *LVIImpl;
343 // While analyzing which blocks we can solve values for, we need the dominator
344 // information.
345 DominatorTree &DT;
346
347public:
348 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
349 : LVIImpl(L), DT(DTree) {}
350
351 void emitBasicBlockStartAnnot(const BasicBlock *BB,
352 formatted_raw_ostream &OS) override;
353
354 void emitInstructionAnnot(const Instruction *I,
355 formatted_raw_ostream &OS) override;
356};
357} // namespace
358// The actual implementation of the lazy analysis and update.
359class LazyValueInfoImpl {
360
361 /// Cached results from previous queries
362 LazyValueInfoCache TheCache;
363
364 /// This stack holds the state of the value solver during a query.
365 /// It basically emulates the callstack of the naive
366 /// recursive value lookup process.
367 SmallVector<std::pair<BasicBlock *, Value *>, 8> BlockValueStack;
368
369 /// Keeps track of which block-value pairs are in BlockValueStack.
370 DenseSet<std::pair<BasicBlock *, Value *>> BlockValueSet;
371
372 /// Push BV onto BlockValueStack unless it's already in there.
373 /// Returns true on success.
374 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
375 if (!BlockValueSet.insert(BV).second)
376 return false; // It's already in the stack.
377
378 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
379 << BV.first->getName() << "\n");
380 BlockValueStack.push_back(BV);
381 return true;
382 }
383
384 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
385 const DataLayout &DL; ///< A mandatory DataLayout
386
387 /// Declaration of the llvm.experimental.guard() intrinsic,
388 /// if it exists in the module.
389 Function *GuardDecl;
390
391 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
392 Instruction *CxtI);
393 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
394 BasicBlock *T,
395 Instruction *CxtI = nullptr);
396
397 // These methods process one work item and may add more. A false value
398 // returned means that the work item was not completely processed and must
399 // be revisited after going through the new items.
400 bool solveBlockValue(Value *Val, BasicBlock *BB);
401 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
402 BasicBlock *BB);
403 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
404 BasicBlock *BB);
405 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
406 BasicBlock *BB);
407 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
408 BasicBlock *BB);
409 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
410 BasicBlock *BB);
411 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
412 Instruction *I, BasicBlock *BB,
413 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
414 OpFn);
415 std::optional<ValueLatticeElement>
416 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
417 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
418 BasicBlock *BB);
419 std::optional<ValueLatticeElement>
420 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
421 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
422 BasicBlock *BB);
423 std::optional<ValueLatticeElement>
424 solveBlockValueInsertElement(InsertElementInst *IEI, BasicBlock *BB);
425 std::optional<ValueLatticeElement>
426 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
427 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
428 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
429 ValueLatticeElement &BBLV,
430 Instruction *BBI);
431
432 void solve();
433
434 // For the following methods, if UseBlockValue is true, the function may
435 // push additional values to the worklist and return nullopt. If
436 // UseBlockValue is false, it will never return nullopt.
437
438 std::optional<ValueLatticeElement>
439 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
440 const APInt &Offset, Instruction *CxtI,
441 bool UseBlockValue);
442
443 std::optional<ValueLatticeElement>
444 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
445 bool UseBlockValue);
446 ValueLatticeElement getValueFromTrunc(Value *Val, TruncInst *Trunc,
447 bool IsTrueDest);
448
449 std::optional<ValueLatticeElement>
450 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
451 bool UseBlockValue, unsigned Depth = 0);
452
453 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
454 BasicBlock *BBFrom,
455 BasicBlock *BBTo,
456 bool UseBlockValue);
457
458public:
459 /// This is the query interface to determine the lattice value for the
460 /// specified Value* at the context instruction (if specified) or at the
461 /// start of the block.
462 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
463 Instruction *CxtI = nullptr);
464
465 /// This is the query interface to determine the lattice value for the
466 /// specified Value* at the specified instruction using only information
467 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
468 /// recursive query is performed.
469 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
470
471 /// This is the query interface to determine the lattice
472 /// value for the specified Value* that is true on the specified edge.
473 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
474 BasicBlock *ToBB,
475 Instruction *CxtI = nullptr);
476
477 ValueLatticeElement getValueAtUse(const Use &U);
478
479 /// Completely flush all previously computed values
480 void clear() {
481 TheCache.clear();
482 }
483
484 /// Printing the LazyValueInfo Analysis.
485 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
486 LazyValueInfoAnnotatedWriter Writer(this, DTree);
487 F.print(OS, &Writer);
488 }
489
490 /// This is part of the update interface to remove information related to this
491 /// value from the cache.
492 void forgetValue(Value *V) { TheCache.eraseValue(V); }
493
494 /// This is part of the update interface to inform the cache
495 /// that a block has been deleted.
496 void eraseBlock(BasicBlock *BB) {
497 TheCache.eraseBlock(BB);
498 }
499
500 /// This is the update interface to inform the cache that an edge from
501 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
502 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
503
504 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
505 Function *GuardDecl)
506 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
507};
508} // namespace llvm
509
510void LazyValueInfoImpl::solve() {
511 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack =
512 BlockValueStack;
513
514 unsigned processedCount = 0;
515 while (!BlockValueStack.empty()) {
516 processedCount++;
517 // Abort if we have to process too many values to get a result for this one.
518 // Because of the design of the overdefined cache currently being per-block
519 // to avoid naming-related issues (i.e., it wants to try to give different
520 // results for the same name in different blocks), overdefined results don't
521 // get cached globally, which in turn means we will often try to rediscover
522 // the same overdefined result again and again. Once something like
523 // PredicateInfo is used in LVI or CVP, we should be able to make the
524 // overdefined cache global, and remove this throttle.
525 if (processedCount > MaxProcessedPerValue) {
527 dbgs() << "Giving up on stack because we are getting too deep\n");
528 // Fill in the original values
529 while (!StartingStack.empty()) {
530 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
531 TheCache.insertResult(e.second, e.first,
532 ValueLatticeElement::getOverdefined());
533 StartingStack.pop_back();
534 }
535 BlockValueSet.clear();
536 BlockValueStack.clear();
537 return;
538 }
539 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
540 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
541 unsigned StackSize = BlockValueStack.size();
542 (void) StackSize;
543
544 if (solveBlockValue(e.second, e.first)) {
545 // The work item was completely processed.
546 assert(BlockValueStack.size() == StackSize &&
547 BlockValueStack.back() == e && "Nothing should have been pushed!");
548#ifndef NDEBUG
549 std::optional<ValueLatticeElement> BBLV =
550 TheCache.getCachedValueInfo(e.second, e.first);
551 assert(BBLV && "Result should be in cache!");
553 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
554 << *BBLV << "\n");
555#endif
556
557 BlockValueStack.pop_back();
558 BlockValueSet.erase(e);
559 } else {
560 // More work needs to be done before revisiting.
561 assert(BlockValueStack.size() == StackSize + 1 &&
562 "Exactly one element should have been pushed!");
563 }
564 }
565}
566
567std::optional<ValueLatticeElement>
568LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
569 Instruction *CxtI) {
570 // If already a constant, there is nothing to compute.
571 if (Constant *VC = dyn_cast<Constant>(Val))
572 return ValueLatticeElement::get(VC);
573
574 if (std::optional<ValueLatticeElement> OptLatticeVal =
575 TheCache.getCachedValueInfo(Val, BB)) {
576 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
577 return OptLatticeVal;
578 }
579
580 // We have hit a cycle, assume overdefined.
581 if (!pushBlockValue({ BB, Val }))
582 return ValueLatticeElement::getOverdefined();
583
584 // Yet to be resolved.
585 return std::nullopt;
586}
587
588static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
589 switch (BBI->getOpcode()) {
590 default:
591 break;
592 case Instruction::Call:
593 case Instruction::Invoke:
594 if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
595 return ValueLatticeElement::getRange(*Range);
596 [[fallthrough]];
597 case Instruction::Load:
598 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
599 if (isa<IntegerType>(BBI->getType())) {
600 return ValueLatticeElement::getRange(
601 getConstantRangeFromMetadata(*Ranges));
602 }
603 break;
604 };
605 // Nothing known - will be intersected with other facts
606 return ValueLatticeElement::getOverdefined();
607}
608
609bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
610 assert(!isa<Constant>(Val) && "Value should not be constant");
611 assert(!TheCache.getCachedValueInfo(Val, BB) &&
612 "Value should not be in cache");
613
614 // Hold off inserting this value into the Cache in case we have to return
615 // false and come back later.
616 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
617 if (!Res)
618 // Work pushed, will revisit
619 return false;
620
621 TheCache.insertResult(Val, BB, *Res);
622 return true;
623}
624
625std::optional<ValueLatticeElement>
626LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
627 Instruction *BBI = dyn_cast<Instruction>(Val);
628 if (!BBI || BBI->getParent() != BB)
629 return solveBlockValueNonLocal(Val, BB);
630
631 if (PHINode *PN = dyn_cast<PHINode>(BBI))
632 return solveBlockValuePHINode(PN, BB);
633
634 if (auto *SI = dyn_cast<SelectInst>(BBI))
635 return solveBlockValueSelect(SI, BB);
636
637 // If this value is a nonnull pointer, record its range and bail out. Note
638 // that for all other pointer typed values, we terminate the search at the
639 // definition. We could easily extend this to look through geps, bitcasts,
640 // and the like to prove non-nullness, but it's not clear that's worth it
641 // compile time wise. The context-insensitive value walk done inside
642 // isKnownNonZero gets most of the profitable cases at much less expense.
643 // This does mean that we have a sensitivity to where the defining
644 // instruction is placed, even if it could legally be hoisted much higher.
645 // That is unfortunate.
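 // For example, "%p = getelementptr inbounds i8, ptr %q, i64 4" may already be
 // proven non-null by the isKnownNonZero call below; we deliberately do not
 // walk back through the GEP ourselves.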
646 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
647 if (PT && isKnownNonZero(BBI, DL))
648 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
649
650 if (BBI->getType()->isIntOrIntVectorTy()) {
651 if (auto *CI = dyn_cast<CastInst>(BBI))
652 return solveBlockValueCast(CI, BB);
653
654 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
655 return solveBlockValueBinaryOp(BO, BB);
656
657 if (auto *IEI = dyn_cast<InsertElementInst>(BBI))
658 return solveBlockValueInsertElement(IEI, BB);
659
660 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
661 return solveBlockValueExtractValue(EVI, BB);
662
663 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
664 return solveBlockValueIntrinsic(II, BB);
665 }
666
667 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
668 << "' - unknown inst def found.\n");
669 return getFromRangeMetadata(BBI);
670}
671
672static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet,
673 bool IsDereferenced = true) {
674 // TODO: Use NullPointerIsDefined instead.
675 if (Ptr->getType()->getPointerAddressSpace() == 0)
676 PtrSet.insert(IsDereferenced ? getUnderlyingObject(Ptr)
677 : Ptr->stripInBoundsOffsets());
678}
679
680static void AddNonNullPointersByInstruction(
681 Instruction *I, NonNullPointerSet &PtrSet) {
682 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
683 AddNonNullPointer(L->getPointerOperand(), PtrSet);
684 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
685 AddNonNullPointer(S->getPointerOperand(), PtrSet);
686 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
687 if (MI->isVolatile()) return;
688
689 // FIXME: check whether it has a valuerange that excludes zero?
690 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
691 if (!Len || Len->isZero()) return;
692
693 AddNonNullPointer(MI->getRawDest(), PtrSet);
694 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
695 AddNonNullPointer(MTI->getRawSource(), PtrSet);
696 } else if (auto *CB = dyn_cast<CallBase>(I)) {
697 for (auto &U : CB->args()) {
698 if (U->getType()->isPointerTy() &&
699 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
700 /*AllowUndefOrPoison=*/false))
701 AddNonNullPointer(U.get(), PtrSet, /*IsDereferenced=*/false);
702 }
703 }
704}
705
706bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
707 if (NullPointerIsDefined(BB->getParent(),
708 Val->getType()->getPointerAddressSpace()))
709 return false;
710
711 Val = Val->stripInBoundsOffsets();
712 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
713 NonNullPointerSet NonNullPointers;
714 for (Instruction &I : *BB)
715 AddNonNullPointersByInstruction(&I, NonNullPointers);
716 return NonNullPointers;
717 });
718}
719
720std::optional<ValueLatticeElement>
721LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
722 ValueLatticeElement Result; // Start Undefined.
723
724 // If this is the entry block, we must be asking about an argument.
725 if (BB->isEntryBlock()) {
726 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
727 if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
728 return ValueLatticeElement::getRange(*Range);
729 return ValueLatticeElement::getOverdefined();
730 }
731
732 // Loop over all of our predecessors, merging what we know from them into
733 // result. If we encounter an unexplored predecessor, we eagerly explore it
734 // in a depth first manner. In practice, this has the effect of discovering
735 // paths we can't analyze eagerly without spending compile time analyzing
736 // other paths. This heuristic benefits from the fact that predecessors are
737 // frequently arranged such that dominating ones come first and we quickly
738 // find a path to function entry. TODO: We should consider explicitly
739 // canonicalizing to make this true rather than relying on this happy
740 // accident.
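 // For example, if Val reaches BB from two predecessors with edge values
 // [0, 10) and [20, 30), the merged result is the smallest constant range
 // covering both, rather than overdefined.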
741 std::optional<BBLatticeElementMap> PredLatticeElements;
742 if (PerPredRanges)
743 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
744 for (BasicBlock *Pred : predecessors(BB)) {
745 // Skip self loops.
746 if (Pred == BB)
747 continue;
748 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
749 if (!EdgeResult)
750 // Explore that input, then return here
751 return std::nullopt;
752
753 Result.mergeIn(*EdgeResult);
754
755 // If we hit overdefined, exit early. The BlockVals entry is already set
756 // to overdefined.
757 if (Result.isOverdefined()) {
758 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
759 << "' - overdefined because of pred '"
760 << Pred->getName() << "' (non local).\n");
761 return Result;
762 }
763 if (PerPredRanges)
764 PredLatticeElements->insert({Pred, *EdgeResult});
765 }
766
767 if (PerPredRanges)
768 TheCache.insertPredecessorResults(Val, BB, *PredLatticeElements);
769
770 // Return the merged value, which is more precise than 'overdefined'.
771 assert(!Result.isOverdefined());
772 return Result;
773}
774
775std::optional<ValueLatticeElement>
776LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
777 ValueLatticeElement Result; // Start Undefined.
778
779 // Loop over all of our predecessors, merging what we know from them into
780 // result. See the comment about the chosen traversal order in
781 // solveBlockValueNonLocal; the same reasoning applies here.
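 // For example, "%p = phi i32 [ 0, %a ], [ 1, %b ]" merges the per-edge
 // constants 0 and 1 into the constant range [0, 2).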
782 std::optional<BBLatticeElementMap> PredLatticeElements;
783 if (PerPredRanges)
784 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
785 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
786 BasicBlock *PhiBB = PN->getIncomingBlock(i);
787 Value *PhiVal = PN->getIncomingValue(i);
788 // Note that we can provide PN as the context value to getEdgeValue, even
789 // though the results will be cached, because PN is the value being used as
790 // the cache key in the caller.
791 std::optional<ValueLatticeElement> EdgeResult =
792 getEdgeValue(PhiVal, PhiBB, BB, PN);
793 if (!EdgeResult)
794 // Explore that input, then return here
795 return std::nullopt;
796
797 Result.mergeIn(*EdgeResult);
798
799 // If we hit overdefined, exit early. The BlockVals entry is already set
800 // to overdefined.
801 if (Result.isOverdefined()) {
802 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
803 << "' - overdefined because of pred (local).\n");
804
805 return Result;
806 }
807
808 if (PerPredRanges)
809 PredLatticeElements->insert({PhiBB, *EdgeResult});
810 }
811
812 if (PerPredRanges)
813 TheCache.insertPredecessorResults(PN, BB, *PredLatticeElements);
814
815 // Return the merged value, which is more precise than 'overdefined'.
816 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
817 return Result;
818}
819
820// If we can determine a constraint on the value given conditions assumed by
821// the program, intersect those constraints with BBLV
822void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
823 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
824 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
825 if (!BBI)
826 return;
827
828 BasicBlock *BB = BBI->getParent();
829 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
830 if (!AssumeVH)
831 continue;
832
833 // Only check assumes in the block of the context instruction. Other
834 // assumes will have already been taken into account when the value was
835 // propagated from predecessor blocks.
836 auto *I = cast<CallInst>(AssumeVH);
837 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
838 continue;
839
840 BBLV = BBLV.intersect(*getValueFromCondition(Val, I->getArgOperand(0),
841 /*IsTrueDest*/ true,
842 /*UseBlockValue*/ false));
843 }
844
845 // If guards are not used in the module, don't spend time looking for them
846 if (GuardDecl && !GuardDecl->use_empty() &&
847 BBI->getIterator() != BB->begin()) {
848 for (Instruction &I :
849 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
850 Value *Cond = nullptr;
851 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
852 BBLV = BBLV.intersect(*getValueFromCondition(Val, Cond,
853 /*IsTrueDest*/ true,
854 /*UseBlockValue*/ false));
855 }
856 }
857
858 if (BBLV.isOverdefined()) {
859 // Check whether we're checking at the terminator, and the pointer has
860 // been dereferenced in this block.
861 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
862 if (PTy && BB->getTerminator() == BBI &&
863 isNonNullAtEndOfBlock(Val, BB))
864 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
865 }
866}
867
868std::optional<ValueLatticeElement>
869LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
870 // Recurse on our inputs if needed
871 std::optional<ValueLatticeElement> OptTrueVal =
872 getBlockValue(SI->getTrueValue(), BB, SI);
873 if (!OptTrueVal)
874 return std::nullopt;
875 ValueLatticeElement &TrueVal = *OptTrueVal;
876
877 std::optional<ValueLatticeElement> OptFalseVal =
878 getBlockValue(SI->getFalseValue(), BB, SI);
879 if (!OptFalseVal)
880 return std::nullopt;
881 ValueLatticeElement &FalseVal = *OptFalseVal;
882
883 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
884 const ConstantRange &TrueCR = TrueVal.asConstantRange(SI->getType());
885 const ConstantRange &FalseCR = FalseVal.asConstantRange(SI->getType());
886 Value *LHS = nullptr;
887 Value *RHS = nullptr;
888 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
889 // Is this a min specifically of our two inputs? (Avoid the risk of
890 // ValueTracking getting smarter looking back past our immediate inputs.)
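 // For example, if the true value is in [0, 10), the false value is in
 // [5, 20) and the select is recognized as an unsigned min of the two, the
 // result below is the umin of the two ranges, i.e. [0, 10).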
891 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
892 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
893 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
894 ConstantRange ResultCR = [&]() {
895 switch (SPR.Flavor) {
896 default:
897 llvm_unreachable("unexpected minmax type!");
898 case SPF_SMIN: /// Signed minimum
899 return TrueCR.smin(FalseCR);
900 case SPF_UMIN: /// Unsigned minimum
901 return TrueCR.umin(FalseCR);
902 case SPF_SMAX: /// Signed maximum
903 return TrueCR.smax(FalseCR);
904 case SPF_UMAX: /// Unsigned maximum
905 return TrueCR.umax(FalseCR);
906 };
907 }();
908 return ValueLatticeElement::getRange(
909 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
910 FalseVal.isConstantRangeIncludingUndef());
911 }
912
913 if (SPR.Flavor == SPF_ABS) {
914 if (LHS == SI->getTrueValue())
915 return ValueLatticeElement::getRange(
916 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
917 if (LHS == SI->getFalseValue())
918 return ValueLatticeElement::getRange(
919 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
920 }
921
922 if (SPR.Flavor == SPF_NABS) {
923 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
924 if (LHS == SI->getTrueValue())
925 return ValueLatticeElement::getRange(
926 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
927 if (LHS == SI->getFalseValue())
928 return ValueLatticeElement::getRange(
929 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
930 }
931 }
932
933 // Can we constrain the facts about the true and false values by using the
934 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
935 // TODO: We could potentially refine an overdefined true value above.
936 Value *Cond = SI->getCondition();
937 // If the value is undef, a different value may be chosen in
938 // the select condition.
939 if (isGuaranteedNotToBeUndef(Cond, AC)) {
940 TrueVal =
941 TrueVal.intersect(*getValueFromCondition(SI->getTrueValue(), Cond,
942 /*IsTrueDest*/ true,
943 /*UseBlockValue*/ false));
944 FalseVal =
945 FalseVal.intersect(*getValueFromCondition(SI->getFalseValue(), Cond,
946 /*IsTrueDest*/ false,
947 /*UseBlockValue*/ false));
948 }
949
950 ValueLatticeElement Result = TrueVal;
951 Result.mergeIn(FalseVal);
952 return Result;
953}
954
955std::optional<ConstantRange>
956LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
957 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
958 if (!OptVal)
959 return std::nullopt;
960 return OptVal->asConstantRange(V->getType());
961}
962
963std::optional<ValueLatticeElement>
964LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
965 // Filter out casts we don't know how to reason about before attempting to
966 // recurse on our operand. This can cut a long search short if we know we're
967 // not going to be able to get any useful information anyway.
968 switch (CI->getOpcode()) {
969 case Instruction::Trunc:
970 case Instruction::SExt:
971 case Instruction::ZExt:
972 break;
973 default:
974 // Unhandled instructions are overdefined.
975 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
976 << "' - overdefined (unknown cast).\n");
977 return ValueLatticeElement::getOverdefined();
978 }
979
980 // Figure out the range of the LHS. If that fails, we still apply the
981 // transfer rule on the full set since we may be able to locally infer
982 // interesting facts.
983 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
984 if (!LHSRes)
985 // More work to do before applying this transfer rule.
986 return std::nullopt;
987 const ConstantRange &LHSRange = *LHSRes;
988
989 const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
990
991 // NOTE: We're currently limited by the set of operations that ConstantRange
992 // can evaluate symbolically. Enhancing that set will allow us to analyze
993 // more definitions.
994 ConstantRange Res = ConstantRange::getEmpty(ResultBitWidth);
995 if (auto *Trunc = dyn_cast<TruncInst>(CI))
996 Res = LHSRange.truncate(ResultBitWidth, Trunc->getNoWrapKind());
997 else
998 Res = LHSRange.castOp(CI->getOpcode(), ResultBitWidth);
999
1000 return ValueLatticeElement::getRange(Res);
1001}
1002
1003std::optional<ValueLatticeElement>
1004LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
1005 Instruction *I, BasicBlock *BB,
1006 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
1007 OpFn) {
1008 Value *LHS = I->getOperand(0);
1009 Value *RHS = I->getOperand(1);
1010
1011 auto ThreadBinOpOverSelect =
1012 [&](Value *X, const ConstantRange &CRX, SelectInst *Y,
1013 bool XIsLHS) -> std::optional<ValueLatticeElement> {
1014 Value *Cond = Y->getCondition();
1015 // Only handle selects with constant values.
1016 Constant *TrueC = dyn_cast<Constant>(Y->getTrueValue());
1017 if (!TrueC)
1018 return std::nullopt;
1019 Constant *FalseC = dyn_cast<Constant>(Y->getFalseValue());
1020 if (!FalseC)
1021 return std::nullopt;
1023 return std::nullopt;
1024
1025 ConstantRange TrueX =
1026 CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/true,
1027 /*UseBlockValue=*/false)
1028 ->asConstantRange(X->getType()));
1029 ConstantRange FalseX =
1030 CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/false,
1031 /*UseBlockValue=*/false)
1032 ->asConstantRange(X->getType()));
1033 ConstantRange TrueY = TrueC->toConstantRange();
1034 ConstantRange FalseY = FalseC->toConstantRange();
1035
1036 if (XIsLHS)
1037 return ValueLatticeElement::getRange(
1038 OpFn(TrueX, TrueY).unionWith(OpFn(FalseX, FalseY)));
1039 return ValueLatticeElement::getRange(
1040 OpFn(TrueY, TrueX).unionWith(OpFn(FalseY, FalseX)));
1041 };
1042
1043 // Figure out the ranges of the operands. If that fails, use a
1044 // conservative range, but apply the transfer rule anyways. This
1045 // lets us pick up facts from expressions like "and i32 (call i32
1046 // @foo()), 32"
1047 std::optional<ConstantRange> LHSRes = getRangeFor(LHS, I, BB);
1048 if (!LHSRes)
1049 return std::nullopt;
1050
1051 // Try to thread binop over rhs select
1052 if (auto *SI = dyn_cast<SelectInst>(RHS)) {
1053 if (auto Res = ThreadBinOpOverSelect(LHS, *LHSRes, SI, /*XIsLHS=*/true))
1054 return *Res;
1055 }
1056
1057 std::optional<ConstantRange> RHSRes = getRangeFor(RHS, I, BB);
1058 if (!RHSRes)
1059 return std::nullopt;
1060
1061 // Try to thread binop over lhs select
1062 if (auto *SI = dyn_cast<SelectInst>(LHS)) {
1063 if (auto Res = ThreadBinOpOverSelect(RHS, *RHSRes, SI, /*XIsLHS=*/false))
1064 return *Res;
1065 }
1066
1067 const ConstantRange &LHSRange = *LHSRes;
1068 const ConstantRange &RHSRange = *RHSRes;
1069
1070 std::optional<ValueLatticeElement> MergedResult =
1071 ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
1072
1073 if (!PerPredRanges)
1074 return MergedResult;
1075
1076 std::optional<BBLatticeElementMap> PredLHS =
1077 TheCache.getCachedPredecessorInfo(LHS, BB);
1078 if (!PredLHS)
1079 return MergedResult;
1080 std::optional<BBLatticeElementMap> PredRHS =
1081 TheCache.getCachedPredecessorInfo(RHS, BB);
1082 if (!PredRHS)
1083 return MergedResult;
1084
1085 const BBLatticeElementMap &LHSPredMap = *PredLHS;
1086 const BBLatticeElementMap &RHSPredMap = *PredRHS;
1087
1088 BBLatticeElementMap PredLatticeElements;
1089 ValueLatticeElement OverallPredResult;
1090 for (auto *Pred : predecessors(BB)) {
1091 auto LHSIt = LHSPredMap.find_as(Pred);
1092 if (LHSIt == LHSPredMap.end())
1093 return MergedResult;
1094 const ValueLatticeElement &LHSFromPred = LHSIt->second;
1095 std::optional<ConstantRange> LHSFromPredRes =
1096 LHSFromPred.asConstantRange(LHS->getType());
1097 if (!LHSFromPredRes)
1098 return MergedResult;
1099
1100 auto RHSIt = RHSPredMap.find_as(Pred);
1101 if (RHSIt == RHSPredMap.end())
1102 return MergedResult;
1103 const ValueLatticeElement &RHSFromPred = RHSIt->second;
1104 std::optional<ConstantRange> RHSFromPredRes =
1105 RHSFromPred.asConstantRange(RHS->getType());
1106 if (!RHSFromPredRes)
1107 return MergedResult;
1108
1109 const ConstantRange &LHSFromPredRange = *LHSFromPredRes;
1110 const ConstantRange &RHSFromPredRange = *RHSFromPredRes;
1111 std::optional<ValueLatticeElement> PredResult =
1112 ValueLatticeElement::getRange(OpFn(LHSFromPredRange, RHSFromPredRange));
1113 if (!PredResult)
1114 return MergedResult;
1115 if (PredResult->isOverdefined()) {
1116 LLVM_DEBUG(
1117 dbgs() << " pred BB '" << Pred->getName() << "' for BB '"
1118 << BB->getName()
1119 << "' overdefined. Discarding all predecessor intervals.\n");
1120 return MergedResult;
1121 }
1122 PredLatticeElements.insert({Pred, *PredResult});
1123 OverallPredResult.mergeIn(*PredResult);
1124 }
1125
1126 // If this point is reached, all predecessors for both LHS and RHS have
1127 // constant ranges previously computed. Cache the result and use the
1128 // OverallPredResult.
1129 TheCache.insertPredecessorResults(I, BB, PredLatticeElements);
1130
1131 LLVM_DEBUG(dbgs() << " Using predecessor intervals, evaluated " << *I
1132 << " to: " << OverallPredResult << ".\n");
1133
1134 if (!MergedResult)
1135 return OverallPredResult;
1136
1137 LLVM_DEBUG(dbgs() << " Intersecting intervals for " << *I << ": "
1138 << OverallPredResult << " and " << MergedResult << ".\n");
1139 return MergedResult->intersect(OverallPredResult);
1140}
1141
1142std::optional<ValueLatticeElement>
1143LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
1144 assert(BO->getOperand(0)->getType()->isSized() &&
1145 "all operands to binary operators are sized");
1146 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1147 unsigned NoWrapKind = OBO->getNoWrapKind();
1148 return solveBlockValueBinaryOpImpl(
1149 BO, BB,
1150 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1151 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1152 });
1153 }
1154
1155 return solveBlockValueBinaryOpImpl(
1156 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1157 return CR1.binaryOp(BO->getOpcode(), CR2);
1158 });
1159}
1160
1161std::optional<ValueLatticeElement>
1162LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1163 BasicBlock *BB) {
1164 return solveBlockValueBinaryOpImpl(
1165 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1166 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1167 });
1168}
1169
1170std::optional<ValueLatticeElement>
1171LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1172 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1173 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1174 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1175 << "' - unknown intrinsic.\n");
1176 return MetadataVal;
1177 }
1178
1179 SmallVector<ConstantRange, 2> OpRanges;
1180 for (Value *Op : II->args()) {
1181 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1182 if (!Range)
1183 return std::nullopt;
1184 OpRanges.push_back(*Range);
1185 }
1186
1187 return ValueLatticeElement::getRange(
1188 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges))
1189 .intersect(MetadataVal);
1190}
1191
1192std::optional<ValueLatticeElement>
1193LazyValueInfoImpl::solveBlockValueInsertElement(InsertElementInst *IEI,
1194 BasicBlock *BB) {
1195 std::optional<ValueLatticeElement> OptEltVal =
1196 getBlockValue(IEI->getOperand(1), BB, IEI);
1197 if (!OptEltVal)
1198 return std::nullopt;
1199 ValueLatticeElement &Res = *OptEltVal;
1200
1201 std::optional<ValueLatticeElement> OptVecVal =
1202 getBlockValue(IEI->getOperand(0), BB, IEI);
1203 if (!OptVecVal)
1204 return std::nullopt;
1205
1206 // Bail out if the inserted element is a constant expression. Unlike other
1207 // ValueLattice types, these are not considered an implicit splat when a
1208 // vector type is used.
1209 // We could call ConstantFoldInsertElementInstruction here to handle these.
1210 if (OptEltVal->isConstant())
1211 return ValueLatticeElement::getOverdefined();
1212
1213 Res.mergeIn(*OptVecVal);
1214 return Res;
1215}
1216
1217std::optional<ValueLatticeElement>
1218LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1219 BasicBlock *BB) {
1220 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1221 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1222 return solveBlockValueOverflowIntrinsic(WO, BB);
1223
1224 // Handle extractvalue of insertvalue to allow further simplification
1225 // based on replaced with.overflow intrinsics.
1226 if (Value *V = simplifyExtractValueInst(
1227 EVI->getAggregateOperand(), EVI->getIndices(),
1228 EVI->getDataLayout()))
1229 return getBlockValue(V, BB, EVI);
1230
1231 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1232 << "' - overdefined (unknown extractvalue).\n");
1233 return ValueLatticeElement::getOverdefined();
1234}
1235
1236static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1237 ICmpInst::Predicate Pred) {
1238 if (LHS == Val)
1239 return true;
1240
1241 // Handle range checking idiom produced by InstCombine. We will subtract the
1242 // offset from the allowed range for RHS in this case.
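 // For example, InstCombine canonicalizes "x >= 16 && x < 32" into
 // "(x + (-16)) u< 16"; matching the add here records Offset = -16 so the
 // caller can shift the range it computes for LHS back onto Val.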
1243 const APInt *C;
1244 if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1245 Offset = *C;
1246 return true;
1247 }
1248
1249 // Handle the symmetric case. This appears in saturation patterns like
1250 // (x == 16) ? 16 : (x + 1).
1251 if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1252 Offset = -*C;
1253 return true;
1254 }
1255
1256 // If (x | y) < C, then (x < C) && (y < C).
1257 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1258 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1259 return true;
1260
1261 // If (x & y) > C, then (x > C) && (y > C).
1262 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1263 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1264 return true;
1265
1266 return false;
1267}
1268
1269/// Get value range for a "(Val + Offset) Pred RHS" condition.
1270std::optional<ValueLatticeElement>
1271LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1272 Value *RHS,
1273 const APInt &Offset,
1274 Instruction *CxtI,
1275 bool UseBlockValue) {
1276 ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1277 /*isFullSet=*/true);
1278 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1279 RHSRange = ConstantRange(CI->getValue());
1280 } else if (UseBlockValue) {
1281 std::optional<ValueLatticeElement> R =
1282 getBlockValue(RHS, CxtI->getParent(), CxtI);
1283 if (!R)
1284 return std::nullopt;
1285 RHSRange = R->asConstantRange(RHS->getType());
1286 }
1287
1288 ConstantRange TrueValues =
1289 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1290 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1291}
1292
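// Helper for signed predicates. For example, "x sge 8" is handled by inverting
// it to "x slt 8": Fn computes the range for the slt form and the result is
// inverted back at the end.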
1293static std::optional<ConstantRange>
1294getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1295 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1296 bool Invert = false;
1297 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1298 Pred = ICmpInst::getInversePredicate(Pred);
1299 Invert = true;
1300 }
1301 if (Pred == ICmpInst::ICMP_SLE) {
1302 Pred = ICmpInst::ICMP_SLT;
1303 if (RHS.isMaxSignedValue())
1304 return std::nullopt; // Could also return full/empty here, if we wanted.
1305 ++RHS;
1306 }
1307 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1308 if (auto CR = Fn(RHS))
1309 return Invert ? CR->inverse() : CR;
1310 return std::nullopt;
1311}
1312
1313/// Get value range for a "ctpop(Val) Pred RHS" condition.
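/// For example, "ctpop(x) == 2" on i8 gives ResMin = ResMax = 2, so ValMin is
/// 0b00000011 (the smallest value with two set bits), ValMax is 0b11000000
/// (the largest), and the computed range for x is [3, 0xC1).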
1314static ValueLatticeElement getValueFromICmpCtpop(ICmpInst::Predicate Pred,
1315 Value *RHS) {
1316 unsigned BitWidth = RHS->getType()->getScalarSizeInBits();
1317
1318 auto *RHSConst = dyn_cast<ConstantInt>(RHS);
1319 if (!RHSConst)
1320 return ValueLatticeElement::getOverdefined();
1321
1322 ConstantRange ResValRange =
1323 ConstantRange::makeExactICmpRegion(Pred, RHSConst->getValue());
1324
1325 unsigned ResMin = ResValRange.getUnsignedMin().getLimitedValue(BitWidth);
1326 unsigned ResMax = ResValRange.getUnsignedMax().getLimitedValue(BitWidth);
1327
1328 APInt ValMin = APInt::getLowBitsSet(BitWidth, ResMin);
1329 APInt ValMax = APInt::getHighBitsSet(BitWidth, ResMax);
1330 return ValueLatticeElement::getRange(
1331 ConstantRange::getNonEmpty(std::move(ValMin), ValMax + 1));
1332}
1333
1334std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1335 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1336 Value *LHS = ICI->getOperand(0);
1337 Value *RHS = ICI->getOperand(1);
1338
1339 // Get the predicate that must hold along the considered edge.
1340 CmpInst::Predicate EdgePred =
1341 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1342
1343 if (isa<Constant>(RHS)) {
1344 if (ICI->isEquality() && LHS == Val) {
1345 if (EdgePred == ICmpInst::ICMP_EQ)
1346 return ValueLatticeElement::get(cast<Constant>(RHS));
1347 else if (!isa<UndefValue>(RHS))
1348 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1349 }
1350 }
1351
1352 Type *Ty = Val->getType();
1353 if (!Ty->isIntegerTy())
1354 return ValueLatticeElement::getOverdefined();
1355
1356 unsigned BitWidth = Ty->getScalarSizeInBits();
1357 APInt Offset(BitWidth, 0);
1358 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1359 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1360 UseBlockValue);
1361
1362 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1363 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1364 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1365 UseBlockValue);
1366
1367 if (match(LHS, m_Intrinsic<Intrinsic::ctpop>(m_Specific(Val))))
1368 return getValueFromICmpCtpop(EdgePred, RHS);
1369
1370 const APInt *Mask, *C;
1371 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1372 match(RHS, m_APInt(C))) {
1373 // If (Val & Mask) == C then all the masked bits are known and we can
1374 // compute a value range based on that.
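 // For example, "(x & 0xF0) == 0x30" fixes bits 7..4 of x to 0011 and leaves
 // the low bits unknown; fromKnownBits turns that into the range [0x30, 0x40).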
1375 if (EdgePred == ICmpInst::ICMP_EQ) {
1376 KnownBits Known;
1377 Known.Zero = ~*C & *Mask;
1378 Known.One = *C & *Mask;
1379 return ValueLatticeElement::getRange(
1380 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1381 }
1382
1383 if (EdgePred == ICmpInst::ICMP_NE)
1386 }
1387
1388 // If (X urem Modulus) >= C, then X >= C.
1389 // If trunc X >= C, then X >= C.
1390 // TODO: An upper bound could be computed as well.
1391 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1392 m_Trunc(m_Specific(Val)))) &&
1393 match(RHS, m_APInt(C))) {
1394 // Use the icmp region so we don't have to deal with different predicates.
1395 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1396 if (!CR.isEmptySet())
1397 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1398 CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
1399 }
1400
1401 // Recognize:
1402 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1403 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1404 const APInt *ShAmtC;
1405 if (CmpInst::isSigned(EdgePred) &&
1406 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1407 match(RHS, m_APInt(C))) {
1408 auto CR = getRangeViaSLT(
1409 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1410 APInt New = RHS << *ShAmtC;
1411 if ((New.ashr(*ShAmtC)) != RHS)
1412 return std::nullopt;
1413 return ConstantRange::getNonEmpty(
1414 APInt::getSignedMinValue(New.getBitWidth()), New);
1415 });
1416 if (CR)
1417 return ValueLatticeElement::getRange(*CR);
1418 }
1419
1420 // a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b
1421 Value *X, *Y;
1422 if (ICI->isEquality() && match(Val, m_Sub(m_Value(X), m_Value(Y)))) {
1423 // Peek through ptrtoints
1426 if ((X == LHS && Y == RHS) || (X == RHS && Y == LHS)) {
1427 Constant *NullVal = Constant::getNullValue(Val->getType());
1428 if (EdgePred == ICmpInst::ICMP_EQ)
1429 return ValueLatticeElement::get(NullVal);
1430 return ValueLatticeElement::getNot(NullVal);
1431 }
1432 }
1433
1434 return ValueLatticeElement::getOverdefined();
1435}
1436
1437ValueLatticeElement LazyValueInfoImpl::getValueFromTrunc(Value *Val,
1438 TruncInst *Trunc,
1439 bool IsTrueDest) {
1440 assert(Trunc->getType()->isIntOrIntVectorTy(1));
1441
1442 if (Trunc->getOperand(0) != Val)
1443 return ValueLatticeElement::getOverdefined();
1444
1445 Type *Ty = Val->getType();
1446
1447 if (Trunc->hasNoUnsignedWrap()) {
1448 if (IsTrueDest)
1449 return ValueLatticeElement::get(ConstantInt::get(Ty, 1));
1450 return ValueLatticeElement::get(Constant::getNullValue(Ty));
1451 }
1452
1453 if (IsTrueDest)
1454 return ValueLatticeElement::getNot(Constant::getNullValue(Ty));
1455 return ValueLatticeElement::getOverdefined();
1456}
1457
1458// Handle conditions of the form
1459// extractvalue(op.with.overflow(%x, C), 1).
1460static ValueLatticeElement getValueFromOverflowCondition(
1461 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1462 // TODO: This only works with a constant RHS for now. We could also compute
1463 // the range of the RHS, but this doesn't fit into the current structure of
1464 // the edge value calculation.
1465 const APInt *C;
1466 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1467 return ValueLatticeElement::getOverdefined();
1468
1469 // Calculate the possible values of %x for which no overflow occurs.
1470 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1471 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1472
1473 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1474 // constrained to its inverse (all values that might cause overflow).
1475 if (IsTrueDest)
1476 NWR = NWR.inverse();
1477 return ValueLatticeElement::getRange(NWR);
1478}
1479
1480std::optional<ValueLatticeElement>
1481LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1482 bool IsTrueDest, bool UseBlockValue,
1483 unsigned Depth) {
1484 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1485 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1486
1487 if (auto *Trunc = dyn_cast<TruncInst>(Cond))
1488 return getValueFromTrunc(Val, Trunc, IsTrueDest);
1489
1490 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1491 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1492 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1493 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1494
1495 if (++Depth == MaxAnalysisRecursionDepth)
1496 return ValueLatticeElement::getOverdefined();
1497
1498 Value *N;
1499 if (match(Cond, m_Not(m_Value(N))))
1500 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1501
1502 Value *L, *R;
1503 bool IsAnd;
1504 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1505 IsAnd = true;
1506 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1507 IsAnd = false;
1508 else
1509 return ValueLatticeElement::getOverdefined();
1510
1511 std::optional<ValueLatticeElement> LV =
1512 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1513 if (!LV)
1514 return std::nullopt;
1515 std::optional<ValueLatticeElement> RV =
1516 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1517 if (!RV)
1518 return std::nullopt;
1519
1520 // if (L && R) -> intersect L and R
1521 // if (!(L || R)) -> intersect !L and !R
1522 // if (L || R) -> union L and R
1523 // if (!(L && R)) -> union !L and !R
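 // For example, on the true edge of "if (x > 0 && x < 10)" the ranges derived
 // from the two conditions are intersected, giving x in [1, 10); on the false
 // edge only the union of the two negated conditions is known.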
1524 if (IsTrueDest ^ IsAnd) {
1525 LV->mergeIn(*RV);
1526 return *LV;
1527 }
1528
1529 return LV->intersect(*RV);
1530}
1531
1532// Return true if Usr has Op as an operand, otherwise false.
1533static bool usesOperand(User *Usr, Value *Op) {
1534 return is_contained(Usr->operands(), Op);
1535}
1536
1537// Return true if the instruction type of Val is supported by
1538// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1539// Call this before calling constantFoldUser() to find out if it's even worth
1540// attempting to call it.
1541static bool isOperationFoldable(User *Usr) {
1542 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1543}
1544
1545// Check if Usr can be simplified to an integer constant when the value of one
1546// of its operands Op is an integer constant OpConstVal. If so, return it as an
1547// lattice value range with a single element or otherwise return an overdefined
1548// lattice value.
1549static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1550 const APInt &OpConstVal,
1551 const DataLayout &DL) {
1552 assert(isOperationFoldable(Usr) && "Precondition");
1553 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1554 // Check if Usr can be simplified to a constant.
1555 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1556 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1557 if (auto *C = dyn_cast_or_null<ConstantInt>(
1558 simplifyCastInst(CI->getOpcode(), OpConst,
1559 CI->getDestTy(), DL))) {
1560 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1561 }
1562 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1563 bool Op0Match = BO->getOperand(0) == Op;
1564 bool Op1Match = BO->getOperand(1) == Op;
1565 assert((Op0Match || Op1Match) &&
1566 "Operand 0 nor Operand 1 isn't a match");
1567 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1568 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1569 if (auto *C = dyn_cast_or_null<ConstantInt>(
1570 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1571 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1572 }
1573 } else if (isa<FreezeInst>(Usr)) {
1574 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1575 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1576 }
1577 return ValueLatticeElement::getOverdefined();
1578}
1579
1580/// Compute the value of Val on the edge BBFrom -> BBTo.
1581std::optional<ValueLatticeElement>
1582LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1583 BasicBlock *BBTo, bool UseBlockValue) {
1584 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1585 // know that v != 0.
1586 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1587 // If this is a conditional branch and only one successor goes to BBTo, then
1588 // we may be able to infer something from the condition.
1589 if (BI->isConditional() &&
1590 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1591 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1592 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1593 "BBTo isn't a successor of BBFrom");
1594 Value *Condition = BI->getCondition();
1595
1596 // If V is the condition of the branch itself, then we know exactly what
1597 // it is.
1598 // NB: The condition on a `br` can't be a vector type.
1599 if (Condition == Val)
1600 return ValueLatticeElement::get(ConstantInt::get(
1601 Type::getInt1Ty(Val->getContext()), isTrueDest));
1602
1603 // If the condition of the branch is an equality comparison, we may be
1604 // able to infer the value.
1605 std::optional<ValueLatticeElement> Result =
1606 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1607 if (!Result)
1608 return std::nullopt;
1609
1610 if (!Result->isOverdefined())
1611 return Result;
1612
1613 if (User *Usr = dyn_cast<User>(Val)) {
1614 assert(Result->isOverdefined() && "Result isn't overdefined");
1615 // Check with isOperationFoldable() first to avoid linearly iterating
1616 // over the operands unnecessarily which can be expensive for
1617 // instructions with many operands.
1618 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1619 const DataLayout &DL = BBTo->getDataLayout();
1620 if (usesOperand(Usr, Condition)) {
1621 // If Val has Condition as an operand and Val can be folded into a
1622 // constant with either Condition == true or Condition == false,
1623 // propagate the constant.
1624 // eg.
1625 // ; %Val is true on the edge to %then.
1626 // %Val = and i1 %Condition, true.
1627 // br %Condition, label %then, label %else
1628 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1629 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1630 } else if (isa<TruncInst, ZExtInst, SExtInst>(Usr)) {
1631 ValueLatticeElement OpLatticeVal =
1632 *getValueFromCondition(Usr->getOperand(0), Condition,
1633 isTrueDest, /*UseBlockValue*/ false);
1634
1635 if (OpLatticeVal.isConstantRange()) {
1636 const unsigned ResultBitWidth =
1637 Usr->getType()->getScalarSizeInBits();
1638 if (auto *Trunc = dyn_cast<TruncInst>(Usr))
1639 return ValueLatticeElement::getRange(
1640 OpLatticeVal.getConstantRange().truncate(
1641 ResultBitWidth, Trunc->getNoWrapKind()));
1642
1643 return ValueLatticeElement::getRange(
1644 OpLatticeVal.getConstantRange().castOp(
1645 cast<CastInst>(Usr)->getOpcode(), ResultBitWidth));
1646 }
1647 if (OpLatticeVal.isConstant()) {
1648 Constant *C = OpLatticeVal.getConstant();
1649 if (auto *CastC = ConstantFoldCastOperand(
1650 cast<CastInst>(Usr)->getOpcode(), C, Usr->getType(), DL))
1651 return ValueLatticeElement::get(CastC);
1652 }
1653 return ValueLatticeElement::getOverdefined();
1654 } else {
1655 // If one of Val's operand has an inferred value, we may be able to
1656 // infer the value of Val.
1657 // eg.
1658 // ; %Val is 94 on the edge to %then.
1659 // %Val = add i8 %Op, 1
1660 // %Condition = icmp eq i8 %Op, 93
1661 // br i1 %Condition, label %then, label %else
1662 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1663 Value *Op = Usr->getOperand(i);
1664 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1665 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1666 if (std::optional<APInt> OpConst =
1667 OpLatticeVal.asConstantInteger()) {
1668 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1669 break;
1670 }
1671 }
1672 }
1673 }
1674 }
1675 if (!Result->isOverdefined())
1676 return Result;
1677 }
1678 }
1679
1680 // If the edge was formed by a switch on the value, then we may know exactly
1681 // what it is.
1682 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1683 Value *Condition = SI->getCondition();
1684 if (!isa<IntegerType>(Val->getType()))
1685 return ValueLatticeElement::getOverdefined();
1686 bool ValUsesConditionAndMayBeFoldable = false;
1687 if (Condition != Val) {
1688 // Check if Val has Condition as an operand.
1689 if (User *Usr = dyn_cast<User>(Val))
1690 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1691 usesOperand(Usr, Condition);
1692 if (!ValUsesConditionAndMayBeFoldable)
1693 return ValueLatticeElement::getOverdefined();
1694 }
1695 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1696 "Condition != Val nor Val doesn't use Condition");
1697
1698 bool DefaultCase = SI->getDefaultDest() == BBTo;
1699 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1700 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1701
1702 for (auto Case : SI->cases()) {
1703 APInt CaseValue = Case.getCaseValue()->getValue();
1704 ConstantRange EdgeVal(CaseValue);
1705 if (ValUsesConditionAndMayBeFoldable) {
1706 User *Usr = cast<User>(Val);
1707 const DataLayout &DL = BBTo->getDataLayout();
1708 ValueLatticeElement EdgeLatticeVal =
1709 constantFoldUser(Usr, Condition, CaseValue, DL);
1710 if (EdgeLatticeVal.isOverdefined())
1711 return ValueLatticeElement::getOverdefined();
1712 EdgeVal = EdgeLatticeVal.getConstantRange();
1713 }
1714 if (DefaultCase) {
1715 // It is possible that the default destination is the destination of
1716 // some cases. We cannot perform difference for those cases.
1717 // We know Condition != CaseValue in BBTo. In some cases we can use
1718 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1719 // only do this when f is identity (i.e. Val == Condition), but we
1720 // should be able to do this for any injective f.
1721 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1722 EdgesVals = EdgesVals.difference(EdgeVal);
1723 } else if (Case.getCaseSuccessor() == BBTo)
1724 EdgesVals = EdgesVals.unionWith(EdgeVal);
1725 }
1726 return ValueLatticeElement::getRange(std::move(EdgesVals));
1727 }
1728 return ValueLatticeElement::getOverdefined();
1729}
1730
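// Editor's illustration (not part of the original file): how the switch
// handling above shapes the edge range. For a hypothetical
//   switch i32 %x, label %default [ i32 1, label %target
//                                   i32 2, label %target ]
// the edge to %target starts from the empty set and unions in the matching
// case values, giving %x in [1, 3); the edge to %default starts from the full
// set and subtracts each case value that does not also branch to %default.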
1731/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1732/// the basic block if the edge does not constrain Val.
1733std::optional<ValueLatticeElement>
1734LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1735 BasicBlock *BBTo, Instruction *CxtI) {
1736 // If already a constant, there is nothing to compute.
1737 if (Constant *VC = dyn_cast<Constant>(Val))
1738 return ValueLatticeElement::get(VC);
1739
1740 std::optional<ValueLatticeElement> LocalResult =
1741 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1742 if (!LocalResult)
1743 return std::nullopt;
1744
1745 if (hasSingleValue(*LocalResult))
1746 // Can't get any more precise here
1747 return LocalResult;
1748
1749 std::optional<ValueLatticeElement> OptInBlock =
1750 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1751 if (!OptInBlock)
1752 return std::nullopt;
1753 ValueLatticeElement &InBlock = *OptInBlock;
1754
1755 // We can use the context instruction (generically the ultimate instruction
1756 // the calling pass is trying to simplify) here, even though the result of
1757 // this function is generally cached when called from the solve* functions
1758 // (and that cached result might be used with queries using a different
1759 // context instruction), because when this function is called from the solve*
1760 // functions, the context instruction is not provided. When called from
1761 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1762 // but then the result is not cached.
1763 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1764
1765 return LocalResult->intersect(InBlock);
1766}
1767
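// Editor's worked example (not part of the original file) for the final
// intersect in getEdgeValue above: if the branch condition only proves
// "%x u< 10" on this edge (LocalResult = [0, 10)) while the block value of %x
// in BBFrom is already [5, 100), the returned lattice value is the
// intersection [5, 10).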
1768 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1769 Instruction *CxtI) {
1770 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1771 << BB->getName() << "'\n");
1772
1773 assert(BlockValueStack.empty() && BlockValueSet.empty());
1774 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1775 if (!OptResult) {
1776 solve();
1777 OptResult = getBlockValue(V, BB, CxtI);
1778 assert(OptResult && "Value not available after solving");
1779 }
1780
1781 ValueLatticeElement Result = *OptResult;
1782 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1783 return Result;
1784}
1785
1786 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1787 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1788 << "'\n");
1789
1790 if (auto *C = dyn_cast<Constant>(V))
1791 return ValueLatticeElement::get(C);
1792
1793 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1794 if (auto *I = dyn_cast<Instruction>(V))
1795 Result = getFromRangeMetadata(I);
1796 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1797
1798 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1799 return Result;
1800}
1801
1802 ValueLatticeElement LazyValueInfoImpl::
1803 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1804 Instruction *CxtI) {
1805 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1806 << FromBB->getName() << "' to '" << ToBB->getName()
1807 << "'\n");
1808
1809 std::optional<ValueLatticeElement> Result =
1810 getEdgeValue(V, FromBB, ToBB, CxtI);
1811 while (!Result) {
1812 // As the worklist only explicitly tracks block values (but not edge values)
1813 // we may have to call solve() multiple times, as the edge value calculation
1814 // may request additional block values.
1815 solve();
1816 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1817 }
1818
1819 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1820 return *Result;
1821}
1822
1823 ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
1824 Value *V = U.get();
1825 auto *CxtI = cast<Instruction>(U.getUser());
1826 ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1827
1828 // Check whether the only (possibly transitive) use of the value is in a
1829 // position where V can be constrained by a select or branch condition.
1830 const Use *CurrU = &U;
1831 // TODO: Increase limit?
1832 const unsigned MaxUsesToInspect = 3;
1833 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1834 std::optional<ValueLatticeElement> CondVal;
1835 auto *CurrI = cast<Instruction>(CurrU->getUser());
1836 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1837 // If the value is undef, a different value may be chosen in
1838 // the select condition and at use.
1839 if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1840 break;
1841 if (CurrU->getOperandNo() == 1)
1842 CondVal =
1843 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1844 /*UseBlockValue*/ false);
1845 else if (CurrU->getOperandNo() == 2)
1846 CondVal =
1847 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1848 /*UseBlockValue*/ false);
1849 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1850 // TODO: Use non-local query?
1851 CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1852 PHI->getParent(), /*UseBlockValue*/ false);
1853 }
1854 if (CondVal)
1855 VL = VL.intersect(*CondVal);
1856
1857 // Only follow one-use chain, to allow direct intersection of conditions.
1858 // If there are multiple uses, we would have to intersect with the union of
1859 // all conditions at different uses.
1860 // Stop walking if we hit a non-speculatable instruction. Even if the
1861 // result is only used under a specific condition, executing the
1862 // instruction itself may cause side effects or UB already.
1863 // This also disallows looking through phi nodes: If the phi node is part
1864 // of a cycle, we might end up reasoning about values from different cycle
1865 // iterations (PR60629).
1866 if (!CurrI->hasOneUse() ||
1867 !isSafeToSpeculativelyExecuteWithVariableReplaced(
1868 CurrI, /*IgnoreUBImplyingAttrs=*/false))
1869 break;
1870 CurrU = &*CurrI->use_begin();
1871 }
1872 return VL;
1873}
1874
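// Editor's illustration (not part of the original file) for getValueAtUse
// above. In the hypothetical IR
//   %c = icmp ult i32 %x, 10
//   %s = select i1 %c, i32 %x, i32 0
// the use of %x as the true arm of the select is only relevant when %c holds,
// so the block value of %x may be intersected with the range implied by
// "%x u< 10", i.e. [0, 10), provided %c is known not to be undef.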
1875 void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1876 BasicBlock *NewSucc) {
1877 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1878}
1879
1880//===----------------------------------------------------------------------===//
1881// LazyValueInfo Impl
1882//===----------------------------------------------------------------------===//
1883
1884 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1885 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1886
1887 if (auto *Impl = Info.getImpl())
1888 Impl->clear();
1889
1890 // Fully lazy.
1891 return false;
1892}
1893
1894
1895 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1896 AU.setPreservesAll();
1897 AU.addRequired<AssumptionCacheTracker>();
1898}
1899
1900 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1901
1902/// This lazily constructs the LazyValueInfoImpl.
1903LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1904 if (!PImpl) {
1905 assert(M && "getCache() called with a null Module");
1906 const DataLayout &DL = M->getDataLayout();
1907 Function *GuardDecl =
1908 Intrinsic::getDeclarationIfExists(M, Intrinsic::experimental_guard);
1909 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1910 }
1911 return *PImpl;
1912}
1913
1914LazyValueInfoImpl *LazyValueInfo::getImpl() { return PImpl; }
1915
1916 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1917
1918 void LazyValueInfo::releaseMemory() {
1919 // If the cache was allocated, free it.
1920 if (auto *Impl = getImpl()) {
1921 delete &*Impl;
1922 PImpl = nullptr;
1923 }
1924}
1925
1926 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1927 FunctionAnalysisManager::Invalidator &Inv) {
1928 // We need to invalidate if we have either failed to preserve this analyses
1929 // result directly or if any of its dependencies have been invalidated.
1930 auto PAC = PA.getChecker<LazyValueAnalysis>();
1931 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1932 return true;
1933
1934 return false;
1935}
1936
1937void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1938
1939 LazyValueInfo LazyValueAnalysis::run(Function &F,
1940 FunctionAnalysisManager &FAM) {
1941 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1942
1943 return LazyValueInfo(&AC, &F.getDataLayout());
1944}
1945
1946/// Returns true if we can statically tell that this value will never be a
1947/// "useful" constant. In practice, this means we've got something like an
1948/// alloca or a malloc call for which a comparison against a constant can
1949/// only be guarding dead code. Note that we are potentially giving up some
1950 /// precision in dead code (a constant result) in favour of avoiding an
1951 /// expensive search for an easily answered common query.
1952static bool isKnownNonConstant(Value *V) {
1953 V = V->stripPointerCasts();
1954 // The return val of alloc cannot be a Constant.
1955 if (isa<AllocaInst>(V))
1956 return true;
1957 return false;
1958}
1959
1960 Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1961 // Bail out early if V is known not to be a Constant.
1962 if (isKnownNonConstant(V))
1963 return nullptr;
1964
1965 BasicBlock *BB = CxtI->getParent();
1966 ValueLatticeElement Result =
1967 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1968
1969 if (Result.isConstant())
1970 return Result.getConstant();
1971 if (Result.isConstantRange()) {
1972 const ConstantRange &CR = Result.getConstantRange();
1973 if (const APInt *SingleVal = CR.getSingleElement())
1974 return ConstantInt::get(V->getType(), *SingleVal);
1975 }
1976 return nullptr;
1977}
1978
1979 ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1980 bool UndefAllowed) {
1981 BasicBlock *BB = CxtI->getParent();
1982 ValueLatticeElement Result =
1983 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1984 return Result.asConstantRange(V->getType(), UndefAllowed);
1985}
1986
1987 ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1988 bool UndefAllowed) {
1989 auto *Inst = cast<Instruction>(U.getUser());
1990 ValueLatticeElement Result =
1991 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1992 return Result.asConstantRange(U->getType(), UndefAllowed);
1993}
1994
1995/// Determine whether the specified value is known to be a
1996/// constant on the specified edge. Return null if not.
1997 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1998 BasicBlock *ToBB,
1999 Instruction *CxtI) {
2000 Module *M = FromBB->getModule();
2001 ValueLatticeElement Result =
2002 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2003
2004 if (Result.isConstant())
2005 return Result.getConstant();
2006 if (Result.isConstantRange()) {
2007 const ConstantRange &CR = Result.getConstantRange();
2008 if (const APInt *SingleVal = CR.getSingleElement())
2009 return ConstantInt::get(V->getType(), *SingleVal);
2010 }
2011 return nullptr;
2012}
2013
2014 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
2015 BasicBlock *FromBB,
2016 BasicBlock *ToBB,
2017 Instruction *CxtI) {
2018 Module *M = FromBB->getModule();
2019 ValueLatticeElement Result =
2020 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2021 // TODO: Should undef be allowed here?
2022 return Result.asConstantRange(V->getType(), /*UndefAllowed*/ true);
2023}
2024
2025 static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
2026 const ValueLatticeElement &Val,
2027 const DataLayout &DL) {
2028 // If we know the value is a constant, evaluate the conditional.
2029 if (Val.isConstant())
2030 return ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
2031
2032 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
2033 if (Val.isConstantRange()) {
2034 const ConstantRange &CR = Val.getConstantRange();
2035 ConstantRange RHS = C->toConstantRange();
2036 if (CR.icmp(Pred, RHS))
2037 return ConstantInt::getTrue(ResTy);
2038 if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
2039 return ConstantInt::getFalse(ResTy);
2040 return nullptr;
2041 }
2042
2043 if (Val.isNotConstant()) {
2044 // If this is an equality comparison, we can try to fold it knowing that
2045 // "V != C1".
2046 if (Pred == ICmpInst::ICMP_EQ) {
2047 // !C1 == C -> false iff C1 == C.
2048 Constant *Res = ConstantFoldCompareInstOperands(
2049 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
2050 if (Res && Res->isNullValue())
2051 return ConstantInt::getFalse(ResTy);
2052 } else if (Pred == ICmpInst::ICMP_NE) {
2053 // !C1 != C -> true iff C1 == C.
2054 Constant *Res = ConstantFoldCompareInstOperands(
2055 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
2056 if (Res && Res->isNullValue())
2057 return ConstantInt::getTrue(ResTy);
2058 }
2059 return nullptr;
2060 }
2061
2062 return nullptr;
2063}
2064
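// Editor's worked examples (not part of the original file) for
// getPredicateResult above, with Val = constantrange<1, 5>:
//   icmp ult Val, 10 -> the predicate holds for the whole range -> i1 true
//   icmp ugt Val, 10 -> the inverse predicate holds for the whole range
//                       -> i1 false
//   icmp eq Val, 3   -> neither holds for every element -> nullptr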
2065/// Determine whether the specified value comparison with a constant is known to
2066/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
2067 Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
2068 Constant *C, BasicBlock *FromBB,
2069 BasicBlock *ToBB,
2070 Instruction *CxtI) {
2071 Module *M = FromBB->getModule();
2072 ValueLatticeElement Result =
2073 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2074
2075 return getPredicateResult(Pred, C, Result, M->getDataLayout());
2076}
2077
2078 Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
2079 Constant *C, Instruction *CxtI,
2080 bool UseBlockValue) {
2081 // Is or is not NonNull are common predicates being queried. If
2082 // isKnownNonZero can tell us the result of the predicate, we can
2083 // return it quickly. But this is only a fastpath, and falling
2084 // through would still be correct.
2085 Module *M = CxtI->getModule();
2086 const DataLayout &DL = M->getDataLayout();
2087 if (V->getType()->isPointerTy() && C->isNullValue() &&
2088 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
2089 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
2090 if (Pred == ICmpInst::ICMP_EQ)
2091 return ConstantInt::getFalse(ResTy);
2092 else if (Pred == ICmpInst::ICMP_NE)
2093 return ConstantInt::getTrue(ResTy);
2094 }
2095
2096 auto &Impl = getOrCreateImpl(M);
2097 ValueLatticeElement Result =
2098 UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
2099 : Impl.getValueAt(V, CxtI);
2100 Constant *Ret = getPredicateResult(Pred, C, Result, DL);
2101 if (Ret)
2102 return Ret;
2103
2104 // Note: The following bit of code is somewhat distinct from the rest of LVI;
2105 // LVI as a whole tries to compute a lattice value which is conservatively
2106 // correct at a given location. In this case, we have a predicate which we
2107 // weren't able to prove about the merged result, and we're pushing that
2108 // predicate back along each incoming edge to see if we can prove it
2109 // separately for each input. As a motivating example, consider:
2110 // bb1:
2111 // %v1 = ... ; constantrange<1, 5>
2112 // br label %merge
2113 // bb2:
2114 // %v2 = ... ; constantrange<10, 20>
2115 // br label %merge
2116 // merge:
2117 // %phi = phi [%v1, %v2] ; constantrange<1,20>
2118 // %pred = icmp eq i32 %phi, 8
2119 // We can't tell from the lattice value for '%phi' that '%pred' is false
2120 // along each path, but by checking the predicate over each input separately,
2121 // we can.
2122 // We limit the search to one step backwards from the current BB and value.
2123 // We could consider extending this to search further backwards through the
2124 // CFG and/or value graph, but there are non-obvious compile time vs quality
2125 // tradeoffs.
2126 BasicBlock *BB = CxtI->getParent();
2127
2128 // Function entry or an unreachable block. Bail to avoid confusing
2129 // analysis below.
2130 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
2131 if (PI == PE)
2132 return nullptr;
2133
2134 // If V is a PHI node in the same block as the context, we need to ask
2135 // questions about the predicate as applied to the incoming value along
2136 // each edge. This is useful for eliminating cases where the predicate is
2137 // known along all incoming edges.
2138 if (auto *PHI = dyn_cast<PHINode>(V))
2139 if (PHI->getParent() == BB) {
2140 Constant *Baseline = nullptr;
2141 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
2142 Value *Incoming = PHI->getIncomingValue(i);
2143 BasicBlock *PredBB = PHI->getIncomingBlock(i);
2144 // Note that PredBB may be BB itself.
2145 Constant *Result =
2146 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
2147
2148 // Keep going as long as we've seen a consistent known result for
2149 // all inputs.
2150 Baseline = (i == 0) ? Result /* First iteration */
2151 : (Baseline == Result ? Baseline
2152 : nullptr); /* All others */
2153 if (!Baseline)
2154 break;
2155 }
2156 if (Baseline)
2157 return Baseline;
2158 }
2159
2160 // For a comparison where the V is outside this block, it's possible
2161 // that we've branched on it before. Look to see if the value is known
2162 // on all incoming edges.
2163 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
2164 // For predecessor edge, determine if the comparison is true or false
2165 // on that edge. If they're all true or all false, we can conclude
2166 // the value of the comparison in this block.
2167 Constant *Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
2168 if (Baseline) {
2169 // Check that all remaining incoming values match the first one.
2170 while (++PI != PE) {
2171 Constant *Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
2172 if (Ret != Baseline)
2173 break;
2174 }
2175 // If we terminated early, then one of the values didn't match.
2176 if (PI == PE) {
2177 return Baseline;
2178 }
2179 }
2180 }
2181
2182 return nullptr;
2183}
2184
2185 Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
2186 Value *RHS, Instruction *CxtI,
2187 bool UseBlockValue) {
2188 if (auto *C = dyn_cast<Constant>(RHS))
2189 return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
2190 if (auto *C = dyn_cast<Constant>(LHS))
2191 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
2192 UseBlockValue);
2193
2194 // Got two non-Constant values. Try to determine the comparison results based
2195 // on the block values of the two operands, e.g. because they have
2196 // non-overlapping ranges.
2197 if (UseBlockValue) {
2198 Module *M = CxtI->getModule();
2199 ValueLatticeElement L =
2200 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
2201 if (L.isOverdefined())
2202 return nullptr;
2203
2204 ValueLatticeElement R =
2205 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
2206 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
2207 return L.getCompare(Pred, Ty, R, M->getDataLayout());
2208 }
2209 return nullptr;
2210}
2211
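// Editor's illustration (not part of the original file) for the LHS/RHS
// overload above: if the block values are LHS in [0, 10) and RHS in [20, 30),
// the ranges do not overlap, so "icmp ult %LHS, %RHS" folds to i1 true via
// ValueLatticeElement::getCompare even though neither operand is a constant.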
2212 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
2213 BasicBlock *NewSucc) {
2214 if (auto *Impl = getImpl())
2215 Impl->threadEdge(PredBB, OldSucc, NewSucc);
2216}
2217
2218 void LazyValueInfo::forgetValue(Value *V) {
2219 if (auto *Impl = getImpl())
2220 Impl->forgetValue(V);
2221}
2222
2223 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
2224 if (auto *Impl = getImpl())
2225 Impl->eraseBlock(BB);
2226}
2227
2228 void LazyValueInfo::clear() {
2229 if (auto *Impl = getImpl())
2230 Impl->clear();
2231}
2232
2233 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
2234 if (auto *Impl = getImpl())
2235 Impl->printLVI(F, DTree, OS);
2236}
2237
2238// Print the LVI for the function arguments at the start of each basic block.
2239void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2240 const BasicBlock *BB, formatted_raw_ostream &OS) {
2241 // Find if there are latticevalues defined for arguments of the function.
2242 auto *F = BB->getParent();
2243 for (const auto &Arg : F->args()) {
2244 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2245 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2246 if (Result.isUnknown())
2247 continue;
2248 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2249 }
2250}
2251
2252// This function prints the LVI analysis for the instruction I at the beginning
2253// of various basic blocks. It relies on calculated values that are stored in
2254 // the LazyValueInfoCache, and in the absence of cached values, recalculates
2255 // the LazyValueInfo for `I` and prints that info.
2256void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2257 const Instruction *I, formatted_raw_ostream &OS) {
2258
2259 auto *ParentBB = I->getParent();
2260 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2261 // We can generate (solve) LVI values only for blocks that are dominated by
2262 // I's parent. However, to avoid generating LVI for all of those blocks, which
2263 // would mostly contain redundant/uninteresting information, we only print LVI
2264 // for blocks that may use this LVI information (such as immediate successor
2265 // blocks and blocks that contain uses of `I`).
2266 auto printResult = [&](const BasicBlock *BB) {
2267 if (!BlocksContainingLVI.insert(BB).second)
2268 return;
2269 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2270 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2271 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2272 BB->printAsOperand(OS, false);
2273 OS << "' is: " << Result << "\n";
2274 };
2275
2276 printResult(ParentBB);
2277 // Print the LVI analysis results for the immediate successor blocks, that
2278 // are dominated by `ParentBB`.
2279 for (const auto *BBSucc : successors(ParentBB))
2280 if (DT.dominates(ParentBB, BBSucc))
2281 printResult(BBSucc);
2282
2283 // Print LVI in blocks where `I` is used.
2284 for (const auto *U : I->users())
2285 if (auto *UseI = dyn_cast<Instruction>(U))
2286 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2287 printResult(UseI->getParent());
2288
2289}
2290
2291 PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2292 FunctionAnalysisManager &AM) {
2293 OS << "LVI for function '" << F.getName() << "':\n";
2294 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2295 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2296 LVI.printLVI(F, DTree, OS);
2297 return PreservedAnalyses::all();
2298}
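// Editor's usage note (not part of the original file): the annotated-writer
// machinery above backs the LVI printer pass. Assuming the usual
// new-pass-manager registration name, the analysis can be dumped with
//   opt -passes='print<lazy-value-info>' -disable-output input.ll
// which emits, per block, lines in the format produced by
// emitBasicBlockStartAnnot and emitInstructionAnnot, e.g.
//   ; LatticeVal for: 'i32 %arg' is: constantrange<0, 10>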