LLVM 22.0.0git
LoopAccessAnalysis.cpp
1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugLoc.h"
46#include "llvm/IR/Dominators.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
52#include "llvm/IR/PassManager.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
55#include "llvm/IR/ValueHandle.h"
58#include "llvm/Support/Debug.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <utility>
66#include <variant>
67#include <vector>
68
69using namespace llvm;
70using namespace llvm::SCEVPatternMatch;
71
72#define DEBUG_TYPE "loop-accesses"
73
75VectorizationFactor("force-vector-width", cl::Hidden,
76 cl::desc("Sets the SIMD width. Zero is autoselect."),
79
81VectorizationInterleave("force-vector-interleave", cl::Hidden,
82 cl::desc("Sets the vectorization interleave count. "
83 "Zero is autoselect."),
87
89 "runtime-memory-check-threshold", cl::Hidden,
90 cl::desc("When performing memory disambiguation checks at runtime do not "
91 "generate more than this number of comparisons (default = 8)."),
94
95/// The maximum iterations used to merge memory checks
97 "memory-check-merge-threshold", cl::Hidden,
98 cl::desc("Maximum number of comparisons done when trying to merge "
99 "runtime memory checks. (default = 100)"),
100 cl::init(100));
101
102/// Maximum SIMD width.
103const unsigned VectorizerParams::MaxVectorWidth = 64;
104
105/// We collect dependences up to this threshold.
107 MaxDependences("max-dependences", cl::Hidden,
108 cl::desc("Maximum number of dependences collected by "
109 "loop-access analysis (default = 100)"),
110 cl::init(100));
111
112/// This enables versioning on the strides of symbolically striding memory
113/// accesses in code like the following.
114/// for (i = 0; i < N; ++i)
115/// A[i * Stride1] += B[i * Stride2] ...
116///
117/// Will be roughly translated to
118/// if (Stride1 == 1 && Stride2 == 1) {
119/// for (i = 0; i < N; i+=4)
120/// A[i:i+3] += ...
121/// } else
122/// ...
124 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
125 cl::desc("Enable symbolic stride memory access versioning"));
126
127/// Enable store-to-load forwarding conflict detection. This option can
128/// be disabled for correctness testing.
130 "store-to-load-forwarding-conflict-detection", cl::Hidden,
131 cl::desc("Enable conflict detection in loop-access analysis"),
132 cl::init(true));
133
135 "max-forked-scev-depth", cl::Hidden,
136 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
137 cl::init(5));
138
140 "laa-speculate-unit-stride", cl::Hidden,
141 cl::desc("Speculate that non-constant strides are unit in LAA"),
142 cl::init(true));
143
145 "hoist-runtime-checks", cl::Hidden,
146 cl::desc(
147 "Hoist inner loop runtime memory checks to outer loop if possible"),
150
151bool VectorizerParams::isInterleaveForced() {
152 return ::VectorizationInterleave.getNumOccurrences() > 0;
153}
154
155const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
156 const DenseMap<Value *, const SCEV *> &PtrToStride,
157 Value *Ptr) {
158 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
159
160 // If there is an entry in the map return the SCEV of the pointer with the
161 // symbolic stride replaced by one.
162 const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
163 if (!StrideSCEV)
164 // For a non-symbolic stride, just return the original expression.
165 return OrigSCEV;
166
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
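// Worked example (illustrative, assuming a 4-byte element type): for a loop
// accessing A[i * Stride] with a loop-invariant, symbolic i64 %Stride, the
// pointer SCEV is roughly {%A,+,(4 * %Stride)}<%loop>. If %Stride is
// registered in PtrToStride, the function above adds the predicate
// "%Stride == 1" to PSE and returns the simplified SCEV {%A,+,4}<%loop>, so
// later analysis can treat the access as unit-stride, guarded by a runtime
// check of that predicate.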
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, const RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
194/// return nullptr. \p A and \p B must have the same type.
195static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
196 ScalarEvolution &SE) {
197 if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
198 return nullptr;
199 return SE.getAddExpr(A, B);
200}
201
202/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
203/// return nullptr. \p A and \p B must have the same type.
204static const SCEV *mulSCEVOverflow(const SCEV *A, const SCEV *B,
205 ScalarEvolution &SE) {
206 if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
207 return nullptr;
208 return SE.getMulExpr(A, B);
209}
210
211/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
212/// \p MaxBTC is guaranteed inbounds of the accessed object.
213static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
214 const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
215 ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
216 AssumptionCache *AC,
217 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
218 auto *PointerBase = SE.getPointerBase(AR->getStart());
219 auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
220 if (!StartPtr)
221 return false;
222 const Loop *L = AR->getLoop();
223 bool CheckForNonNull, CheckForFreed;
224 Value *StartPtrV = StartPtr->getValue();
225 uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
226 DL, CheckForNonNull, CheckForFreed);
227
228 if (DerefBytes && (CheckForNonNull || CheckForFreed))
229 return false;
230
231 const SCEV *Step = AR->getStepRecurrence(SE);
232 Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
233 const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);
234
235 // Check if we have a suitable dereferencable assumption we can use.
236 if (!StartPtrV->canBeFreed()) {
237 Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
238 if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
239 if (isa<BranchInst>(LoopPred->getTerminator()))
240 CtxI = LoopPred->getTerminator();
241 }
242
243 RetainedKnowledge DerefRK = getKnowledgeValidInContext(
244 StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT);
245 if (DerefRK) {
246 DerefBytesSCEV =
247 SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue));
248 }
249 }
250
251 if (DerefBytesSCEV->isZero())
252 return false;
253
254 bool IsKnownNonNegative = SE.isKnownNonNegative(Step);
255 if (!IsKnownNonNegative && !SE.isKnownNegative(Step))
256 return false;
257
258 Step = SE.getNoopOrSignExtend(Step, WiderTy);
259 MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);
260
261 // For the computations below, make sure they don't unsigned wrap.
262 if (!SE.isKnownPredicate(CmpInst::ICMP_UGE, AR->getStart(), StartPtr))
263 return false;
264 const SCEV *StartOffset = SE.getNoopOrZeroExtend(
265 SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);
266
267 if (!LoopGuards)
268 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));
269 MaxBTC = SE.applyLoopGuards(MaxBTC, *LoopGuards);
270
271 const SCEV *OffsetAtLastIter =
272 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
273 if (!OffsetAtLastIter) {
274 // Re-try with constant max backedge-taken count if using the symbolic one
275 // failed.
276 MaxBTC = SE.getConstantMaxBackedgeTakenCount(AR->getLoop());
277 if (isa<SCEVCouldNotCompute>(MaxBTC))
278 return false;
279 MaxBTC = SE.getNoopOrZeroExtend(
280 MaxBTC, WiderTy);
281 OffsetAtLastIter =
282 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
283 if (!OffsetAtLastIter)
284 return false;
285 }
286
287 const SCEV *OffsetEndBytes = addSCEVNoOverflow(
288 OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
289 if (!OffsetEndBytes)
290 return false;
291
292 if (IsKnownNonNegative) {
293 // For positive steps, check if
294 // (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
295 // while making sure none of the computations unsigned wrap themselves.
296 const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
297 if (!EndBytes)
298 return false;
299
300 DerefBytesSCEV = SE.applyLoopGuards(DerefBytesSCEV, *LoopGuards);
301 return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
302 }
303
304 // For negative steps check if
305 // * StartOffset >= (MaxBTC * Step + EltSize)
306 // * StartOffset <= DerefBytes.
307 assert(SE.isKnownNegative(Step) && "must be known negative");
308 return SE.isKnownPredicate(CmpInst::ICMP_SGE, StartOffset, OffsetEndBytes) &&
309 SE.isKnownPredicate(CmpInst::ICMP_ULE, StartOffset, DerefBytesSCEV);
310}
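// Worked example (illustrative): for AR = {%p,+,4}<%loop>, EltSize = 4 and a
// symbolic MaxBTC = %n, the positive-step path above requires that
//   (Start(AR) - %p) + %n * 4 + 4 <= DerefBytes
// holds without any of the intermediate computations wrapping unsigned, where
// DerefBytes comes from a dereferenceable attribute or assumption on %p. If
// that is known, evaluating AR at iteration MaxBTC stays inside the accessed
// object and therefore cannot wrap.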
311
312std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
313 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
314 const SCEV *MaxBTC, ScalarEvolution *SE,
315 DenseMap<std::pair<const SCEV *, Type *>,
316 std::pair<const SCEV *, const SCEV *>> *PointerBounds,
317 DominatorTree *DT, AssumptionCache *AC,
318 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
319 std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
320 if (PointerBounds) {
321 auto [Iter, Ins] = PointerBounds->insert(
322 {{PtrExpr, AccessTy},
323 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
324 if (!Ins)
325 return Iter->second;
326 PtrBoundsPair = &Iter->second;
327 }
328
329 const SCEV *ScStart;
330 const SCEV *ScEnd;
331
332 auto &DL = Lp->getHeader()->getDataLayout();
333 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
334 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
335 if (SE->isLoopInvariant(PtrExpr, Lp)) {
336 ScStart = ScEnd = PtrExpr;
337 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
338 ScStart = AR->getStart();
339 if (!isa<SCEVCouldNotCompute>(BTC))
340 // Evaluating AR at an exact BTC is safe: LAA separately checks that
341 // accesses cannot wrap in the loop. If evaluating AR at BTC wraps, then
342 // the loop either triggers UB when executing a memory access with a
343 // poison pointer or the wrapping/poisoned pointer is not used.
344 ScEnd = AR->evaluateAtIteration(BTC, *SE);
345 else {
346 // Evaluating AR at MaxBTC may wrap and create an expression that is less
347 // than the start of the AddRec due to wrapping (for example consider
348 // MaxBTC = -2). If that's the case, set ScEnd to -(EltSize + 1). ScEnd
349 // will get incremented by EltSize before returning, so this effectively
350 // sets ScEnd to the maximum unsigned value for the type. Note that LAA
351 // separately checks that accesses cannot wrap, so unsigned max
352 // represents an upper bound.
353 if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
354 DT, AC, LoopGuards)) {
355 ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
356 } else {
357 ScEnd = SE->getAddExpr(
358 SE->getNegativeSCEV(EltSizeSCEV),
359 SE->getSCEV(ConstantExpr::getIntToPtr(
360 ConstantInt::get(EltSizeSCEV->getType(), -1), AR->getType())));
361 }
362 }
363 const SCEV *Step = AR->getStepRecurrence(*SE);
364
365 // For expressions with negative step, the upper bound is ScStart and the
366 // lower bound is ScEnd.
367 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
368 if (CStep->getValue()->isNegative())
369 std::swap(ScStart, ScEnd);
370 } else {
371 // Fallback case: the step is not constant, but we can still
372 // get the upper and lower bounds of the interval by using min/max
373 // expressions.
374 ScStart = SE->getUMinExpr(ScStart, ScEnd);
375 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
376 }
377 } else
378 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
379
380 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
381 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
382
383 // Add the size of the pointed element to ScEnd.
384 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
385
386 std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
387 if (PointerBounds)
388 *PtrBoundsPair = Res;
389 return Res;
390}
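// Worked example (illustrative): for AR = {%A,+,4}<%loop>, an i32 access and
// an exact backedge-taken count BTC = %n, the code above produces
// ScStart = %A and ScEnd = (%A + 4 * %n) + 4, i.e. the byte range
// [%A, %A + 4*%n + 4) touched across all iterations. For a negative step the
// two bounds are swapped before EltSize is added to the upper end.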
391
392/// Calculate Start and End points of memory access using
393/// getStartAndEndForAccess.
394void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
395 Type *AccessTy, bool WritePtr,
396 unsigned DepSetId, unsigned ASId,
397 PredicatedScalarEvolution &PSE,
398 bool NeedsFreeze) {
399 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
400 const SCEV *BTC = PSE.getBackedgeTakenCount();
401 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
402 Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
403 &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
404 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
405 !isa<SCEVCouldNotCompute>(ScEnd) &&
406 "must be able to compute both start and end expressions");
407 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
408 NeedsFreeze);
409}
410
411bool RuntimePointerChecking::tryToCreateDiffCheck(
412 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
413 // If either group contains multiple different pointers, bail out.
414 // TODO: Support multiple pointers by using the minimum or maximum pointer,
415 // depending on src & sink.
416 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
417 return false;
418
419 const PointerInfo *Src = &Pointers[CGI.Members[0]];
420 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
421
422 // If either pointer is read and written, multiple checks may be needed. Bail
423 // out.
424 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
425 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
426 return false;
427
428 ArrayRef<unsigned> AccSrc =
429 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
430 ArrayRef<unsigned> AccSink =
431 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
432 // If either pointer is accessed multiple times, there may not be a clear
433 // src/sink relation. Bail out for now.
434 if (AccSrc.size() != 1 || AccSink.size() != 1)
435 return false;
436
437 // If the sink is accessed before src, swap src/sink.
438 if (AccSink[0] < AccSrc[0])
439 std::swap(Src, Sink);
440
441 const SCEVConstant *Step;
442 const SCEV *SrcStart;
443 const SCEV *SinkStart;
444 const Loop *InnerLoop = DC.getInnermostLoop();
445 if (!match(Src->Expr,
446 m_scev_AffineAddRec(m_SCEV(SrcStart), m_SCEVConstant(Step),
447 m_SpecificLoop(InnerLoop))) ||
448 !match(Sink->Expr,
449 m_scev_AffineAddRec(m_SCEV(SinkStart), m_scev_Specific(Step),
450 m_SpecificLoop(InnerLoop))))
451 return false;
452
453 SmallVector<Instruction *, 4> SrcInsts =
454 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
455 SmallVector<Instruction *, 4> SinkInsts =
456 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
457 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
458 Type *DstTy = getLoadStoreType(SinkInsts[0]);
459 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
460 return false;
461
462 const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
463 unsigned AllocSize =
464 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
465
466 // Only matching constant steps matching the AllocSize are supported at the
467 // moment. This simplifies the difference computation. Can be extended in the
468 // future.
469 if (Step->getAPInt().abs() != AllocSize)
470 return false;
471
472 IntegerType *IntTy =
473 IntegerType::get(Src->PointerValue->getContext(),
474 DL.getPointerSizeInBits(CGI.AddressSpace));
475
476 // When counting down, the dependence distance needs to be swapped.
477 if (Step->getValue()->isNegative())
478 std::swap(SinkStart, SrcStart);
479
480 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkStart, IntTy);
481 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcStart, IntTy);
482 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
483 isa<SCEVCouldNotCompute>(SrcStartInt))
484 return false;
485
486 // If the start values for both Src and Sink also vary according to an outer
487 // loop, then it's probably better to avoid creating diff checks because
488 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
489 // do the expanded full range overlap checks, which can be hoisted.
490 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
491 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
492 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
493 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
494 const Loop *StartARLoop = SrcStartAR->getLoop();
495 if (StartARLoop == SinkStartAR->getLoop() &&
496 StartARLoop == InnerLoop->getParentLoop() &&
497 // If the diff check would already be loop invariant (due to the
498 // recurrences being the same), then we prefer to keep the diff checks
499 // because they are cheaper.
500 SrcStartAR->getStepRecurrence(*SE) !=
501 SinkStartAR->getStepRecurrence(*SE)) {
502 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
503 "cannot be hoisted out of the outer loop\n");
504 return false;
505 }
506 }
507
508 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
509 << "SrcStart: " << *SrcStartInt << '\n'
510 << "SinkStartInt: " << *SinkStartInt << '\n');
511 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
512 Src->NeedsFreeze || Sink->NeedsFreeze);
513 return true;
514}
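// Illustrative note (the actual expansion of DiffChecks lives outside this
// file): for a loop writing A[i] and reading A[i + K] with 4-byte elements,
// Src and Sink start at %A and %A + 4*K. Instead of two full interval-overlap
// comparisons, a single (SrcStartInt, SinkStartInt, AllocSize) entry is
// recorded in DiffChecks, so the eventual runtime test only compares the one
// pointer difference against the vectorized access footprint, which is
// cheaper to emit and easier to hoist.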
515
516SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
517 SmallVector<RuntimePointerCheck, 4> Checks;
518
519 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
520 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
521 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
522 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
523
524 if (needsChecking(CGI, CGJ)) {
525 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
526 Checks.emplace_back(&CGI, &CGJ);
527 }
528 }
529 }
530 return Checks;
531}
532
533void RuntimePointerChecking::generateChecks(
534 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
535 assert(Checks.empty() && "Checks is not empty");
536 groupChecks(DepCands, UseDependencies);
537 Checks = generateChecks();
538}
539
540bool RuntimePointerChecking::needsChecking(
541 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
542 for (const auto &I : M.Members)
543 for (const auto &J : N.Members)
544 if (needsChecking(I, J))
545 return true;
546 return false;
547}
548
549/// Compare \p I and \p J and return the minimum.
550/// Return nullptr in case we couldn't find an answer.
551static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
552 ScalarEvolution *SE) {
553 std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
554 if (!Diff)
555 return nullptr;
556 return Diff->isNegative() ? J : I;
557}
558
559bool RuntimeCheckingPtrGroup::addPointer(
560 unsigned Index, const RuntimePointerChecking &RtCheck) {
561 return addPointer(
562 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
563 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
564 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
565}
566
567bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
568 const SCEV *End, unsigned AS,
569 bool NeedsFreeze,
570 ScalarEvolution &SE) {
571 assert(AddressSpace == AS &&
572 "all pointers in a checking group must be in the same address space");
573
574 // Compare the starts and ends with the known minimum and maximum
575 // of this set. We need to know how we compare against the min/max
576 // of the set in order to be able to emit memchecks.
577 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
578 if (!Min0)
579 return false;
580
581 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
582 if (!Min1)
583 return false;
584
585 // Update the low bound expression if we've found a new min value.
586 if (Min0 == Start)
587 Low = Start;
588
589 // Update the high bound expression if we've found a new max value.
590 if (Min1 != End)
591 High = End;
592
593 Members.push_back(Index);
594 this->NeedsFreeze |= NeedsFreeze;
595 return true;
596}
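// Illustrative example: if a group currently spans [Low = %A + 4,
// High = %A + 44) and a new pointer has bounds [Start = %A, End = %A + 24),
// both differences are compile-time constants, so the pointer is merged and
// the group grows to [%A, %A + 44). If either difference is not constant,
// addPointer returns false and the caller starts a new group for that pointer
// instead.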
597
598void RuntimePointerChecking::groupChecks(
599 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
600 // We build the groups from dependency candidates equivalence classes
601 // because:
602 // - We know that pointers in the same equivalence class share
603 // the same underlying object and therefore there is a chance
604 // that we can compare pointers
605 // - We wouldn't be able to merge two pointers for which we need
606 // to emit a memcheck. The classes in DepCands are already
607 // conveniently built such that no two pointers in the same
608 // class need checking against each other.
609
610 // We use the following (greedy) algorithm to construct the groups
611 // For every pointer in the equivalence class:
612 // For each existing group:
613 // - if the difference between this pointer and the min/max bounds
614 // of the group is a constant, then make the pointer part of the
615 // group and update the min/max bounds of that group as required.
616
617 CheckingGroups.clear();
618
619 // If we need to check two pointers to the same underlying object
620 // with a non-constant difference, we shouldn't perform any pointer
621 // grouping with those pointers. This is because we can easily get
622 // into cases where the resulting check would return false, even when
623 // the accesses are safe.
624 //
625 // The following example shows this:
626 // for (i = 0; i < 1000; ++i)
627 // a[5000 + i * m] = a[i] + a[i + 9000]
628 //
629 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
630 // (0, 10000) which is always false. However, if m is 1, there is no
631 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
632 // us to perform an accurate check in this case.
633 //
634 // In the above case, we have a non-constant distance and an Unknown
635 // dependence between accesses to the same underlying object, and could retry
636 // with runtime checks. Therefore UseDependencies is false. In this case we
637 // will use the fallback path and create separate checking groups for all
638 // pointers.
639
640 // If we don't have the dependency partitions, construct a new
641 // checking pointer group for each pointer. This is also required
642 // for correctness, because in this case we can have checking between
643 // pointers to the same underlying object.
644 if (!UseDependencies) {
645 for (unsigned I = 0; I < Pointers.size(); ++I)
646 CheckingGroups.emplace_back(I, *this);
647 return;
648 }
649
650 unsigned TotalComparisons = 0;
651
652 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
653 for (unsigned Index = 0; Index < Pointers.size(); ++Index)
654 PositionMap[Pointers[Index].PointerValue].push_back(Index);
655
656 // We need to keep track of what pointers we've already seen so we
657 // don't process them twice.
658 SmallSet<unsigned, 2> Seen;
659
660 // Go through all equivalence classes, get the "pointer check groups"
661 // and add them to the overall solution. We use the order in which accesses
662 // appear in 'Pointers' to enforce determinism.
663 for (unsigned I = 0; I < Pointers.size(); ++I) {
664 // We've seen this pointer before, and therefore already processed
665 // its equivalence class.
666 if (Seen.contains(I))
667 continue;
668
669 MemAccessInfo Access(Pointers[I].PointerValue,
670 Pointers[I].IsWritePtr);
671
672 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
673
674 // Because DepCands is constructed by visiting accesses in the order in
675 // which they appear in alias sets (which is deterministic) and the
676 // iteration order within an equivalence class member is only dependent on
677 // the order in which unions and insertions are performed on the
678 // equivalence class, the iteration order is deterministic.
679 for (auto M : DepCands.members(Access)) {
680 auto PointerI = PositionMap.find(M.getPointer());
681 // If we can't find the pointer in PositionMap that means we can't
682 // generate a memcheck for it.
683 if (PointerI == PositionMap.end())
684 continue;
685 for (unsigned Pointer : PointerI->second) {
686 bool Merged = false;
687 // Mark this pointer as seen.
688 Seen.insert(Pointer);
689
690 // Go through all the existing sets and see if we can find one
691 // which can include this pointer.
692 for (RuntimeCheckingPtrGroup &Group : Groups) {
693 // Don't perform more than a certain amount of comparisons.
694 // This should limit the cost of grouping the pointers to something
695 // reasonable. If we do end up hitting this threshold, the algorithm
696 // will create separate groups for all remaining pointers.
697 if (TotalComparisons > MemoryCheckMergeThreshold)
698 break;
699
700 TotalComparisons++;
701
702 if (Group.addPointer(Pointer, *this)) {
703 Merged = true;
704 break;
705 }
706 }
707
708 if (!Merged)
709 // We couldn't add this pointer to any existing set or the threshold
710 // for the number of comparisons has been reached. Create a new group
711 // to hold the current pointer.
712 Groups.emplace_back(Pointer, *this);
713 }
714 }
715
716 // We've computed the grouped checks for this partition.
717 // Save the results and continue with the next one.
719 }
720}
721
722bool RuntimePointerChecking::arePointersInSamePartition(
723 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
724 unsigned PtrIdx2) {
725 return (PtrToPartition[PtrIdx1] != -1 &&
726 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
727}
728
729bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
730 const PointerInfo &PointerI = Pointers[I];
731 const PointerInfo &PointerJ = Pointers[J];
732
733 // No need to check if two readonly pointers intersect.
734 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
735 return false;
736
737 // Only need to check pointers between two different dependency sets.
738 if (PointerI.DependencySetId == PointerJ.DependencySetId)
739 return false;
740
741 // Only need to check pointers in the same alias set.
742 return PointerI.AliasSetId == PointerJ.AliasSetId;
743}
744
745/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
746static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
747getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
748 DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
749 for (const auto &[Idx, CG] : enumerate(CheckingGroups))
750 PtrIndices[&CG] = Idx;
751 return PtrIndices;
752}
753
754void RuntimePointerChecking::printChecks(
755 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
756 unsigned Depth) const {
757 unsigned N = 0;
758 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
759 for (const auto &[Check1, Check2] : Checks) {
760 const auto &First = Check1->Members, &Second = Check2->Members;
761 OS.indent(Depth) << "Check " << N++ << ":\n";
762 OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
763 << ":\n";
764 for (unsigned K : First)
765 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
766 OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
767 << ":\n";
768 for (unsigned K : Second)
769 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
770 }
771}
772
773void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
774
775 OS.indent(Depth) << "Run-time memory checks:\n";
776 printChecks(OS, Checks, Depth);
777
778 OS.indent(Depth) << "Grouped accesses:\n";
779 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
780 for (const auto &CG : CheckingGroups) {
781 OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
782 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
783 << ")\n";
784 for (unsigned Member : CG.Members) {
785 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
786 }
787 }
788}
789
790namespace {
791
792/// Analyses memory accesses in a loop.
793///
794/// Checks whether run time pointer checks are needed and builds sets for data
795/// dependence checking.
796class AccessAnalysis {
797public:
798 /// Read or write access location.
799 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
800 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
801
802 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
803 MemoryDepChecker::DepCandidates &DA,
804 PredicatedScalarEvolution &PSE,
805 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
806 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
807 LoopAliasScopes(LoopAliasScopes) {
808 // We're analyzing dependences across loop iterations.
809 BAA.enableCrossIterationMode();
810 }
811
812 /// Register a load and whether it is only read from.
813 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
814 Value *Ptr = const_cast<Value *>(Loc.Ptr);
815 AST.add(adjustLoc(Loc));
816 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
817 if (IsReadOnly)
818 ReadOnlyPtr.insert(Ptr);
819 }
820
821 /// Register a store.
822 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
823 Value *Ptr = const_cast<Value *>(Loc.Ptr);
824 AST.add(adjustLoc(Loc));
825 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
826 }
827
828 /// Check if we can emit a run-time no-alias check for \p Access.
829 ///
830 /// Returns true if we can emit a run-time no alias check for \p Access.
831 /// If we can check this access, this also adds it to a dependence set and
832 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
833 /// we will attempt to use additional run-time checks in order to get
834 /// the bounds of the pointer.
835 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
836 MemAccessInfo Access, Type *AccessTy,
837 const DenseMap<Value *, const SCEV *> &Strides,
838 DenseMap<Value *, unsigned> &DepSetId,
839 Loop *TheLoop, unsigned &RunningDepId,
840 unsigned ASId, bool Assume);
841
842 /// Check whether we can check the pointers at runtime for
843 /// non-intersection.
844 ///
845 /// Returns true if we need no check or if we do and we can generate them
846 /// (i.e. the pointers have computable bounds). A return value of false means
847 /// we couldn't analyze and generate runtime checks for all pointers in the
848 /// loop, but if \p AllowPartial is set then we will have checks for those
849 /// pointers we could analyze.
850 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
851 const DenseMap<Value *, const SCEV *> &Strides,
852 Value *&UncomputablePtr, bool AllowPartial);
853
854 /// Goes over all memory accesses, checks whether a RT check is needed
855 /// and builds sets of dependent accesses.
856 void buildDependenceSets() {
857 processMemAccesses();
858 }
859
860 /// Initial processing of memory accesses determined that we need to
861 /// perform dependency checking.
862 ///
863 /// Note that this can later be cleared if we retry memcheck analysis without
864 /// dependency checking (i.e. ShouldRetryWithRuntimeChecks).
865 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
866
867 /// We decided that no dependence analysis would be used. Reset the state.
868 void resetDepChecks(MemoryDepChecker &DepChecker) {
869 CheckDeps.clear();
870 DepChecker.clearDependences();
871 }
872
873 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
874
875private:
876 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
877
878 /// Adjust the MemoryLocation so that it represents accesses to this
879 /// location across all iterations, rather than a single one.
880 MemoryLocation adjustLoc(MemoryLocation Loc) const {
881 // The accessed location varies within the loop, but remains within the
882 // underlying object.
883 Loc.Size = LocationSize::beforeOrAfterPointer();
884 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
885 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
886 return Loc;
887 }
888
889 /// Drop alias scopes that are only valid within a single loop iteration.
890 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
891 if (!ScopeList)
892 return nullptr;
893
894 // For the sake of simplicity, drop the whole scope list if any scope is
895 // iteration-local.
896 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
897 return LoopAliasScopes.contains(cast<MDNode>(Scope));
898 }))
899 return nullptr;
900
901 return ScopeList;
902 }
903
904 /// Go over all memory access and check whether runtime pointer checks
905 /// are needed and build sets of dependency check candidates.
906 void processMemAccesses();
907
908 /// Map of all accesses. Values are the types used to access memory pointed to
909 /// by the pointer.
910 PtrAccessMap Accesses;
911
912 /// The loop being checked.
913 const Loop *TheLoop;
914
915 /// List of accesses that need a further dependence check.
916 MemAccessInfoList CheckDeps;
917
918 /// Set of pointers that are read only.
919 SmallPtrSet<Value*, 16> ReadOnlyPtr;
920
921 /// Batched alias analysis results.
922 BatchAAResults BAA;
923
924 /// An alias set tracker to partition the access set by underlying object and
925 /// intrinsic property (such as TBAA metadata).
926 AliasSetTracker AST;
927
928 /// The LoopInfo of the loop being checked.
929 const LoopInfo *LI;
930
931 /// Sets of potentially dependent accesses - members of one set share an
932 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
933 /// dependence check.
934 MemoryDepChecker::DepCandidates &DepCands;
935
936 /// Initial processing of memory accesses determined that we may need
937 /// to add memchecks. Perform the analysis to determine the necessary checks.
938 ///
939 /// Note that this is different from isDependencyCheckNeeded. When we retry
940 /// memcheck analysis without dependency checking
941 /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is
942 /// cleared while this remains set if we have potentially dependent accesses.
943 bool IsRTCheckAnalysisNeeded = false;
944
945 /// The SCEV predicate containing all the SCEV-related assumptions.
946 PredicatedScalarEvolution &PSE;
947
948 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
949
950 /// Alias scopes that are declared inside the loop, and as such not valid
951 /// across iterations.
952 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
953};
954
955} // end anonymous namespace
956
957/// Try to compute a constant stride for \p AR. Used by getPtrStride and
958/// isNoWrap.
959static std::optional<int64_t>
960getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
961 Value *Ptr, PredicatedScalarEvolution &PSE) {
962 if (isa<ScalableVectorType>(AccessTy)) {
963 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
964 << "\n");
965 return std::nullopt;
966 }
967
968 // The access function must stride over the innermost loop.
969 if (Lp != AR->getLoop()) {
970 LLVM_DEBUG({
971 dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
972 if (Ptr)
973 dbgs() << *Ptr << " ";
974
975 dbgs() << "SCEV: " << *AR << "\n";
976 });
977 return std::nullopt;
978 }
979
980 // Check the step is constant.
981 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
982
983 // Calculate the pointer stride and check if it is constant.
984 const APInt *APStepVal;
985 if (!match(Step, m_scev_APInt(APStepVal))) {
986 LLVM_DEBUG({
987 dbgs() << "LAA: Bad stride - Not a constant strided ";
988 if (Ptr)
989 dbgs() << *Ptr << " ";
990 dbgs() << "SCEV: " << *AR << "\n";
991 });
992 return std::nullopt;
993 }
994
995 const auto &DL = Lp->getHeader()->getDataLayout();
996 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
997 int64_t Size = AllocSize.getFixedValue();
998
999 // Huge step value - give up.
1000 std::optional<int64_t> StepVal = APStepVal->trySExtValue();
1001 if (!StepVal)
1002 return std::nullopt;
1003
1004 // Strided access.
1005 return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
1006}
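// Worked example (illustrative): for AR = {%p,+,8}<%loop> and AccessTy = i32
// (alloc size 4), StepVal = 8 is divisible by Size = 4, so the returned
// stride is 2, i.e. every other i32 element is touched. A step of 6 would
// return std::nullopt instead, because 6 % 4 != 0.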
1007
1008/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
1009/// information from the IR pointer value to determine no-wrap.
1010static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
1011 Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
1012 std::optional<int64_t> Stride = std::nullopt) {
1013 // FIXME: This should probably only return true for NUW.
1014 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1015 return true;
1016
1017 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1018 return true;
1019
1020 // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
1021 // the distance between the previously accessed location and the wrapped
1022 // location will be larger than half the pointer index type space. In that
1023 // case, the GEP would be poison and any memory access dependent on it would
1024 // be immediate UB when executed.
1025 if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
1026 GEP && GEP->hasNoUnsignedSignedWrap())
1027 return true;
1028
1029 if (!Stride)
1030 Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
1031 if (Stride) {
1032 // If the null pointer is undefined, then an access sequence which would
1033 // otherwise access it can be assumed not to unsigned wrap. Note that this
1034 // assumes the object in memory is aligned to the natural alignment.
1035 unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
1036 if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
1037 (Stride == 1 || Stride == -1))
1038 return true;
1039 }
1040
1041 if (Ptr && Assume) {
1043 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1044 << "LAA: Pointer: " << *Ptr << "\n"
1045 << "LAA: SCEV: " << *AR << "\n"
1046 << "LAA: Added an overflow assumption\n");
1047 return true;
1048 }
1049
1050 return false;
1051}
1052
1053static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
1054 function_ref<void(Value *)> AddPointer) {
1055 SmallPtrSet<Value *, 8> Visited;
1056 SmallVector<Value *> WorkList;
1057 WorkList.push_back(StartPtr);
1058
1059 while (!WorkList.empty()) {
1060 Value *Ptr = WorkList.pop_back_val();
1061 if (!Visited.insert(Ptr).second)
1062 continue;
1063 auto *PN = dyn_cast<PHINode>(Ptr);
1064 // SCEV does not look through non-header PHIs inside the loop. Such phis
1065 // can be analyzed by adding separate accesses for each incoming pointer
1066 // value.
1067 if (PN && InnermostLoop.contains(PN->getParent()) &&
1068 PN->getParent() != InnermostLoop.getHeader()) {
1069 llvm::append_range(WorkList, PN->incoming_values());
1070 } else
1071 AddPointer(Ptr);
1072 }
1073}
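// Illustrative example: for a loop-body phi
//   %p = phi ptr [ %a, %then ], [ %b, %else ]
// feeding a load, SCEV cannot build a single AddRec for %p, so the walk above
// invokes AddPointer separately on %a and %b, and each incoming pointer is
// analyzed as its own access.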
1074
1075// Walk back through the IR for a pointer, looking for a select like the
1076// following:
1077//
1078// %offset = select i1 %cmp, i64 %a, i64 %b
1079// %addr = getelementptr double, double* %base, i64 %offset
1080// %ld = load double, double* %addr, align 8
1081//
1082// We won't be able to form a single SCEVAddRecExpr from this since the
1083// address for each loop iteration depends on %cmp. We could potentially
1084// produce multiple valid SCEVAddRecExprs, though, and check all of them for
1085// memory safety/aliasing if needed.
1086//
1087// If we encounter some IR we don't yet handle, or something obviously fine
1088// like a constant, then we just add the SCEV for that term to the list passed
1089// in by the caller. If we have a node that may potentially yield a valid
1090// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
1091// ourselves before adding to the list.
1092static void findForkedSCEVs(
1093 ScalarEvolution *SE, const Loop *L, Value *Ptr,
1094 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
1095 unsigned Depth) {
1096 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
1097 // we've exceeded our limit on recursion, just return whatever we have
1098 // regardless of whether it can be used for a forked pointer or not, along
1099 // with an indication of whether it might be a poison or undef value.
1100 const SCEV *Scev = SE->getSCEV(Ptr);
1101 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
1102 !isa<Instruction>(Ptr) || Depth == 0) {
1103 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1104 return;
1105 }
1106
1107 Depth--;
1108
1109 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
1110 return get<1>(S);
1111 };
1112
1113 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
1114 switch (Opcode) {
1115 case Instruction::Add:
1116 return SE->getAddExpr(L, R);
1117 case Instruction::Sub:
1118 return SE->getMinusSCEV(L, R);
1119 default:
1120 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
1121 }
1122 };
1123
1124 Instruction *I = cast<Instruction>(Ptr);
1125 unsigned Opcode = I->getOpcode();
1126 switch (Opcode) {
1127 case Instruction::GetElementPtr: {
1128 auto *GEP = cast<GetElementPtrInst>(I);
1129 Type *SourceTy = GEP->getSourceElementType();
1130 // We only handle base + single offset GEPs here for now.
1131 // Not dealing with preexisting gathers yet, so no vectors.
1132 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
1133 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
1134 break;
1135 }
1136 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
1137 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
1138 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
1139 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
1140
1141 // See if we need to freeze our fork...
1142 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
1143 any_of(OffsetScevs, UndefPoisonCheck);
1144
1145 // Check that we only have a single fork, on either the base or the offset.
1146 // Copy the SCEV across for the one without a fork in order to generate
1147 // the full SCEV for both sides of the GEP.
1148 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
1149 BaseScevs.push_back(BaseScevs[0]);
1150 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
1151 OffsetScevs.push_back(OffsetScevs[0]);
1152 else {
1153 ScevList.emplace_back(Scev, NeedsFreeze);
1154 break;
1155 }
1156
1157 Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());
1158
1159 // Find the size of the type being pointed to. We only have a single
1160 // index term (guarded above) so we don't need to index into arrays or
1161 // structures, just get the size of the scalar value.
1162 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
1163
1164 for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
1165 const SCEV *Base = get<0>(B);
1166 const SCEV *Offset = get<0>(O);
1167
1168 // Scale up the offsets by the size of the type, then add to the bases.
1169 const SCEV *Scaled =
1170 SE->getMulExpr(Size, SE->getTruncateOrSignExtend(Offset, IntPtrTy));
1171 ScevList.emplace_back(SE->getAddExpr(Base, Scaled), NeedsFreeze);
1172 }
1173 break;
1174 }
1175 case Instruction::Select: {
1176 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
1177 // A select means we've found a forked pointer, but we currently only
1178 // support a single select per pointer so if there's another behind this
1179 // then we just bail out and return the generic SCEV.
1180 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1181 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
1182 if (ChildScevs.size() == 2)
1183 append_range(ScevList, ChildScevs);
1184 else
1185 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1186 break;
1187 }
1188 case Instruction::PHI: {
1189 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
1190 // A phi means we've found a forked pointer, but we currently only
1191 // support a single phi per pointer so if there's another behind this
1192 // then we just bail out and return the generic SCEV.
1193 if (I->getNumOperands() == 2) {
1194 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
1195 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1196 }
1197 if (ChildScevs.size() == 2)
1198 append_range(ScevList, ChildScevs);
1199 else
1200 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1201 break;
1202 }
1203 case Instruction::Add:
1204 case Instruction::Sub: {
1205 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
1206 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
1207 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1208 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1209
1210 // See if we need to freeze our fork...
1211 bool NeedsFreeze =
1212 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1213
1214 // Check that we only have a single fork, on either the left or right side.
1215 // Copy the SCEV across for the one without a fork in order to generate
1216 // the full SCEV for both sides of the BinOp.
1217 if (LScevs.size() == 2 && RScevs.size() == 1)
1218 RScevs.push_back(RScevs[0]);
1219 else if (RScevs.size() == 2 && LScevs.size() == 1)
1220 LScevs.push_back(LScevs[0]);
1221 else {
1222 ScevList.emplace_back(Scev, NeedsFreeze);
1223 break;
1224 }
1225
1226 for (auto [L, R] : zip(LScevs, RScevs))
1227 ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
1228 NeedsFreeze);
1229 break;
1230 }
1231 default:
1232 // Just return the current SCEV if we haven't handled the instruction yet.
1233 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1234 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1235 break;
1236 }
1237}
1238
1239bool AccessAnalysis::createCheckForAccess(
1240 RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
1241 const DenseMap<Value *, const SCEV *> &StridesMap,
1242 DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
1243 unsigned &RunningDepId, unsigned ASId, bool Assume) {
1244 Value *Ptr = Access.getPointer();
1245 ScalarEvolution *SE = PSE.getSE();
1246 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1247
1248 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RTCheckPtrs;
1249 findForkedSCEVs(SE, TheLoop, Ptr, RTCheckPtrs, MaxForkedSCEVDepth);
1250 assert(!RTCheckPtrs.empty() &&
1251 "Must have some runtime-check pointer candidates");
1252
1253 // RTCheckPtrs must have size 2 if there are forked pointers. Otherwise, there
1254 // are no forked pointers; use replaceSymbolicStrideSCEV in this case.
1255 auto IsLoopInvariantOrAR =
1256 [&SE, &TheLoop](const PointerIntPair<const SCEV *, 1, bool> &P) {
1257 return SE->isLoopInvariant(P.getPointer(), TheLoop) ||
1258 isa<SCEVAddRecExpr>(P.getPointer());
1259 };
1260 if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
1261 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
1262 for (const auto &[Idx, Q] : enumerate(RTCheckPtrs)) dbgs()
1263 << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
1264 } else {
1265 RTCheckPtrs = {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1266 }
1267
1268 /// Check whether all pointers can participate in a runtime bounds check. They
1269 /// must either be invariant or non-wrapping affine AddRecs.
1270 for (auto &P : RTCheckPtrs) {
1271 // The bounds for loop-invariant pointer is trivial.
1272 if (SE->isLoopInvariant(P.getPointer(), TheLoop))
1273 continue;
1274
1275 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(P.getPointer());
1276 if (!AR && Assume)
1277 AR = PSE.getAsAddRec(Ptr);
1278 if (!AR || !AR->isAffine())
1279 return false;
1280
1281 // If there's only one option for Ptr, look it up after bounds and wrap
1282 // checking, because assumptions might have been added to PSE.
1283 if (RTCheckPtrs.size() == 1) {
1284 AR =
1285 cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
1286 P.setPointer(AR);
1287 }
1288
1289 if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
1290 TheLoop, Assume))
1291 return false;
1292 }
1293
1294 for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
1295 // The id of the dependence set.
1296 unsigned DepId;
1297
1298 if (isDependencyCheckNeeded()) {
1299 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1300 unsigned &LeaderId = DepSetId[Leader];
1301 if (!LeaderId)
1302 LeaderId = RunningDepId++;
1303 DepId = LeaderId;
1304 } else
1305 // Each access has its own dependence set.
1306 DepId = RunningDepId++;
1307
1308 bool IsWrite = Access.getInt();
1309 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1310 NeedsFreeze);
1311 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1312 }
1313
1314 return true;
1315}
1316
1317bool AccessAnalysis::canCheckPtrAtRT(
1318 RuntimePointerChecking &RtCheck, Loop *TheLoop,
1319 const DenseMap<Value *, const SCEV *> &StridesMap, Value *&UncomputablePtr,
1320 bool AllowPartial) {
1321 // Find pointers with computable bounds. We are going to use this information
1322 // to place a runtime bound check.
1323 bool CanDoRT = true;
1324
1325 bool MayNeedRTCheck = false;
1326 if (!IsRTCheckAnalysisNeeded) return true;
1327
1328 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1329
1330 // We assign consecutive ids to accesses from different alias sets.
1331 // Accesses between different alias sets don't need to be checked.
1332 unsigned ASId = 0;
1333 for (const auto &AS : AST) {
1334 int NumReadPtrChecks = 0;
1335 int NumWritePtrChecks = 0;
1336 bool CanDoAliasSetRT = true;
1337 ++ASId;
1338 auto ASPointers = AS.getPointers();
1339
1340 // We assign consecutive ids to accesses from different dependence sets.
1341 // Accesses within the same set don't need a runtime check.
1342 unsigned RunningDepId = 1;
1343 DenseMap<Value *, unsigned> DepSetId;
1344
1345 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1346
1347 // First, count how many write and read accesses are in the alias set. Also
1348 // collect MemAccessInfos for later.
1349 SmallVector<MemAccessInfo, 8> AccessInfos;
1350 for (const Value *ConstPtr : ASPointers) {
1351 Value *Ptr = const_cast<Value *>(ConstPtr);
1352 bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
1353 if (IsWrite)
1354 ++NumWritePtrChecks;
1355 else
1356 ++NumReadPtrChecks;
1357 AccessInfos.emplace_back(Ptr, IsWrite);
1358 }
1359
1360 // We do not need runtime checks for this alias set, if there are no writes
1361 // or a single write and no reads.
1362 if (NumWritePtrChecks == 0 ||
1363 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1364 assert((ASPointers.size() <= 1 ||
1365 all_of(ASPointers,
1366 [this](const Value *Ptr) {
1367 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1368 true);
1369 return !DepCands.contains(AccessWrite);
1370 })) &&
1371 "Can only skip updating CanDoRT below, if all entries in AS "
1372 "are reads or there is at most 1 entry");
1373 continue;
1374 }
1375
1376 for (auto &Access : AccessInfos) {
1377 for (const auto &AccessTy : Accesses[Access]) {
1378 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1379 DepSetId, TheLoop, RunningDepId, ASId,
1380 false)) {
1381 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1382 << *Access.getPointer() << '\n');
1383 Retries.emplace_back(Access, AccessTy);
1384 CanDoAliasSetRT = false;
1385 }
1386 }
1387 }
1388
1389 // Note that this function computes CanDoRT and MayNeedRTCheck
1390 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1391 // we have a pointer for which we couldn't find the bounds but we don't
1392 // actually need to emit any checks so it does not matter.
1393 //
1394 // We need runtime checks for this alias set, if there are at least 2
1395 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1396 // any bound checks (because in that case the number of dependence sets is
1397 // incomplete).
1398 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1399
1400 // We need to perform run-time alias checks, but some pointers had bounds
1401 // that couldn't be checked.
1402 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1403 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1404 // We know that we need these checks, so we can now be more aggressive
1405 // and add further checks if required (overflow checks).
1406 CanDoAliasSetRT = true;
1407 for (const auto &[Access, AccessTy] : Retries) {
1408 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1409 DepSetId, TheLoop, RunningDepId, ASId,
1410 /*Assume=*/true)) {
1411 CanDoAliasSetRT = false;
1412 UncomputablePtr = Access.getPointer();
1413 if (!AllowPartial)
1414 break;
1415 }
1416 }
1417 }
1418
1419 CanDoRT &= CanDoAliasSetRT;
1420 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1421 ++ASId;
1422 }
1423
1424 // If the pointers that we would use for the bounds comparison have different
1425 // address spaces, assume the values aren't directly comparable, so we can't
1426 // use them for the runtime check. We also have to assume they could
1427 // overlap. In the future there should be metadata for whether address spaces
1428 // are disjoint.
1429 unsigned NumPointers = RtCheck.Pointers.size();
1430 for (unsigned i = 0; i < NumPointers; ++i) {
1431 for (unsigned j = i + 1; j < NumPointers; ++j) {
1432 // Only need to check pointers between two different dependency sets.
1433 if (RtCheck.Pointers[i].DependencySetId ==
1434 RtCheck.Pointers[j].DependencySetId)
1435 continue;
1436 // Only need to check pointers in the same alias set.
1437 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1438 continue;
1439
1440 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1441 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1442
1443 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1444 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1445 if (ASi != ASj) {
1446 LLVM_DEBUG(
1447 dbgs() << "LAA: Runtime check would require comparison between"
1448 " different address spaces\n");
1449 return false;
1450 }
1451 }
1452 }
1453
1454 if (MayNeedRTCheck && (CanDoRT || AllowPartial))
1455 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1456
1457 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1458 << " pointer comparisons.\n");
1459
1460 // If we can do run-time checks, but there are no checks, no runtime checks
1461 // are needed. This can happen when all pointers point to the same underlying
1462 // object for example.
1463 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1464
1465 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1466 assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
1467 "CanDoRTIfNeeded depends on RtCheck.Need");
1468 if (!CanDoRTIfNeeded && !AllowPartial)
1469 RtCheck.reset();
1470 return CanDoRTIfNeeded;
1471}
1472
1473void AccessAnalysis::processMemAccesses() {
1474 // We process the set twice: first we process read-write pointers, last we
1475 // process read-only pointers. This allows us to skip dependence tests for
1476 // read-only pointers.
1477
1478 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1479 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1480 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1481 LLVM_DEBUG({
1482 for (const auto &[A, _] : Accesses)
1483 dbgs() << "\t" << *A.getPointer() << " ("
1484 << (A.getInt()
1485 ? "write"
1486 : (ReadOnlyPtr.contains(A.getPointer()) ? "read-only"
1487 : "read"))
1488 << ")\n";
1489 });
1490
1491 // The AliasSetTracker has nicely partitioned our pointers by metadata
1492 // compatibility and potential for underlying-object overlap. As a result, we
1493 // only need to check for potential pointer dependencies within each alias
1494 // set.
1495 for (const auto &AS : AST) {
1496 // Note that both the alias-set tracker and the alias sets themselves use
1497 // ordered collections internally and so the iteration order here is
1498 // deterministic.
1499 auto ASPointers = AS.getPointers();
1500
1501 bool SetHasWrite = false;
1502
1503 // Map of (pointer to underlying objects, accessed address space) to last
1504 // access encountered.
1505 typedef DenseMap<std::pair<const Value *, unsigned>, MemAccessInfo>
1506 UnderlyingObjToAccessMap;
1507 UnderlyingObjToAccessMap ObjToLastAccess;
1508
1509 // Set of access to check after all writes have been processed.
1510 PtrAccessMap DeferredAccesses;
1511
1512 // Iterate over each alias set twice, once to process read/write pointers,
1513 // and then to process read-only pointers.
1514 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1515 bool UseDeferred = SetIteration > 0;
1516 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1517
1518 for (const Value *ConstPtr : ASPointers) {
1519 Value *Ptr = const_cast<Value *>(ConstPtr);
1520
1521 // For a single memory access in AliasSetTracker, Accesses may contain
1522 // both read and write, and they both need to be handled for CheckDeps.
1523 for (const auto &[AC, _] : S) {
1524 if (AC.getPointer() != Ptr)
1525 continue;
1526
1527 bool IsWrite = AC.getInt();
1528
1529 // If we're using the deferred access set, then it contains only
1530 // reads.
1531 bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
1532 if (UseDeferred && !IsReadOnlyPtr)
1533 continue;
1534 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1535 // read or a write.
1536 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1537 S.contains(MemAccessInfo(Ptr, false))) &&
1538 "Alias-set pointer not in the access set?");
1539
1540 MemAccessInfo Access(Ptr, IsWrite);
1541 DepCands.insert(Access);
1542
1543 // Memorize read-only pointers for later processing and skip them in
1544 // the first round (they need to be checked after we have seen all
1545 // write pointers). Note: we also mark pointers that are not
1546 // consecutive as "read-only" pointers (so that we check
1547 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1548 if (!UseDeferred && IsReadOnlyPtr) {
1549 // We only use the pointer keys, the types vector values don't
1550 // matter.
1551 DeferredAccesses.insert({Access, {}});
1552 continue;
1553 }
1554
1555 // If this is a write - check other reads and writes for conflicts. If
1556 // this is a read only check other writes for conflicts (but only if
1557 // there is no other write to the ptr - this is an optimization to
1558 // catch "a[i] = a[i] + " without having to do a dependence check).
1559 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1560 CheckDeps.push_back(Access);
1561 IsRTCheckAnalysisNeeded = true;
1562 }
1563
1564 if (IsWrite)
1565 SetHasWrite = true;
1566
1567 // Create sets of pointers connected by a shared alias set and
1568 // underlying object.
1569 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1570 UOs = {};
1571 ::getUnderlyingObjects(Ptr, UOs, LI);
1573 << "Underlying objects for pointer " << *Ptr << "\n");
1574 for (const Value *UnderlyingObj : UOs) {
1575 // nullptr never aliases anything; don't join sets for pointers that have
1576 // "null" in their UnderlyingObjects list.
1577 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1579 TheLoop->getHeader()->getParent(),
1580 UnderlyingObj->getType()->getPointerAddressSpace()))
1581 continue;
1582
1583 auto [It, Inserted] = ObjToLastAccess.try_emplace(
1584 {UnderlyingObj,
1585 cast<PointerType>(Ptr->getType())->getAddressSpace()},
1586 Access);
1587 if (!Inserted) {
1588 DepCands.unionSets(Access, It->second);
1589 It->second = Access;
1590 }
1591
1592 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1593 }
1594 }
1595 }
1596 }
1597 }
1598}
1599
1600/// Check whether the access through \p Ptr has a constant stride.
1601std::optional<int64_t>
1603 const Loop *Lp,
1604 const DenseMap<Value *, const SCEV *> &StridesMap,
1605 bool Assume, bool ShouldCheckWrap) {
1606 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1607 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1608 return 0;
1609
1610 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1611
1612 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1613 if (Assume && !AR)
1614 AR = PSE.getAsAddRec(Ptr);
1615
1616 if (!AR) {
1617 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1618 << " SCEV: " << *PtrScev << "\n");
1619 return std::nullopt;
1620 }
1621
1622 std::optional<int64_t> Stride =
1623 getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
1624 if (!ShouldCheckWrap || !Stride)
1625 return Stride;
1626
1627 if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, Stride))
1628 return Stride;
1629
1630 LLVM_DEBUG(
1631 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1632 << *Ptr << " SCEV: " << *AR << "\n");
1633 return std::nullopt;
1634}
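// Illustrative note (hypothetical loop and names; assumes a 4-byte i32 access
// type): for a loop like
//   for (i = 0; i < n; ++i) A[2 * i] += 1;   // A is 'int *'
// the pointer SCEV is an AddRec of the form {%A,+,8}<%loop> and the stride
// returned here would be 2 (in units of the access type). A loop-invariant
// pointer returns 0 (see above), and a non-affine pointer such as &A[B[i]]
// returns std::nullopt.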
1635
1636std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1637 Type *ElemTyB, Value *PtrB,
1638 const DataLayout &DL,
1639 ScalarEvolution &SE,
1640 bool StrictCheck, bool CheckType) {
1641 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1642
1643 // Make sure that A and B are different pointers.
1644 if (PtrA == PtrB)
1645 return 0;
1646
1647 // Make sure that the element types are the same if required.
1648 if (CheckType && ElemTyA != ElemTyB)
1649 return std::nullopt;
1650
1651 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1652 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1653
1654 // Check that the address spaces match.
1655 if (ASA != ASB)
1656 return std::nullopt;
1657 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1658
1659 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1660 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1661 DL, OffsetA, /*AllowNonInbounds=*/true);
1662 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1663 DL, OffsetB, /*AllowNonInbounds=*/true);
1664
1665 std::optional<int64_t> Val;
1666 if (PtrA1 == PtrB1) {
1667 // Retrieve the address space again as pointer stripping now tracks through
1668 // `addrspacecast`.
1669 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1670 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1671 // Check that the address spaces match and that the pointers are valid.
1672 if (ASA != ASB)
1673 return std::nullopt;
1674
1675 IdxWidth = DL.getIndexSizeInBits(ASA);
1676 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1677 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1678
1679 OffsetB -= OffsetA;
1680 Val = OffsetB.trySExtValue();
1681 } else {
1682 // Otherwise compute the distance with SCEV between the base pointers.
1683 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1684 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1685 std::optional<APInt> Diff =
1686 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1687 if (!Diff)
1688 return std::nullopt;
1689 Val = Diff->trySExtValue();
1690 }
1691
1692 if (!Val)
1693 return std::nullopt;
1694
1695 int64_t Size = DL.getTypeStoreSize(ElemTyA);
1696 int64_t Dist = *Val / Size;
1697
1698 // Ensure that the calculated distance matches the type-based one after
1699 // removing all bitcasts from the provided pointers.
1700 if (!StrictCheck || Dist * Size == Val)
1701 return Dist;
1702 return std::nullopt;
1703}
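// Illustrative usage (hypothetical values, assuming i32 elements): with
//   int A[16];  PtrA = &A[1];  PtrB = &A[4];
// both pointers strip to the same base &A, the accumulated byte offsets are 4
// and 16, and the function returns (16 - 4) / 4 == 3, i.e. PtrB is 3 elements
// past PtrA. With StrictCheck, a byte difference that is not a multiple of the
// element size would instead yield std::nullopt.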
1704
1706 const DataLayout &DL, ScalarEvolution &SE,
1707 SmallVectorImpl<unsigned> &SortedIndices) {
1709 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1710 "Expected list of pointer operands.");
1711 // Walk over the pointers, and map each of them to an offset relative to
1712 // the first pointer in the array.
1713 Value *Ptr0 = VL[0];
1714
1715 using DistOrdPair = std::pair<int64_t, unsigned>;
1716 auto Compare = llvm::less_first();
1717 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1718 Offsets.emplace(0, 0);
1719 bool IsConsecutive = true;
1720 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1721 std::optional<int64_t> Diff =
1722 getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1723 /*StrictCheck=*/true);
1724 if (!Diff)
1725 return false;
1726
1727 // Check if a pointer with the same offset has already been found.
1728 int64_t Offset = *Diff;
1729 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1730 if (!IsInserted)
1731 return false;
1732 // Consecutive order if the inserted element is the last one.
1733 IsConsecutive &= std::next(It) == Offsets.end();
1734 }
1735 SortedIndices.clear();
1736 if (!IsConsecutive) {
1737 // Fill the SortedIndices array only if the accesses are not consecutive.
1738 SortedIndices.resize(VL.size());
1739 for (auto [Idx, Off] : enumerate(Offsets))
1740 SortedIndices[Idx] = Off.second;
1741 }
1742 return true;
1743}
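// Worked example (hypothetical, i32 elements): for VL = { &A[2], &A[0], &A[1] }
// the offsets relative to VL[0] are 0, -2 and -1, so the ordered set becomes
// {(-2,1), (-1,2), (0,0)}; the accesses are not consecutive in their original
// order, and SortedIndices is filled with {1, 2, 0}, i.e. the ascending-address
// order is VL[1], VL[2], VL[0].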
1744
1745/// Returns true if the memory operations \p A and \p B are consecutive.
1747 ScalarEvolution &SE, bool CheckType) {
1750 if (!PtrA || !PtrB)
1751 return false;
1752 Type *ElemTyA = getLoadStoreType(A);
1753 Type *ElemTyB = getLoadStoreType(B);
1754 std::optional<int64_t> Diff =
1755 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1756 /*StrictCheck=*/true, CheckType);
1757 return Diff == 1;
1758}
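// For instance (hypothetical IR): two i32 loads, one from %p and one from
// getelementptr i32, ptr %p, i64 1, have a pointer difference of exactly one
// element, so this returns true; a gap of two elements would give Diff == 2
// and the accesses would not be considered consecutive.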
1759
1761 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1762 [this, SI](Value *Ptr) {
1763 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1764 InstMap.push_back(SI);
1765 ++AccessIdx;
1766 });
1767}
1768
1770 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1771 [this, LI](Value *Ptr) {
1772 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1773 InstMap.push_back(LI);
1774 ++AccessIdx;
1775 });
1776}
1777
1796
1798 switch (Type) {
1799 case NoDep:
1800 case Forward:
1802 case Unknown:
1803 case IndirectUnsafe:
1804 return false;
1805
1807 case Backward:
1809 return true;
1810 }
1811 llvm_unreachable("unexpected DepType!");
1812}
1813
1817
1819 switch (Type) {
1820 case Forward:
1822 return true;
1823
1824 case NoDep:
1825 case Unknown:
1827 case Backward:
1829 case IndirectUnsafe:
1830 return false;
1831 }
1832 llvm_unreachable("unexpected DepType!");
1833}
1834
1835bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1836 uint64_t TypeByteSize,
1837 unsigned CommonStride) {
1838 // If loads occur at a distance that is not a multiple of a feasible vector
1839 // factor store-load forwarding does not take place.
1840 // Positive dependences might cause trouble because vectorizing them might
1841 // prevent store-load forwarding, making vectorized code run a lot slower.
1842 // a[i] = a[i-3] ^ a[i-8];
1843 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1844 // hence on your typical architecture store-load forwarding does not take
1845 // place. Vectorizing in such cases does not make sense.
1846 // Store-load forwarding distance.
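// Rough worked example (assumes 4-byte elements and an initially unconstrained
// safe forwarding distance): for the a[i] = a[i-3] case above, Distance is 12
// bytes and TypeByteSize is 4, so the threshold below is 8 * 4 = 32 iterations.
// At VF = 8 bytes, 12 % 8 != 0 and 12 / 8 = 1 < 32, so the largest
// conflict-free VF is clamped to 4 bytes, which is below 2 * TypeByteSize, and
// the function reports a possible store-to-load forwarding conflict.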
1847
1848 // After this many iterations store-to-load forwarding conflicts should not
1849 // cause any slowdowns.
1850 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1851 // Maximum vector factor.
1852 uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
1853 std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
1854 MaxStoreLoadForwardSafeDistanceInBits);
1855
1856 // Compute the smallest VF at which the store and load would be misaligned.
1857 for (uint64_t VF = 2 * TypeByteSize;
1858 VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
1859 // If the number of vector iterations between the store and the load is
1860 // small, we could incur conflicts.
1861 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1862 MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
1863 break;
1864 }
1865 }
1866
1867 if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
1868 LLVM_DEBUG(
1869 dbgs() << "LAA: Distance " << Distance
1870 << " that could cause a store-load forwarding conflict\n");
1871 return true;
1872 }
1873
1874 if (CommonStride &&
1875 MaxVFWithoutSLForwardIssuesPowerOf2 <
1876 MaxStoreLoadForwardSafeDistanceInBits &&
1877 MaxVFWithoutSLForwardIssuesPowerOf2 !=
1878 VectorizerParams::MaxVectorWidth * TypeByteSize) {
1879 uint64_t MaxVF =
1880 bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
1881 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1882 MaxStoreLoadForwardSafeDistanceInBits =
1883 std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
1884 }
1885 return false;
1886}
1887
1888void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1889 if (Status < S)
1890 Status = S;
1891}
1892
1893/// Given a dependence-distance \p Dist between two memory accesses that have
1894/// strides in the same direction, where \p MaxStride is the absolute value of
1895/// the larger stride, in a loop whose maximum backedge taken count is \p
1896/// MaxBTC, check if it is possible to prove statically that the dependence
1897/// distance is larger than the range that the accesses will travel through the
1898/// execution of the loop. If so, return true; false otherwise. This is useful
1899/// for example in loops such as the following (PR31098):
1900///
1901/// for (i = 0; i < D; ++i) {
1902/// = out[i];
1903/// out[i+D] =
1904/// }
1906 const SCEV &MaxBTC, const SCEV &Dist,
1907 uint64_t MaxStride) {
1908
1909 // If we can prove that
1910 // (**) |Dist| > MaxBTC * Step
1911 // where Step is the absolute stride of the memory accesses in bytes,
1912 // then there is no dependence.
1913 //
1914 // Rationale:
1915 // We basically want to check if the absolute distance (|Dist/Step|)
1916 // is >= the loop iteration count (or > MaxBTC).
1917 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1918 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1919 // that the dependence distance is >= VF; This is checked elsewhere.
1920 // But in some cases we can prune dependence distances early, and
1921 // even before selecting the VF, and without a runtime test, by comparing
1922 // the distance against the loop iteration count. Since the vectorized code
1923 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1924 // also guarantees that distance >= VF.
1925 //
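// As a quick sanity check (hypothetical numbers): with D == 128 and 4-byte
// elements in the loop from the comment above, |Dist| is 512 bytes, MaxBTC is
// 127 and Step is 4, so MaxBTC * Step = 508 < 512 and (**) holds, proving
// independence.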
1926 const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
1927 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1928
1929 const SCEV *CastedDist = &Dist;
1930 const SCEV *CastedProduct = Product;
1931 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1932 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1933
1934 // The dependence distance can be positive/negative, so we sign extend Dist;
1935 // The multiplication of the absolute stride in bytes and the
1936 // backedgeTakenCount is non-negative, so we zero extend Product.
1937 if (DistTypeSizeBits > ProductTypeSizeBits)
1938 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1939 else
1940 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1941
1942 // Is Dist - (MaxBTC * Step) > 0 ?
1943 // (If so, then we have proven (**) because |Dist| >= Dist)
1944 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1945 if (SE.isKnownPositive(Minus))
1946 return true;
1947
1948 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1949 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1950 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1951 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1952 return SE.isKnownPositive(Minus);
1953}
1954
1955/// Check the dependence for two accesses with the same stride \p Stride.
1956/// \p Distance is the positive distance in bytes, and \p TypeByteSize is the
1957/// type size in bytes.
1958///
1959/// \returns true if they are independent.
1961 uint64_t TypeByteSize) {
1962 assert(Stride > 1 && "The stride must be greater than 1");
1963 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1964 assert(Distance > 0 && "The distance must be non-zero");
1965
1966 // Skip if the distance is not a multiple of the type byte size.
1967 if (Distance % TypeByteSize)
1968 return false;
1969
1970 // No dependence if the distance is not a multiple of the stride.
1971 // E.g.
1972 // for (i = 0; i < 1024 ; i += 4)
1973 // A[i+2] = A[i] + 1;
1974 //
1975 // Two accesses in memory (distance is 2, stride is 4):
1976 // | A[0] | | | | A[4] | | | |
1977 // | | | A[2] | | | | A[6] | |
1978 //
1979 // E.g.
1980 // for (i = 0; i < 1024 ; i += 3)
1981 // A[i+4] = A[i] + 1;
1982 //
1983 // Two accesses in memory (distance is 4, stride is 3):
1984 // | A[0] | | | A[3] | | | A[6] | | |
1985 // | | | | | A[4] | | | A[7] | |
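// Putting numbers on the first example (assuming 4-byte i32 elements and the
// byte-scaled values the caller passes): Distance = 8 bytes, Stride = 16 bytes
// (4 elements * 4 bytes) and TypeByteSize = 4; 8 % 4 == 0 but 8 % 16 != 0, so
// the accesses never overlap and a non-zero value (independence) is returned.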
1986 return Distance % Stride;
1987}
1988
1989bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
1990 Type *SrcTy,
1991 const SCEV *Sink,
1992 Type *SinkTy) {
1993 const SCEV *BTC = PSE.getBackedgeTakenCount();
1994 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
1995 ScalarEvolution &SE = *PSE.getSE();
1996 const auto &[SrcStart_, SrcEnd_] =
1997 getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
1998 &SE, &PointerBounds, DT, AC, LoopGuards);
1999 if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
2000 return false;
2001
2002 const auto &[SinkStart_, SinkEnd_] =
2003 getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
2004 &SE, &PointerBounds, DT, AC, LoopGuards);
2005 if (isa<SCEVCouldNotCompute>(SinkStart_) ||
2006 isa<SCEVCouldNotCompute>(SinkEnd_))
2007 return false;
2008
2009 if (!LoopGuards)
2010 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2011
2012 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
2013 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
2014 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
2015 return true;
2016
2017 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
2018 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
2019 return SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart);
2020}
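// Hypothetical illustration: if across the whole loop Src only touches
// [&A[0], &A[100]) while Sink only touches [&A[100], &A[200]), then
// SrcEnd <= SinkStart holds (unsigned) and the two accesses are proven
// disjoint without ever computing a dependence distance.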
2021
2023 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
2024MemoryDepChecker::getDependenceDistanceStrideAndSize(
2025 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
2026 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
2027 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
2028 auto &SE = *PSE.getSE();
2029 const auto &[APtr, AIsWrite] = A;
2030 const auto &[BPtr, BIsWrite] = B;
2031
2032 // Two reads are independent.
2033 if (!AIsWrite && !BIsWrite)
2035
2036 Type *ATy = getLoadStoreType(AInst);
2037 Type *BTy = getLoadStoreType(BInst);
2038
2039 // We cannot check pointers in different address spaces.
2040 if (APtr->getType()->getPointerAddressSpace() !=
2041 BPtr->getType()->getPointerAddressSpace())
2043
2044 std::optional<int64_t> StrideAPtr =
2045 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
2046 std::optional<int64_t> StrideBPtr =
2047 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);
2048
2049 const SCEV *Src = PSE.getSCEV(APtr);
2050 const SCEV *Sink = PSE.getSCEV(BPtr);
2051
2052 // If the induction step is negative we have to invert source and sink of the
2053 // dependence when measuring the distance between them. We should not swap
2054 // AIsWrite with BIsWrite, as their uses expect them in program order.
2055 if (StrideAPtr && *StrideAPtr < 0) {
2056 std::swap(Src, Sink);
2057 std::swap(AInst, BInst);
2058 std::swap(ATy, BTy);
2059 std::swap(StrideAPtr, StrideBPtr);
2060 }
2061
2062 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
2063
2064 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
2065 << "\n");
2066 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
2067 << ": " << *Dist << "\n");
2068
2069 // Need accesses with constant strides and the same direction for further
2070 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
2071 // similar code or pointer arithmetic that could wrap in the address space.
2072
2073 // If either Src or Sink is not strided (i.e. not a non-wrapping AddRec) and
2074 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
2075 // dependence further and also cannot generate runtime checks.
2076 if (!StrideAPtr || !StrideBPtr) {
2077 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2079 }
2080
2081 int64_t StrideAPtrInt = *StrideAPtr;
2082 int64_t StrideBPtrInt = *StrideBPtr;
2083 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
2084 << " Sink induction step: " << StrideBPtrInt << "\n");
2085 // At least one of Src or Sink is loop invariant and the other is strided or
2086 // invariant. We can generate a runtime check to disambiguate the accesses.
2087 if (!StrideAPtrInt || !StrideBPtrInt)
2089
2090 // Both Src and Sink have a constant stride, check if they are in the same
2091 // direction.
2092 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
2093 LLVM_DEBUG(
2094 dbgs() << "Pointer access with strides in different directions\n");
2096 }
2097
2098 TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
2099 TypeSize BStoreSz = DL.getTypeStoreSize(BTy);
2100
2101 // If store sizes are not the same, set TypeByteSize to zero, so we can check
2102 // it in the caller isDependent.
2103 uint64_t ASz = DL.getTypeAllocSize(ATy);
2104 uint64_t BSz = DL.getTypeAllocSize(BTy);
2105 uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;
2106
2107 uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
2108 uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;
2109
2110 uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);
2111
2112 std::optional<uint64_t> CommonStride;
2113 if (StrideAScaled == StrideBScaled)
2114 CommonStride = StrideAScaled;
2115
2116 // TODO: Historically, we didn't retry with runtime checks when (unscaled)
2117 // strides were different, but there is no inherent reason for that restriction.
2118 if (!isa<SCEVConstant>(Dist))
2119 ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;
2120
2121 // If distance is a SCEVCouldNotCompute, return Unknown immediately.
2122 if (isa<SCEVCouldNotCompute>(Dist)) {
2123 LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
2124 return Dependence::Unknown;
2125 }
2126
2127 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2128 TypeByteSize, AIsWrite, BIsWrite);
2129}
2130
2132MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2133 const MemAccessInfo &B, unsigned BIdx) {
2134 assert(AIdx < BIdx && "Must pass arguments in program order");
2135
2136 // Check if we can prove that Sink only accesses memory after Src's end or
2137 // vice versa. The helper is used to perform the checks only on the exit paths
2138 // where it helps to improve the analysis result.
2139 auto CheckCompletelyBeforeOrAfter = [&]() {
2140 auto *APtr = A.getPointer();
2141 auto *BPtr = B.getPointer();
2142 Type *ATy = getLoadStoreType(InstMap[AIdx]);
2143 Type *BTy = getLoadStoreType(InstMap[BIdx]);
2144 const SCEV *Src = PSE.getSCEV(APtr);
2145 const SCEV *Sink = PSE.getSCEV(BPtr);
2146 return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
2147 };
2148
2149 // Get the dependence distance, stride, type size, and which accesses are
2150 // writes for the dependence between A and B.
2151 auto Res =
2152 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2153 if (std::holds_alternative<Dependence::DepType>(Res)) {
2154 if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
2155 CheckCompletelyBeforeOrAfter())
2156 return Dependence::NoDep;
2157 return std::get<Dependence::DepType>(Res);
2158 }
2159
2160 auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
2161 std::get<DepDistanceStrideAndSizeInfo>(Res);
2162 bool HasSameSize = TypeByteSize > 0;
2163
2164 ScalarEvolution &SE = *PSE.getSE();
2165 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2166
2167 // If the distance between the accesses is larger than their maximum absolute
2168 // stride multiplied by the symbolic maximum backedge taken count (which is an
2169 // upper bound of the number of iterations), the accesses are independent, i.e.
2170 // they are far enough apart that they won't access the same location
2171 // across all loop iterations.
2172 if (HasSameSize &&
2174 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
2175 return Dependence::NoDep;
2176
2177 // The rest of this function relies on ConstDist being at most 64-bits, which
2178 // is checked earlier. Will assert if the calling code changes.
2179 const APInt *APDist = nullptr;
2180 uint64_t ConstDist =
2181 match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;
2182
2183 // Attempt to prove strided accesses independent.
2184 if (APDist) {
2185 // If the distance between accesses and their strides are known constants,
2186 // check whether the accesses interlace each other.
2187 if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2188 areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
2189 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2190 return Dependence::NoDep;
2191 }
2192 } else {
2193 if (!LoopGuards)
2194 LoopGuards.emplace(
2195 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2196 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2197 }
2198
2199 // Negative distances are not plausible dependencies.
2200 if (SE.isKnownNonPositive(Dist)) {
2201 if (SE.isKnownNonNegative(Dist)) {
2202 if (HasSameSize) {
2203 // Write to the same location with the same size.
2204 return Dependence::Forward;
2205 }
2206 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2207 "different type sizes\n");
2208 return Dependence::Unknown;
2209 }
2210
2211 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2212 // Check if the first access writes to a location that is read in a later
2213 // iteration, where the distance between them is not a multiple of a vector
2214 // factor and relatively small.
2215 //
2216 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2217 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2218 // forward dependency will allow vectorization using any width.
2219
2220 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2221 if (!ConstDist) {
2222 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2224 }
2225 if (!HasSameSize ||
2226 couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
2227 LLVM_DEBUG(
2228 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2230 }
2231 }
2232
2233 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2234 return Dependence::Forward;
2235 }
2236
2237 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2238 // Below we only handle strictly positive distances.
2239 if (MinDistance <= 0) {
2240 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2242 }
2243
2244 if (!HasSameSize) {
2245 if (CheckCompletelyBeforeOrAfter())
2246 return Dependence::NoDep;
2247 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2248 "different type sizes\n");
2249 return Dependence::Unknown;
2250 }
2251 // Bail out early if passed-in parameters make vectorization not feasible.
2252 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2254 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2256 // The minimum number of iterations for a vectorized/unrolled version.
2257 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2258
2259 // It's not vectorizable if the distance is smaller than the minimum distance
2260 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2261 // front needs MaxStride. Vectorizing the last iteration needs TypeByteSize.
2262 // (No need to add the last gap distance.)
2263 //
2264 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2265 // foo(int *A) {
2266 // int *B = (int *)((char *)A + 14);
2267 // for (i = 0 ; i < 1024 ; i += 2)
2268 // B[i] = A[i] + 1;
2269 // }
2270 //
2271 // Two accesses in memory (stride is 4 * 2):
2272 // | A[0] | | A[2] | | A[4] | | A[6] | |
2273 // | B[0] | | B[2] | | B[4] |
2274 //
2275 // The distance needed for vectorizing all iterations except the last one:
2276 // 4 * 2 * (MinNumIter - 1). The distance needed for the last iteration: 4.
2277 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2278 //
2279 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2280 // 12, which is less than the distance of 14.
2281 //
2282 // If MinNumIter is 4 (say, if a user forces the vectorization factor to be 4),
2283 // the minimum distance needed is 28, which is greater than the distance. It is
2284 // not safe to do vectorization.
2285 //
2286 // We use MaxStride (maximum of src and sink strides) to get a conservative
2287 // lower bound on the MinDistanceNeeded in case of different strides.
2288
2289 // We know that Dist is positive, but it may not be constant. Use the signed
2290 // minimum for computations below, as this ensures we compute the closest
2291 // possible dependence distance.
2292 uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
2293 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2294 if (!ConstDist) {
2295 // For non-constant distances, we checked the lower bound of the
2296 // dependence distance and the distance may be larger at runtime (and safe
2297 // for vectorization). Classify it as Unknown, so we re-try with runtime
2298 // checks, unless we can prove both accesses cannot overlap.
2299 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2301 }
2302 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2303 << MinDistance << '\n');
2304 return Dependence::Backward;
2305 }
2306
2307 // Unsafe if the minimum distance needed is greater than the smallest
2308 // dependence distance.
2309 if (MinDistanceNeeded > MinDepDistBytes) {
2310 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2311 << MinDistanceNeeded << " size in bytes\n");
2312 return Dependence::Backward;
2313 }
2314
2315 MinDepDistBytes =
2316 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2317
2318 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2319 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2320 couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
2322
2323 uint64_t MaxVF = MinDepDistBytes / MaxStride;
2324 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2325 << " with max VF = " << MaxVF << '\n');
2326
2327 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2328 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2329 // For non-constant distances, we checked the lower bound of the dependence
2330 // distance and the distance may be larger at runtime (and safe for
2331 // vectorization). Classify it as Unknown, so we re-try with runtime checks,
2332 // unless we can prove both accesses cannot overlap.
2333 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2335 }
2336
2337 if (CheckCompletelyBeforeOrAfter())
2338 return Dependence::NoDep;
2339
2340 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2342}
2343
2345 const MemAccessInfoList &CheckDeps) {
2346
2347 MinDepDistBytes = -1;
2349 for (MemAccessInfo CurAccess : CheckDeps) {
2350 if (Visited.contains(CurAccess))
2351 continue;
2352
2353 // Check accesses within this set.
2355 DepCands.findLeader(CurAccess);
2357 DepCands.member_end();
2358
2359 // Check every access pair.
2360 while (AI != AE) {
2361 Visited.insert(*AI);
2362 bool AIIsWrite = AI->getInt();
2363 // Check loads only against the next equivalence class, but check stores also
2364 // against other stores in the same equivalence class - to the same address.
2366 (AIIsWrite ? AI : std::next(AI));
2367 while (OI != AE) {
2368 // Check every accessing instruction pair in program order.
2369 auto &Acc = Accesses[*AI];
2370 for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
2371 I1 != I1E; ++I1)
2372 // Scan all accesses of another equivalence class, but only the next
2373 // accesses of the same equivalence class.
2374 for (std::vector<unsigned>::iterator
2375 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2376 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2377 I2 != I2E; ++I2) {
2378 auto A = std::make_pair(&*AI, *I1);
2379 auto B = std::make_pair(&*OI, *I2);
2380
2381 assert(*I1 != *I2);
2382 if (*I1 > *I2)
2383 std::swap(A, B);
2384
2386 isDependent(*A.first, A.second, *B.first, B.second);
2388
2389 // Gather dependences unless we accumulated MaxDependences
2390 // dependences. In that case return as soon as we find the first
2391 // unsafe dependence. This puts a limit on this quadratic
2392 // algorithm.
2393 if (RecordDependences) {
2394 if (Type != Dependence::NoDep)
2395 Dependences.emplace_back(A.second, B.second, Type);
2396
2397 if (Dependences.size() >= MaxDependences) {
2398 RecordDependences = false;
2399 Dependences.clear();
2401 << "Too many dependences, stopped recording\n");
2402 }
2403 }
2404 if (!RecordDependences && !isSafeForVectorization())
2405 return false;
2406 }
2407 ++OI;
2408 }
2409 ++AI;
2410 }
2411 }
2412
2413 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2414 return isSafeForVectorization();
2415}
2416
2419 MemAccessInfo Access(Ptr, IsWrite);
2420 auto I = Accesses.find(Access);
2422 if (I != Accesses.end()) {
2423 transform(I->second, std::back_inserter(Insts),
2424 [&](unsigned Idx) { return this->InstMap[Idx]; });
2425 }
2426
2427 return Insts;
2428}
2429
2431 "NoDep",
2432 "Unknown",
2433 "IndirectUnsafe",
2434 "Forward",
2435 "ForwardButPreventsForwarding",
2436 "Backward",
2437 "BackwardVectorizable",
2438 "BackwardVectorizableButPreventsForwarding"};
2439
2441 raw_ostream &OS, unsigned Depth,
2442 const SmallVectorImpl<Instruction *> &Instrs) const {
2443 OS.indent(Depth) << DepName[Type] << ":\n";
2444 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2445 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2446}
2447
2448bool LoopAccessInfo::canAnalyzeLoop() {
2449 // We need to have a loop header.
2450 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2451 << TheLoop->getHeader()->getParent()->getName() << "' from "
2452 << TheLoop->getLocStr() << "\n");
2453
2454 // We can only analyze innermost loops.
2455 if (!TheLoop->isInnermost()) {
2456 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2457 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2458 return false;
2459 }
2460
2461 // We must have a single backedge.
2462 if (TheLoop->getNumBackEdges() != 1) {
2463 LLVM_DEBUG(
2464 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2465 recordAnalysis("CFGNotUnderstood")
2466 << "loop control flow is not understood by analyzer";
2467 return false;
2468 }
2469
2470 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2471 // count, which is an upper bound on the number of loop iterations. The loop
2472 // may execute fewer iterations, if it exits via an uncountable exit.
2473 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2474 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2475 recordAnalysis("CantComputeNumberOfIterations")
2476 << "could not determine number of loop iterations";
2477 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2478 return false;
2479 }
2480
2481 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2482 << TheLoop->getHeader()->getName() << "\n");
2483 return true;
2484}
2485
2486bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2487 const TargetLibraryInfo *TLI,
2488 DominatorTree *DT) {
2489 // Holds the Load and Store instructions.
2492 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2493
2494 // Holds all the different accesses in the loop.
2495 unsigned NumReads = 0;
2496 unsigned NumReadWrites = 0;
2497
2498 bool HasComplexMemInst = false;
2499
2500 // A runtime check is only legal to insert if there are no convergent calls.
2501 HasConvergentOp = false;
2502
2503 PtrRtChecking->Pointers.clear();
2504 PtrRtChecking->Need = false;
2505
2506 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2507
2508 const bool EnableMemAccessVersioningOfLoop =
2510 !TheLoop->getHeader()->getParent()->hasOptSize();
2511
2512 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2513 // loop info, as it may be arbitrary.
2514 LoopBlocksRPO RPOT(TheLoop);
2515 RPOT.perform(LI);
2516 for (BasicBlock *BB : RPOT) {
2517 // Scan the BB and collect legal loads and stores. Also detect any
2518 // convergent instructions.
2519 for (Instruction &I : *BB) {
2520 if (auto *Call = dyn_cast<CallBase>(&I)) {
2521 if (Call->isConvergent())
2522 HasConvergentOp = true;
2523 }
2524
2525 // If we have found both a non-vectorizable memory instruction and a
2526 // convergent operation in this loop, there is no reason to continue the search.
2527 if (HasComplexMemInst && HasConvergentOp)
2528 return false;
2529
2530 // Avoid hitting recordAnalysis multiple times.
2531 if (HasComplexMemInst)
2532 continue;
2533
2534 // Record alias scopes defined inside the loop.
2535 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2536 for (Metadata *Op : Decl->getScopeList()->operands())
2537 LoopAliasScopes.insert(cast<MDNode>(Op));
2538
2539 // Many math library functions read the rounding mode. We will only
2540 // vectorize a loop if it contains known function calls that don't set
2541 // the flag. Therefore, it is safe to ignore this read from memory.
2542 auto *Call = dyn_cast<CallInst>(&I);
2544 continue;
2545
2546 // If this is a load, save it. If this instruction can read from memory
2547 // but is not a load, we only allow it if it's a call to a function with a
2548 // vector mapping and no pointer arguments.
2549 if (I.mayReadFromMemory()) {
2550 auto hasPointerArgs = [](CallBase *CB) {
2551 return any_of(CB->args(), [](Value const *Arg) {
2552 return Arg->getType()->isPointerTy();
2553 });
2554 };
2555
2556 // If the function has an explicit vectorized counterpart, and does not
2557 // take output/input pointers, we can safely assume that it can be
2558 // vectorized.
2559 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2560 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2561 continue;
2562
2563 auto *Ld = dyn_cast<LoadInst>(&I);
2564 if (!Ld) {
2565 recordAnalysis("CantVectorizeInstruction", Ld)
2566 << "instruction cannot be vectorized";
2567 HasComplexMemInst = true;
2568 continue;
2569 }
2570 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2571 recordAnalysis("NonSimpleLoad", Ld)
2572 << "read with atomic ordering or volatile read";
2573 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2574 HasComplexMemInst = true;
2575 continue;
2576 }
2577 NumLoads++;
2578 Loads.push_back(Ld);
2579 DepChecker->addAccess(Ld);
2580 if (EnableMemAccessVersioningOfLoop)
2581 collectStridedAccess(Ld);
2582 continue;
2583 }
2584
2585 // Save 'store' instructions. Abort if other instructions write to memory.
2586 if (I.mayWriteToMemory()) {
2587 auto *St = dyn_cast<StoreInst>(&I);
2588 if (!St) {
2589 recordAnalysis("CantVectorizeInstruction", St)
2590 << "instruction cannot be vectorized";
2591 HasComplexMemInst = true;
2592 continue;
2593 }
2594 if (!St->isSimple() && !IsAnnotatedParallel) {
2595 recordAnalysis("NonSimpleStore", St)
2596 << "write with atomic ordering or volatile write";
2597 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2598 HasComplexMemInst = true;
2599 continue;
2600 }
2601 NumStores++;
2602 Stores.push_back(St);
2603 DepChecker->addAccess(St);
2604 if (EnableMemAccessVersioningOfLoop)
2605 collectStridedAccess(St);
2606 }
2607 } // Next instr.
2608 } // Next block.
2609
2610 if (HasComplexMemInst)
2611 return false;
2612
2613 // Now we have two lists that hold the loads and the stores.
2614 // Next, we find the pointers that they use.
2615
2616 // Check if we see any stores. If there are no stores, then we don't
2617 // care if the pointers are *restrict*.
2618 if (!Stores.size()) {
2619 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2620 return true;
2621 }
2622
2624 AccessAnalysis Accesses(TheLoop, AA, LI, DepCands, *PSE, LoopAliasScopes);
2625
2626 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2627 // multiple times on the same object. If the ptr is accessed twice, once
2628 // for read and once for write, it will only appear once (on the write
2629 // list). This is okay, since we are going to check for conflicts between
2630 // writes and between reads and writes, but not between reads and reads.
2631 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2632
2633 // Record uniform store addresses to identify if we have multiple stores
2634 // to the same address.
2635 SmallPtrSet<Value *, 16> UniformStores;
2636
2637 for (StoreInst *ST : Stores) {
2638 Value *Ptr = ST->getPointerOperand();
2639
2640 if (isInvariant(Ptr)) {
2641 // Record store instructions to loop invariant addresses
2642 StoresToInvariantAddresses.push_back(ST);
2643 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2644 !UniformStores.insert(Ptr).second;
2645 }
2646
2647 // If we did *not* see this pointer before, insert it into the read-write
2648 // list. At this phase it is only a 'write' list.
2649 Type *AccessTy = getLoadStoreType(ST);
2650 if (Seen.insert({Ptr, AccessTy}).second) {
2651 ++NumReadWrites;
2652
2653 MemoryLocation Loc = MemoryLocation::get(ST);
2654 // The TBAA metadata could have a control dependency on the predication
2655 // condition, so we cannot rely on it when determining whether or not we
2656 // need runtime pointer checks.
2657 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2658 Loc.AATags.TBAA = nullptr;
2659
2660 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2661 [&Accesses, AccessTy, Loc](Value *Ptr) {
2662 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2663 Accesses.addStore(NewLoc, AccessTy);
2664 });
2665 }
2666 }
2667
2668 if (IsAnnotatedParallel) {
2669 LLVM_DEBUG(
2670 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2671 << "checks.\n");
2672 return true;
2673 }
2674
2675 for (LoadInst *LD : Loads) {
2676 Value *Ptr = LD->getPointerOperand();
2677 // If we did *not* see this pointer before, insert it into the
2678 // read list. If we *did* see it before, then it is already in
2679 // the read-write list. This allows us to vectorize expressions
2680 // such as A[i] += x, because the address of A[i] is a read-write
2681 // pointer. This only works if the index of A[i] is consecutive.
2682 // If the index is unknown (for example A[B[i]]) then we may
2683 // read a few words, modify, and write a few words, and some of the
2684 // words may be written to the same address.
2685 bool IsReadOnlyPtr = false;
2686 Type *AccessTy = getLoadStoreType(LD);
2687 if (Seen.insert({Ptr, AccessTy}).second ||
2688 !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, SymbolicStrides)) {
2689 ++NumReads;
2690 IsReadOnlyPtr = true;
2691 }
2692
2693 // See if there is an unsafe dependency between a load from a uniform address
2694 // and a store to the same uniform address.
2695 if (UniformStores.contains(Ptr)) {
2696 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2697 "load and uniform store to the same address!\n");
2698 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2699 }
2700
2701 MemoryLocation Loc = MemoryLocation::get(LD);
2702 // The TBAA metadata could have a control dependency on the predication
2703 // condition, so we cannot rely on it when determining whether or not we
2704 // need runtime pointer checks.
2705 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2706 Loc.AATags.TBAA = nullptr;
2707
2708 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2709 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2710 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2711 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2712 });
2713 }
2714
2715 // If we write (or read-write) to a single destination and there are no
2716 // other reads in this loop, then it is safe to vectorize.
2717 if (NumReadWrites == 1 && NumReads == 0) {
2718 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2719 return true;
2720 }
2721
2722 // Build dependence sets and check whether we need a runtime pointer bounds
2723 // check.
2724 Accesses.buildDependenceSets();
2725
2726 // Find pointers with computable bounds. We are going to use this information
2727 // to place a runtime bound check.
2728 Value *UncomputablePtr = nullptr;
2729 HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
2730 *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
2731 if (!HasCompletePtrRtChecking) {
2732 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2733 recordAnalysis("CantIdentifyArrayBounds", I)
2734 << "cannot identify array bounds";
2735 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2736 << "the array bounds.\n");
2737 return false;
2738 }
2739
2740 LLVM_DEBUG(
2741 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2742
2743 bool DepsAreSafe = true;
2744 if (Accesses.isDependencyCheckNeeded()) {
2745 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2746 DepsAreSafe =
2747 DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());
2748
2749 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
2750 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2751
2752 // Clear the dependency checks. We assume they are not needed.
2753 Accesses.resetDepChecks(*DepChecker);
2754
2755 PtrRtChecking->reset();
2756 PtrRtChecking->Need = true;
2757
2758 UncomputablePtr = nullptr;
2759 HasCompletePtrRtChecking =
2760 Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
2761 UncomputablePtr, AllowPartial);
2762
2763 // Check that we found the bounds for the pointer.
2764 if (!HasCompletePtrRtChecking) {
2765 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2766 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2767 << "cannot check memory dependencies at runtime";
2768 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2769 return false;
2770 }
2771 DepsAreSafe = true;
2772 }
2773 }
2774
2775 if (HasConvergentOp) {
2776 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2777 << "cannot add control dependency to convergent operation";
2778 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2779 "would be needed with a convergent operation\n");
2780 return false;
2781 }
2782
2783 if (DepsAreSafe) {
2784 LLVM_DEBUG(
2785 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2786 << (PtrRtChecking->Need ? "" : " don't")
2787 << " need runtime memory checks.\n");
2788 return true;
2789 }
2790
2791 emitUnsafeDependenceRemark();
2792 return false;
2793}
2794
2795void LoopAccessInfo::emitUnsafeDependenceRemark() {
2796 const auto *Deps = getDepChecker().getDependences();
2797 if (!Deps)
2798 return;
2799 const auto *Found =
2800 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2803 });
2804 if (Found == Deps->end())
2805 return;
2806 MemoryDepChecker::Dependence Dep = *Found;
2807
2808 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2809
2810 // Emit remark for first unsafe dependence
2811 bool HasForcedDistribution = false;
2812 std::optional<const MDOperand *> Value =
2813 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2814 if (Value) {
2815 const MDOperand *Op = *Value;
2816 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2817 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2818 }
2819
2820 const std::string Info =
2821 HasForcedDistribution
2822 ? "unsafe dependent memory operations in loop."
2823 : "unsafe dependent memory operations in loop. Use "
2824 "#pragma clang loop distribute(enable) to allow loop distribution "
2825 "to attempt to isolate the offending operations into a separate "
2826 "loop";
2827 OptimizationRemarkAnalysis &R =
2828 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2829
2830 switch (Dep.Type) {
2834 llvm_unreachable("Unexpected dependence");
2836 R << "\nBackward loop carried data dependence.";
2837 break;
2839 R << "\nForward loop carried data dependence that prevents "
2840 "store-to-load forwarding.";
2841 break;
2843 R << "\nBackward loop carried data dependence that prevents "
2844 "store-to-load forwarding.";
2845 break;
2847 R << "\nUnsafe indirect dependence.";
2848 break;
2850 R << "\nUnknown data dependence.";
2851 break;
2852 }
2853
2854 if (Instruction *I = Dep.getSource(getDepChecker())) {
2855 DebugLoc SourceLoc = I->getDebugLoc();
2857 SourceLoc = DD->getDebugLoc();
2858 if (SourceLoc)
2859 R << " Memory location is the same as accessed at "
2860 << ore::NV("Location", SourceLoc);
2861 }
2862}
2863
2865 const Loop *TheLoop,
2866 const DominatorTree *DT) {
2867 assert(TheLoop->contains(BB) && "Unknown block used");
2868
2869 // Blocks that do not dominate the latch need predication.
2870 const BasicBlock *Latch = TheLoop->getLoopLatch();
2871 return !DT->dominates(BB, Latch);
2872}
2873
2875LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2876 assert(!Report && "Multiple reports generated");
2877
2878 const BasicBlock *CodeRegion = TheLoop->getHeader();
2879 DebugLoc DL = TheLoop->getStartLoc();
2880
2881 if (I) {
2882 CodeRegion = I->getParent();
2883 // If there is no debug location attached to the instruction, fall back to
2884 // using the loop's.
2885 if (I->getDebugLoc())
2886 DL = I->getDebugLoc();
2887 }
2888
2889 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2890 DL, CodeRegion);
2891 return *Report;
2892}
2893
2895 auto *SE = PSE->getSE();
2896 if (TheLoop->isLoopInvariant(V))
2897 return true;
2898 if (!SE->isSCEVable(V->getType()))
2899 return false;
2900 const SCEV *S = SE->getSCEV(V);
2901 return SE->isLoopInvariant(S, TheLoop);
2902}
2903
2904/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
2905/// Otherwise, return \p Ptr.
2907 Loop *Lp) {
2909 if (!GEP)
2910 return Ptr;
2911
2912 Value *V = Ptr;
2913 for (const Use &U : GEP->operands()) {
2914 if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
2915 if (V == Ptr)
2916 V = U;
2917 else
2918 // There must be exactly one loop-variant operand.
2919 return Ptr;
2920 }
2921 }
2922 return V;
2923}
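// For example (hypothetical IR): in "getelementptr i32, ptr %A, i64 %iv" with
// a loop-invariant %A, the single loop-variant operand %iv is returned; if
// more than one operand varies with the loop, the original pointer is
// returned unchanged.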
2924
2925/// Get the stride of a pointer access in a loop. Looks for symbolic
2926/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2928 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2929 if (!PtrTy)
2930 return nullptr;
2931
2932 // Try to remove a gep instruction to make the pointer (actually the index at
2933 // this point) easier to analyze. If OrigPtr is equal to Ptr, we are analyzing
2934 // the pointer; otherwise, we are analyzing the index.
2935 Value *OrigPtr = Ptr;
2936
2937 Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
2938 const SCEV *V = SE->getSCEV(Ptr);
2939
2940 if (Ptr != OrigPtr)
2941 // Strip off casts.
2942 while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2943 V = C->getOperand();
2944
2946 return nullptr;
2947
2948 // Note that the restrictions after this loop-invariant check are only
2949 // profitability restrictions.
2950 if (!SE->isLoopInvariant(V, Lp))
2951 return nullptr;
2952
2953 // Look for the loop invariant symbolic value.
2954 if (isa<SCEVUnknown>(V))
2955 return V;
2956
2957 if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2958 if (isa<SCEVUnknown>(C->getOperand()))
2959 return V;
2960
2961 return nullptr;
2962}
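// Illustrative case (hedged sketch; the hypothetical %Stride is not from the
// original source): for a pointer like &A[i * Stride] with a loop-invariant
// value %Stride, the index analyzed here typically has SCEV {0,+,%Stride}<%loop>,
// and the symbolic stride returned is the SCEVUnknown for %Stride (possibly
// behind an integral cast), matching the cast<SCEVUnknown> performed in
// collectStridedAccess below.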
2963
2964void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2965 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2966 if (!Ptr)
2967 return;
2968
2969 // Note: getStrideFromPointer is a *profitability* heuristic. We
2970 // could broaden the scope of values returned here - to anything
2971 // which happens to be loop invariant and contributes to the
2972 // computation of an interesting IV - but we chose not to as we
2973 // don't have a cost model here, and broadening the scope exposes
2974 // far too many unprofitable cases.
2975 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2976 if (!StrideExpr)
2977 return;
2978
2979 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2980 "versioning:");
2981 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2982
2983 if (!SpeculateUnitStride) {
2984 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2985 return;
2986 }
2987
2988 // Avoid adding the "Stride == 1" predicate when we know that
2989 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2990 // or zero iteration loop, as Trip-Count <= Stride == 1.
2991 //
2992 // TODO: We are currently not making a very informed decision on when it is
2993 // beneficial to apply stride versioning. It might make more sense that the
2994 // users of this analysis (such as the vectorizer) will trigger it, based on
2995 // their specific cost considerations; For example, in cases where stride
2996 // versioning does not help resolving memory accesses/dependences, the
2997 // vectorizer should evaluate the cost of the runtime test, and the benefit
2998 // of various possible stride specializations, considering the alternatives
2999 // of using gather/scatters (if available).
3000
3001 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
3002
3003 // Match the types so we can compare the stride and the MaxBTC.
3004 // The Stride can be positive/negative, so we sign extend Stride;
3005 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
3006 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
3007 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3008 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
3009 const SCEV *CastedStride = StrideExpr;
3010 const SCEV *CastedBECount = MaxBTC;
3011 ScalarEvolution *SE = PSE->getSE();
3012 if (BETypeSizeBits >= StrideTypeSizeBits)
3013 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
3014 else
3015 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
3016 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3017 // Since TripCount == BackEdgeTakenCount + 1, checking:
3018 // "Stride >= TripCount" is equivalent to checking:
3019 // Stride - MaxBTC > 0
3020 if (SE->isKnownPositive(StrideMinusBETaken)) {
3021 LLVM_DEBUG(
3022 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3023 "Stride==1 predicate will imply that the loop executes "
3024 "at most once.\n");
3025 return;
3026 }
3027 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3028
3029 // Strip back off the integer cast, and check that our result is a
3030 // SCEVUnknown as we expect.
3031 const SCEV *StrideBase = StrideExpr;
3032 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3033 StrideBase = C->getOperand();
3034 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3035}
3036
3038 const TargetTransformInfo *TTI,
3039 const TargetLibraryInfo *TLI, AAResults *AA,
3040 DominatorTree *DT, LoopInfo *LI,
3041 AssumptionCache *AC, bool AllowPartial)
3042 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3043 PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
3044 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3045 if (TTI && !TTI->enableScalableVectorization())
3046 // Scale the vector width by 2 as a rough estimate to also consider
3047 // interleaving.
3048 MaxTargetVectorWidthInBits =
3049 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;
3050
3051 DepChecker = std::make_unique<MemoryDepChecker>(
3052 *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
3053 PtrRtChecking =
3054 std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
3055 if (canAnalyzeLoop())
3056 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3057}
3058
3059void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3060 if (CanVecMem) {
3061 OS.indent(Depth) << "Memory dependences are safe";
3062 const MemoryDepChecker &DC = getDepChecker();
3063 if (!DC.isSafeForAnyVectorWidth())
3064 OS << " with a maximum safe vector width of "
3065 << DC.getMaxSafeVectorWidthInBits() << " bits";
3068 OS << ", with a maximum safe store-load forward width of " << SLDist
3069 << " bits";
3070 }
3071 if (PtrRtChecking->Need)
3072 OS << " with run-time checks";
3073 OS << "\n";
3074 }
3075
3076 if (HasConvergentOp)
3077 OS.indent(Depth) << "Has convergent operation in loop\n";
3078
3079 if (Report)
3080 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3081
3082 if (auto *Dependences = DepChecker->getDependences()) {
3083 OS.indent(Depth) << "Dependences:\n";
3084 for (const auto &Dep : *Dependences) {
3085 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3086 OS << "\n";
3087 }
3088 } else
3089 OS.indent(Depth) << "Too many dependences, not recorded\n";
3090
3091 // List the pairs of accesses that need run-time checks to prove independence.
3092 PtrRtChecking->print(OS, Depth);
3093 if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
3094 OS.indent(Depth) << "Generated run-time checks are incomplete\n";
3095 OS << "\n";
3096
3097 OS.indent(Depth)
3098 << "Non vectorizable stores to invariant address were "
3099 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3100 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3101 ? ""
3102 : "not ")
3103 << "found in loop.\n";
3104
3105 OS.indent(Depth) << "SCEV assumptions:\n";
3106 PSE->getPredicate().print(OS, Depth);
3107
3108 OS << "\n";
3109
3110 OS.indent(Depth) << "Expressions re-written:\n";
3111 PSE->print(OS, Depth);
3112}
3113
3115 bool AllowPartial) {
3116 const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);
3117
3118 // We need to create the LoopAccessInfo if either we don't already have one,
3119 // or if it was created with a different value of AllowPartial.
3120 if (Inserted || It->second->hasAllowPartial() != AllowPartial)
3121 It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
3122 &LI, AC, AllowPartial);
3123
3124 return *It->second;
3125}
3127 // Collect LoopAccessInfo entries that may keep references to IR outside the
3128 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3129 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3130 // SCEVs, e.g. for pointer expressions.
3131 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3132 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3133 LAI->getPSE().getPredicate().isAlwaysTrue())
3134 continue;
3135 LoopAccessInfoMap.erase(L);
3136 }
3137}
3138
3140 Function &F, const PreservedAnalyses &PA,
3141 FunctionAnalysisManager::Invalidator &Inv) {
3142 // Check whether our analysis is preserved.
3143 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3144 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3145 // If not, give up now.
3146 return true;
3147
3148 // Check whether the analyses we depend on became invalid for any reason.
3149 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3150 // invalid.
3151 return Inv.invalidate<AAManager>(F, PA) ||
3152 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3153 Inv.invalidate<LoopAnalysis>(F, PA) ||
3154 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3155}
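
In other words, a cached LoopAccessInfoManager survives a transform pass only if LoopAccessAnalysis itself and the four analyses queried above all remain valid. A sketch of a pass reporting that, where the pass name and body are assumptions for illustration:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Hypothetical pass that leaves memory accesses and the CFG untouched.
struct NoMemChangePass : PassInfoMixin<NoMemChangePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    // ... transform F without touching loads, stores or control flow ...
    PreservedAnalyses PA;
    PA.preserve<LoopAccessAnalysis>();
    // invalidate() above also returns true if any of these were invalidated,
    // so they must be preserved as well for cached LAA results to be kept.
    PA.preserve<AAManager>();
    PA.preserve<ScalarEvolutionAnalysis>();
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
    return PA;
  }
};
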
3156
3157 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3158 FunctionAnalysisManager &FAM) {
3159 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3160 auto &AA = FAM.getResult<AAManager>(F);
3161 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3162 auto &LI = FAM.getResult<LoopAnalysis>(F);
3163 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3164 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3165 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
3166 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI, &AC);
3167}
3168
3169AnalysisKey LoopAccessAnalysis::Key;
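
run() above pulls in every function analysis the implementation needs, so any pipeline built through PassBuilder can simply request the result. For a standalone tool, the usual new-pass-manager boilerplate is enough; a sketch under that assumption, with the function name made up for illustration:

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"

using namespace llvm;

// Sketch: standard new-pass-manager setup, then query LoopAccessAnalysis.
static void analyzeFunction(Function &F) {
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  LoopAccessInfoManager &LAIs = FAM.getResult<LoopAccessAnalysis>(F);
  (void)LAIs; // query individual loops via LAIs.getInfo(*L)
}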