//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
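// For illustration only (a sketch, not code from this pass): with VF = 4, a
// scalar loop such as
//   for (i = 0; i < n; i += 1)
//     a[i] = b[i] + 1;
// is conceptually rewritten so that each wide iteration processes four
// elements and the induction variable steps by the vector width:
//   for (i = 0; i < n; i += 4)
//     a[i:i+3] = b[i:i+3] + <1, 1, 1, 1>;
//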
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//   D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//   Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//   Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//   Data for SIMD.
//
// Other ideas/concepts are from:
//   A. Zaks and D. Nuzman. Autovectorization in GCC - two years later.
//
//   S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//   Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanHelpers.h"
#include "VPlanPatternMatch.h"
#include "VPlanTransforms.h"
#include "VPlanUtils.h"
#include "VPlanVerifier.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;
using namespace SCEVPatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
    "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks"));
// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly. If tail-folding fails,
// there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<TailFoldingStyle> ForceTailFoldingStyle(
    "force-tail-folding-style", cl::desc("Force the tail folding style"),
    cl::init(TailFoldingStyle::None),
    cl::values(
        clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
        clEnumValN(
            TailFoldingStyle::Data, "data",
            "Create lane mask for data only, using active.lane.mask intrinsic"),
        clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
                   "data-without-lane-mask",
                   "Create lane mask with compare/stepvector"),
        clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control",
                   "Create lane mask using active.lane.mask intrinsic, and use "
                   "it for both data and control flow"),
        clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck,
                   "data-and-control-without-rt-check",
                   "Similar to data-and-control, but remove the runtime check"),
        clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl",
                   "Use predicated EVL instructions for tail folding. If EVL "
                   "is unsupported, fallback to data-without-lane-mask.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> llvm::EnableVPlanNativePath(
    "enable-vplan-native-path", cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

cl::opt<bool>
    llvm::VerifyEachVPlan("vplan-verify-each",
#ifdef EXPENSIVE_CHECKS
                          cl::init(true),
#else
                          cl::init(false),
#endif
                          cl::Hidden,
                          cl::desc("Verify VPlans after VPlan transforms."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

static cl::opt<cl::boolOrDefault> ForceSafeDivisor(
    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    cl::desc(
        "Override cost based safe divisor widening for div/rem instructions"));

static cl::opt<bool> MaximizeBandwidthForVecCalls(
    "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
    cl::Hidden,
    cl::desc("Try wider VFs if they enable the use of vector variants"));

static cl::opt<bool> EnableEarlyExitVectorization(
    "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable vectorization of early exit loops with uncountable exits."));

static cl::opt<bool> ConsiderRegPressure(
    "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
    cl::desc("Discard VFs if their register pressure is too high."));

// Likelihood of bypassing the vectorized loop because there are zero trips left
// after prolog. See `emitIterationCountCheck`.
static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
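// (A branch weight pair of {1, 127} makes the bypass roughly a 1-in-128 event.)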

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
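  // E.g. (illustrative): under a typical data layout i1 has a type size of
  // 1 bit but an alloc size of 8 bits, so it is irregular; i32 (32-bit size,
  // 32-bit alloc size) is regular.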
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
/// ElementCount to include loops whose trip count is a function of vscale.
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
                                              const Loop *L) {
  if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
    return ElementCount::getFixed(ExpectedTC);

  const SCEV *BTC = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BTC))
    return ElementCount::getFixed(0);

  const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
  if (isa<SCEVVScale>(ExitCount))
    return ElementCount::getScalable(1);

  const APInt *Scale;
  if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
    if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
      if (Scale->getActiveBits() <= 32)
        return ElementCount::getScalable(Scale->getZExtValue());

  return ElementCount::getFixed(0);
}
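// For example (illustrative): a loop whose trip count folds to '8 * vscale'
// (with no unsigned wrap) is reported as ElementCount::getScalable(8), and a
// trip count of plain 'vscale' as ElementCount::getScalable(1).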

/// Returns "best known" trip count, which is either a valid positive trip count
/// or std::nullopt when an estimate cannot be made (including when the trip
/// count would overflow), for the specified loop \p L as defined by the
/// following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
///   4) Returns std::nullopt if all of the above failed.
static std::optional<ElementCount>
getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L,
                    bool CanUseConstantMax = true) {
  // Check if exact trip count is known.
  if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return ElementCount::getFixed(*EstimatedTC);

  if (!CanUseConstantMax)
    return std::nullopt;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
    return ElementCount::getFixed(ExpectedTC);

  return std::nullopt;
}
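// For example (illustrative): an exact trip count of 100 yields Fixed(100);
// failing that, profile data estimating 50 iterations yields Fixed(50);
// failing that, a known constant max trip count is used as an upper bound.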

namespace {
// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
} // namespace

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      ElementCount VecWidth, unsigned UnrollFactor,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks,
                      VPlan &Plan)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
        VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
        Cost(CM), BFI(BFI), PSI(PSI), RTChecks(RTChecks), Plan(Plan),
        VectorPHVPBB(cast<VPBasicBlock>(
            Plan.getVectorLoopRegion()->getSinglePredecessor())) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Creates a basic block for the scalar preheader. Both
  /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop override
  /// the method to create additional blocks and checks needed for epilogue
  /// vectorization.
  virtual BasicBlock *createScalarPreheader();

  /// Fix the vectorized code, taking care of header phi's, and more.
  void fixVectorizedLoop(VPTransformState &State);

  /// Fix the non-induction PHIs in \p Plan.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns the original loop trip count.
  Value *getTripCount() const { return TripCount; }

  /// Used to set the trip count after ILV's construction and after the
  /// preheader block has been executed. Note that this always holds the trip
  /// count of the original loop for both main loop and epilogue vectorization.
  void setTripCount(Value *TC) { TripCount = TC; }

protected:

  /// Create and return a new IR basic block for the scalar preheader whose
  /// name is prefixed with \p Prefix.
  BasicBlock *createScalarPreheader(StringRef Prefix);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  VPlan &Plan;

  /// The vector preheader block of \p Plan, used as target for check blocks
  /// introduced during skeleton creation.
  VPBasicBlock *VectorPHVPBB;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;
  VPlan &EpiloguePlan;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF,
                                VPlan &EpiloguePlan)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
        EpiloguePlan(EpiloguePlan) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to setup the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC,
      EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks, VPlan &Plan)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI.MainLoopVF,
                            EPI.MainLoopUF, CM, BFI, PSI, Checks, Plan),
        EPI(EPI) {}

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;

protected:
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC,
      EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks, VPlan &Plan)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI, CM,
                                       BFI, PSI, Checks, Plan) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of VPlan execution).
  BasicBlock *createScalarPreheader() final;

protected:
  /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
  /// vector preheader and its predecessor, also connecting the new block to the
  /// scalar preheader.
  void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);

  // Create a check to see if the main vector loop should be executed
  Value *createIterationCountCheck(ElementCount VF, unsigned UF) const;

  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
  /// The additional bypass block which conditionally skips over the epilogue
  /// loop after executing the main loop. Needed to resume inductions and
  /// reductions during epilogue vectorization.
  BasicBlock *AdditionalBypassBlock = nullptr;

public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC,
      EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks, VPlan &Plan)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI, CM,
                                       BFI, PSI, Checks, Plan) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
  BasicBlock *createScalarPreheader() final;

  /// Return the additional bypass block which targets the scalar loop by
  /// skipping the epilogue loop after completing the main loop.
  BasicBlock *getAdditionalBypassBlock() const {
    assert(AdditionalBypassBlock &&
           "Trying to access AdditionalBypassBlock but it has not been set");
    return AdditionalBypassBlock;
  }

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *VectorPH,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its operands.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return DebugLoc::getUnknown();

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst->getDebugLoc();
  }

  return I->getDebugLoc();
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. If \p DL is passed, use it as debug location
/// for the remark. \return the remark object that can be streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I, DebugLoc DL = {}) {
  BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
  // If debug location is attached to the instruction, use it. Otherwise if DL
  // was not provided, use the loop's.
  if (I && I->getDebugLoc())
    DL = I->getDebugLoc();
  else if (!DL)
    DL = TheLoop->getStartLoc();

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
  assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
  if (VF.isScalable() && isPowerOf2_64(Step)) {
    return B.CreateShl(
        B.CreateVScale(Ty),
        ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
  }
  return B.CreateElementCount(Ty, VFxStep);
}
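// For example (illustrative): with Ty = i64, VF = <vscale x 4> and Step = 2,
// the step is emitted as (vscale << 3) with no-unsigned-wrap; with a fixed
// VF = 4 and Step = 3 it folds to the constant 12.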

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  return B.CreateElementCount(Ty, VF);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

/// Reports an informative message: print \p Msg for debugging purposes as well
/// as an optimization remark. Uses either \p I as location of the remark, or
/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
/// remark.
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                                    OptimizationRemarkEmitter *ORE,
                                    Loop *TheLoop, Instruction *I = nullptr,
                                    DebugLoc DL = {}) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
                             I, DL)
            << Msg);
}

/// Report successful vectorization of the loop. In case an outer loop is
/// vectorized, prepend "outer" to the vectorization remark.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                VectorizationFactor VF, unsigned IC) {
  LLVM_DEBUG(debugVectorizationMessage(
      "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
      nullptr));
  StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
  ORE->emit([&]() {
    return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
                              TheLoop->getHeader())
           << "vectorized " << LoopType << "loop (vectorization width: "
           << ore::NV("VectorizationFactor", VF.Width)
           << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
  });
}

} // end namespace llvm

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {

public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI,
                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {
    if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
      initializeVScaleForTuning();

    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                             PGSOQueryType::IRPass);
  }

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectNonVectorizedAndSetWideningDecisions(UserVF);
    return expectedCost(UserVF).isValid();
  }

  /// \return True if maximizing vector bandwidth is enabled by the target or
  /// user options, for the given register kind.
  bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);

  /// \return True if register pressure should be considered for the given VF.
  bool shouldConsiderRegPressureForVF(ElementCount VF);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A call may be vectorized in different ways depending on whether we have
  /// vectorized variants available and whether the target supports masking.
  /// This function analyzes all calls in the function at the supplied VF,
  /// makes a decision based on the costs of available options, and stores that
  /// decision in a map for use in planning and plan execution.
  void setVectorizedCallDecision(ElementCount VF);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductions.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.contains(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");
    // Pseudo probe needs to be duplicated for each unrolled iteration and
    // vector lane so that profiled loop trip count can be accurately
    // accumulated instead of being under counted.
    if (isa<PseudoProbeInst>(I))
      return false;

    if (VF.isScalar())
      return true;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");
    if (VF.isScalar())
      return true;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.contains(I) &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize,
    CM_VectorCall,
    CM_IntrinsicCall
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[{I, VF}] = {W, Cost};
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // When interleaving, the cost will only be assigned to one instruction,
    // the insert position. For other cases, add the appropriate fraction of
    // the total cost to each instruction. This ensures accurate costs are
    // used, even if the insert position instruction is not used.
    InstructionCost InsertPosCost = Cost;
    InstructionCost OtherMemberCost = 0;
    if (W != CM_Interleave)
      OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
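    // E.g. (illustrative): a 4-member group with total cost 8 and a
    // non-interleave decision assigns cost 2 to every member; for
    // CM_Interleave the whole cost stays on the insert-position member.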
    for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
      if (auto *I = Grp->getMember(Idx)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[{I, VF}] = {W, InsertPosCost};
        else
          WideningDecisions[{I, VF}] = {W, OtherMemberCost};
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");

    std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
    assert(WideningDecisions.contains(InstOnVF) &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  struct CallWideningDecision {
    InstWidening Kind;
    Function *Variant;
    Intrinsic::ID IID;
    std::optional<unsigned> MaskPos;
    InstructionCost Cost;
  };

  void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind,
                               Function *Variant, Intrinsic::ID IID,
                               std::optional<unsigned> MaskPos,
                               InstructionCost Cost) {
    assert(!VF.isScalar() && "Expected vector VF");
    CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
  }

  CallWideningDecision getCallWideningDecision(CallInst *CI,
                                               ElementCount VF) const {
    assert(!VF.isScalar() && "Expected vector VF");
    auto I = CallWideningDecisions.find({CI, VF});
    if (I == CallWideningDecisions.end())
      return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
    return I->second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
    Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect values that will not be widened, including Uniforms, Scalars, and
  /// Instructions to Scalarize for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  /// Also make a decision on what to do about call instructions in the loop
  /// at that VF -- scalarize, call a known vector routine, or call a
  /// vector intrinsic.
  void collectNonVectorizedAndSetWideningDecisions(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.contains(VF))
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    setVectorizedCallDecision(VF);
    collectLoopScalars(VF);
    collectInstsToScalarize(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
                          unsigned AddressSpace) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
                         unsigned AddressSpace) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V, ElementCount VF) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    if (VF.isVector())
      Ty = VectorType::get(Ty, VF);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) const {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Given costs for both strategies, return true if the scalar predication
  /// lowering should be used for div/rem. This incorporates an override
  /// option so it is not simply a cost comparison.
  bool isDivRemScalarWithPredication(InstructionCost ScalarCost,
                                     InstructionCost SafeDivisorCost) const {
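    // Note: -force-widen-divrem-via-safe-divisor=true forces the safe-divisor
    // strategy (this returns false); =false forces scalarization.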
    switch (ForceSafeDivisor) {
    case cl::BOU_UNSET:
      return ScalarCost < SafeDivisorCost;
    case cl::BOU_TRUE:
      return false;
    case cl::BOU_FALSE:
      return true;
    }
    llvm_unreachable("impossible case value");
  }

  /// Returns true if \p I is an instruction which requires predication and
  /// for which our chosen predication strategy is scalarization (i.e. we
  /// don't have an alternate strategy such as masking available).
  /// \p VF is the vectorization factor that will be used to vectorize \p I.
  bool isScalarWithPredication(Instruction *I, ElementCount VF) const;

  /// Returns true if \p I is an instruction that needs to be predicated
  /// at runtime. The result is independent of the predication mechanism.
  /// Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) const;

  /// Return the costs for our two available strategies for lowering a
  /// div/rem operation which requires speculating at least one lane.
  /// First result is for scalarization (will be invalid for scalable
  /// vectors); second is for the safe-divisor strategy.
  std::pair<InstructionCost, InstructionCost>
  getDivRemSpeculationCost(Instruction *I, ElementCount VF) const;

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) const {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) const {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(bool IsVectorizing) const {
    if (!isScalarEpilogueAllowed()) {
      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
      return false;
    }
    // If we might exit from anywhere but the latch and early exit vectorization
    // is disabled, we must run the exiting iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
        !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
                           "from latch block\n");
      return true;
    }
    if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
                           "interleaved group requires scalar epilogue\n");
      return true;
    }
    LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
    return false;
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns the TailFoldingStyle that is best for the current loop.
  TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
    if (!ChosenTailFoldingStyle)
      return TailFoldingStyle::None;
    return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
                               : ChosenTailFoldingStyle->second;
  }

  /// Selects and saves TailFoldingStyle for 2 options - if IV update may
  /// overflow or not.
  /// \param IsScalableVF true if scalable vector factors enabled.
  /// \param UserIC User specific interleave count.
  void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
    assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
    if (!Legal->canFoldTailByMasking()) {
      ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
      return;
    }

    // Default to TTI preference, but allow command line override.
    ChosenTailFoldingStyle = {
        TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
        TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
    if (ForceTailFoldingStyle.getNumOccurrences())
      ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
                                ForceTailFoldingStyle.getValue()};

    if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
        ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
      return;
    // Override EVL styles if needed.
    // FIXME: Investigate opportunity for fixed vector factor.
    bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
                      TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
    if (EVLIsLegal)
      return;
    // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
    // if it's allowed, or DataWithoutLaneMask otherwise.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
        ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
      ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
    else
      ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
                                TailFoldingStyle::DataWithoutLaneMask};

    LLVM_DEBUG(
        dbgs() << "LV: Preference for VP intrinsics indicated. Will "
                  "not try to generate VP Intrinsics "
               << (UserIC > 1
                       ? "since interleave count specified is greater than 1.\n"
                       : "due to non-interleaving reasons.\n"));
  }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const {
    // TODO: check if it is possible to check for None style independent of
    // IVUpdateMayOverflow flag in getTailFoldingStyle.
    return getTailFoldingStyle() != TailFoldingStyle::None;
  }

  /// Return maximum safe number of elements to be processed per vector
  /// iteration, which do not prevent store-load forwarding and are safe with
  /// regard to the memory dependencies. Required for EVL-based VPlans to
  /// correctly calculate AVL (application vector length) as min(remaining AVL,
  /// MaxSafeElements).
  /// TODO: need to consider adjusting cost model to use this value as a
  /// vectorization factor for EVL-based vectorization.
  std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Returns true if VP intrinsics with explicit vector length support should
  /// be generated in the tail folded loop.
  bool foldTailWithEVL() const {
    return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductions.contains(Phi);
  }

  /// Returns true if the predicated reduction select should be used to set the
  /// incoming value for the reduction phi.
  bool usePredicatedReductionSelect() const {
    // Force to use predicated reduction select since the EVL of the
    // second-to-last iteration might not be VF*UF.
    if (foldTailWithEVL())
      return true;
    return PreferPredicatedReductionSelect ||
           TTI.preferPredicatedReductionSelect();
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    CallWideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  InstructionCost expectedCost(ElementCount VF);

  bool hasPredStores() const { return NumPredStores > 0; }

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  /// \p Multiplier is an additional scaling factor applied to VF before
  /// comparing to EpilogueVectorizationMinVF.
  bool isEpilogueVectorizationProfitable(const ElementCount VF,
                                         const unsigned IC) const;

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
                                                         ElementCount VF,
                                                         Type *VectorTy) const;

  /// Returns true if \p Op should be considered invariant and if it is
  /// trivially hoistable.
  bool shouldConsiderInvariant(Value *Op);

  /// Return the value of vscale used for tuning the cost model.
  std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }

private:
  unsigned NumPredStores = 0;

  /// Used to store the value of vscale used for tuning the cost model. It is
  /// initialized during object construction.
  std::optional<unsigned> VScaleForTuning;

  /// Initializes the value of vscale used for tuning the cost model. If
  /// vscale_range.min == vscale_range.max then return vscale_range.max, else
  /// return the value returned by the corresponding TTI method.
  void initializeVScaleForTuning() {
    const Function *Fn = TheLoop->getHeader()->getParent();
    if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
      auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
      auto Min = Attr.getVScaleRangeMin();
      auto Max = Attr.getVScaleRangeMax();
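      // E.g. (illustrative): a function attribute vscale_range(2,2) pins
      // vscale, so 2 is used directly for tuning.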
      if (Max && Min == Max) {
        VScaleForTuning = Max;
        return;
      }
    }

    VScaleForTuning = TTI.getVScaleForTuning();
  }

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// If \p VF > MaxTripcount, clamps it to the next lower VF that is <=
  /// MaxTripCount.
  ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
                                     bool FoldTailByMasking) const;

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       ElementCount MaxSafeVF,
                                       bool FoldTailByMasking);

  /// Checks if scalable vectorization is supported and enabled. Caches the
  /// result to avoid repeated debug dumps for repeated queries.
  bool isScalableVectorizationAllowed();

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
      PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// Control finally chosen tail folding style. The first element is used if
  /// the IV update may overflow, the second element - if it does not.
  std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
      ChosenTailFoldingStyle;

  /// true if scalable vectorization is supported and enabled.
  std::optional<bool> IsScalableVectorizationAllowed;

  /// Maximum safe number of elements to be processed per vector iteration,
  /// which do not prevent store-load forwarding and are safe with regard to the
  /// memory dependencies. Required for EVL-based vectorization, where this
  /// value is used as the upper bound of the safe AVL.
  std::optional<unsigned> MaxSafeElements;
1583
1584 /// A map holding scalar costs for different vectorization factors. The
1585 /// presence of a cost for an instruction in the mapping indicates that the
1586 /// instruction will be scalarized when vectorizing with the associated
1587 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1588 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1589
1590 /// Holds the instructions known to be uniform after vectorization.
1591 /// The data is collected per VF.
1592 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1593
1594 /// Holds the instructions known to be scalar after vectorization.
1595 /// The data is collected per VF.
1596 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1597
1598 /// Holds the instructions (address computations) that are forced to be
1599 /// scalarized.
1600 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1601
1602 /// PHINodes of the reductions that should be expanded in-loop.
1603 SmallPtrSet<PHINode *, 4> InLoopReductions;
1604
1605 /// A Map of inloop reduction operations and their immediate chain operand.
1606 /// FIXME: This can be removed once reductions can be costed correctly in
1607 /// VPlan. This was added to allow quick lookup of the inloop operations.
1608 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1609
1610 /// Returns the expected difference in cost from scalarizing the expression
1611 /// feeding a predicated instruction \p PredInst. The instructions to
1612 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1613 /// non-negative return value implies the expression will be scalarized.
1614 /// Currently, only single-use chains are considered for scalarization.
1615 InstructionCost computePredInstDiscount(Instruction *PredInst,
1616 ScalarCostsTy &ScalarCosts,
1617 ElementCount VF);
1618
1619 /// Collect the instructions that are uniform after vectorization. An
1620 /// instruction is uniform if we represent it with a single scalar value in
1621 /// the vectorized loop corresponding to each vector iteration. Examples of
1622 /// uniform instructions include pointer operands of consecutive or
1623 /// interleaved memory accesses. Note that although uniformity implies an
1624 /// instruction will be scalar, the reverse is not true. In general, a
1625 /// scalarized instruction will be represented by VF scalar values in the
1626 /// vectorized loop, each corresponding to an iteration of the original
1627 /// scalar loop.
1628 void collectLoopUniforms(ElementCount VF);
1629
1630 /// Collect the instructions that are scalar after vectorization. An
1631 /// instruction is scalar if it is known to be uniform or will be scalarized
1632 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1633 /// to the list if they are used by a load/store instruction that is marked as
1634 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1635 /// VF values in the vectorized loop, each corresponding to an iteration of
1636 /// the original scalar loop.
1637 void collectLoopScalars(ElementCount VF);
1638
1639 /// Keeps cost model vectorization decision and cost for instructions.
1640 /// Right now it is used for memory instructions only.
1641 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1642 std::pair<InstWidening, InstructionCost>>;
1643
1644 DecisionList WideningDecisions;
1645
1646 using CallDecisionList =
1647 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1648
1649 CallDecisionList CallWideningDecisions;
1650
1651 /// Returns true if \p V is expected to be vectorized and it needs to be
1652 /// extracted.
1653 bool needsExtract(Value *V, ElementCount VF) const {
1654 Instruction *I = dyn_cast<Instruction>(V);
1655 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1656 TheLoop->isLoopInvariant(I) ||
1657 getWideningDecision(I, VF) == CM_Scalarize ||
1658 (isa<CallInst>(I) &&
1659 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1660 return false;
1661
1662 // Assume we can vectorize V (and hence we need extraction) if the
1663 // scalars are not computed yet. This can happen, because it is called
1664 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1665 // the scalars are collected. That should be a safe assumption in most
1666 // cases, because we check if the operands have vectorizable types
1667 // beforehand in LoopVectorizationLegality.
1668 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1669 };
1670
1671 /// Returns a range containing only operands needing to be extracted.
1672 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1673 ElementCount VF) const {
1674
1675 SmallPtrSet<const Value *, 4> UniqueOperands;
1676 SmallVector<Value *, 4> Res;
1677 for (Value *Op : Ops) {
1678 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1679 !needsExtract(Op, VF))
1680 continue;
1681 Res.push_back(Op);
1682 }
1683 return Res;
1684 }
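// Example (illustrative): for a scalarized call "foo(%a, %b)" where %a is
// computed by a widened instruction in the loop and %b is loop-invariant,
// filterExtractingOperands returns {%a}: only %a must be extracted from a
// vector lane; %b and constants need no extraction.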
1685
1686public:
1687 /// The loop that we evaluate.
1688 Loop *TheLoop;
1689 
1690 /// Predicated scalar evolution analysis.
1691 PredicatedScalarEvolution &PSE;
1692 
1693 /// Loop Info analysis.
1694 LoopInfo *LI;
1695 
1696 /// Vectorization legality.
1697 LoopVectorizationLegality *Legal;
1698 
1699 /// Vector target information.
1700 const TargetTransformInfo &TTI;
1701 
1702 /// Target Library Info.
1703 const TargetLibraryInfo *TLI;
1704 
1705 /// Demanded bits analysis.
1706 DemandedBits *DB;
1707 
1708 /// Assumption cache.
1709 AssumptionCache *AC;
1710 
1711 /// Interface to emit optimization remarks.
1712 OptimizationRemarkEmitter *ORE;
1713 
1714 const Function *TheFunction;
1715 
1716 /// Loop Vectorize Hint.
1717 const LoopVectorizeHints *Hints;
1718 
1719 /// The interleave access information contains groups of interleaved accesses
1720 /// with the same stride that are close to each other.
1721 InterleavedAccessInfo &InterleaveInfo;
1722 
1723 /// Values to ignore in the cost model.
1724 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1725 
1726 /// Values to ignore in the cost model when VF > 1.
1727 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1728 
1729 /// All element types found in the loop.
1730 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1731 
1732 /// The kind of cost that we are calculating.
1733 TTI::TargetCostKind CostKind;
1734 
1735 /// Whether this loop should be optimized for size based on function attribute
1736 /// or profile information.
1737 bool OptForSize;
1738 
1739 /// The highest VF possible for this loop, without using MaxBandwidth.
1741};
1742} // end namespace llvm
1743
1744namespace {
1745/// Helper struct to manage generating runtime checks for vectorization.
1746///
1747 /// The runtime checks are created up-front in temporary blocks, un-linked from
1748 /// the existing IR, to allow more accurate cost estimation. After deciding to
1749 /// vectorize, the checks are moved back; if deciding not to vectorize, the
1750 /// temporary blocks are completely removed.
1751class GeneratedRTChecks {
1752 /// Basic block which contains the generated SCEV checks, if any.
1753 BasicBlock *SCEVCheckBlock = nullptr;
1754
1755 /// The value representing the result of the generated SCEV checks. If it is
1756 /// nullptr no SCEV checks have been generated.
1757 Value *SCEVCheckCond = nullptr;
1758
1759 /// Basic block which contains the generated memory runtime checks, if any.
1760 BasicBlock *MemCheckBlock = nullptr;
1761
1762 /// The value representing the result of the generated memory runtime checks.
1763 /// If it is nullptr no memory runtime checks have been generated.
1764 Value *MemRuntimeCheckCond = nullptr;
1765
1766 DominatorTree *DT;
1767 LoopInfo *LI;
1768 TargetTransformInfo *TTI;
1769 
1770 SCEVExpander SCEVExp;
1771 SCEVExpander MemCheckExp;
1772
1773 bool CostTooHigh = false;
1774
1775 Loop *OuterLoop = nullptr;
1776
1776 
1777 PredicatedScalarEvolution &PSE;
1778 
1779 /// The kind of cost that we are calculating.
1780 TTI::TargetCostKind CostKind;
1781 
1782public:
1783 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1784 LoopInfo *LI, TargetTransformInfo *TTI,
1785 const DataLayout &DL, TTI::TargetCostKind CostKind)
1786 : DT(DT), LI(LI), TTI(TTI), SCEVExp(*PSE.getSE(), DL, "scev.check"),
1787 MemCheckExp(*PSE.getSE(), DL, "scev.check"), PSE(PSE),
1788 CostKind(CostKind) {}
1789
1790 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1791 /// accurately estimate the cost of the runtime checks. The blocks are
1792 /// un-linked from the IR and are added back during vector code generation. If
1793 /// there is no vector code generation, the check blocks are removed
1794 /// completely.
1795 void create(Loop *L, const LoopAccessInfo &LAI,
1796 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1797
1798 // Hard cutoff to limit compile-time increase in case a very large number of
1799 // runtime checks needs to be generated.
1800 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1801 // profile info.
1802 CostTooHigh =
1803 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
1804 if (CostTooHigh)
1805 return;
1806
1807 BasicBlock *LoopHeader = L->getHeader();
1808 BasicBlock *Preheader = L->getLoopPreheader();
1809
1810 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1811 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1812 // may be used by SCEVExpander. The blocks will be un-linked from their
1813 // predecessors and removed from LI & DT at the end of the function.
1814 if (!UnionPred.isAlwaysTrue()) {
1815 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1816 nullptr, "vector.scevcheck");
1817
1818 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1819 &UnionPred, SCEVCheckBlock->getTerminator());
1820 if (isa<Constant>(SCEVCheckCond)) {
1821 // Clean up directly after expanding the predicate to a constant, to
1822 // avoid further expansions re-using anything left over from SCEVExp.
1823 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1824 SCEVCleaner.cleanup();
1825 }
1826 }
1827
1828 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1829 if (RtPtrChecking.Need) {
1830 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1831 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1832 "vector.memcheck");
1833
1834 auto DiffChecks = RtPtrChecking.getDiffChecks();
1835 if (DiffChecks) {
1836 Value *RuntimeVF = nullptr;
1837 MemRuntimeCheckCond = addDiffRuntimeChecks(
1838 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1839 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1840 if (!RuntimeVF)
1841 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1842 return RuntimeVF;
1843 },
1844 IC);
1845 } else {
1846 MemRuntimeCheckCond = addRuntimeChecks(
1847 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1848 MemCheckExp, VectorizerParams::HoistRuntimeChecks);
1849 }
1850 assert(MemRuntimeCheckCond &&
1851 "no RT checks generated although RtPtrChecking "
1852 "claimed checks are required");
1853 }
1854
1855 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1856
1857 if (!MemCheckBlock && !SCEVCheckBlock)
1858 return;
1859
1860 // Unhook the temporary block with the checks, update various places
1861 // accordingly.
1862 if (SCEVCheckBlock)
1863 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1864 if (MemCheckBlock)
1865 MemCheckBlock->replaceAllUsesWith(Preheader);
1866
1867 if (SCEVCheckBlock) {
1868 SCEVCheckBlock->getTerminator()->moveBefore(
1869 Preheader->getTerminator()->getIterator());
1870 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1871 UI->setDebugLoc(DebugLoc::getTemporary());
1872 Preheader->getTerminator()->eraseFromParent();
1873 }
1874 if (MemCheckBlock) {
1875 MemCheckBlock->getTerminator()->moveBefore(
1876 Preheader->getTerminator()->getIterator());
1877 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1878 UI->setDebugLoc(DebugLoc::getTemporary());
1879 Preheader->getTerminator()->eraseFromParent();
1880 }
1881
1882 DT->changeImmediateDominator(LoopHeader, Preheader);
1883 if (MemCheckBlock) {
1884 DT->eraseNode(MemCheckBlock);
1885 LI->removeBlock(MemCheckBlock);
1886 }
1887 if (SCEVCheckBlock) {
1888 DT->eraseNode(SCEVCheckBlock);
1889 LI->removeBlock(SCEVCheckBlock);
1890 }
1891
1892 // Outer loop is used as part of the later cost calculations.
1893 OuterLoop = L->getParentLoop();
1894 }
1895 
1896 InstructionCost getCost() {
1897 if (SCEVCheckBlock || MemCheckBlock)
1898 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1899
1900 if (CostTooHigh) {
1901 InstructionCost Cost;
1902 Cost.setInvalid();
1903 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1904 return Cost;
1905 }
1906
1907 InstructionCost RTCheckCost = 0;
1908 if (SCEVCheckBlock)
1909 for (Instruction &I : *SCEVCheckBlock) {
1910 if (SCEVCheckBlock->getTerminator() == &I)
1911 continue;
1912 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1913 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1914 RTCheckCost += C;
1915 }
1916 if (MemCheckBlock) {
1917 InstructionCost MemCheckCost = 0;
1918 for (Instruction &I : *MemCheckBlock) {
1919 if (MemCheckBlock->getTerminator() == &I)
1920 continue;
1921 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1922 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1923 MemCheckCost += C;
1924 }
1925
1926 // If the runtime memory checks are being created inside an outer loop
1927 // we should find out if these checks are outer loop invariant. If so,
1928 // the checks will likely be hoisted out and so the effective cost will
1929 // reduce according to the outer loop trip count.
1930 if (OuterLoop) {
1931 ScalarEvolution *SE = MemCheckExp.getSE();
1932 // TODO: If profitable, we could refine this further by analysing every
1933 // individual memory check, since there could be a mixture of loop
1934 // variant and invariant checks that mean the final condition is
1935 // variant.
1936 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1937 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1938 // It seems reasonable to assume that we can reduce the effective
1939 // cost of the checks even when we know nothing about the trip
1940 // count. Assume that the outer loop executes at least twice.
1941 unsigned BestTripCount = 2;
1942
1943 // Get the best known TC estimate.
1944 if (auto EstimatedTC = getSmallBestKnownTC(
1945 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1946 if (EstimatedTC->isFixed())
1947 BestTripCount = EstimatedTC->getFixedValue();
1948
1949 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1950
1951 // Let's ensure the cost is always at least 1.
1952 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1953 (InstructionCost::CostType)1);
1954
1955 if (BestTripCount > 1)
1956 LLVM_DEBUG(dbgs()
1957 << "We expect runtime memory checks to be hoisted "
1958 << "out of the outer loop. Cost reduced from "
1959 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1960
1961 MemCheckCost = NewMemCheckCost;
1962 }
1963 }
1964
1965 RTCheckCost += MemCheckCost;
1966 }
1967
1968 if (SCEVCheckBlock || MemCheckBlock)
1969 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1970 << "\n");
1971
1972 return RTCheckCost;
1973 }
1974
1975 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1976 /// unused.
1977 ~GeneratedRTChecks() {
1978 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1979 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1980 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1981 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1982 if (SCEVChecksUsed)
1983 SCEVCleaner.markResultUsed();
1984
1985 if (MemChecksUsed) {
1986 MemCheckCleaner.markResultUsed();
1987 } else {
1988 auto &SE = *MemCheckExp.getSE();
1989 // Memory runtime check generation creates compares that use expanded
1990 // values. Remove them before running the SCEVExpanderCleaners.
1991 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1992 if (MemCheckExp.isInsertedInstruction(&I))
1993 continue;
1994 SE.forgetValue(&I);
1995 I.eraseFromParent();
1996 }
1997 }
1998 MemCheckCleaner.cleanup();
1999 SCEVCleaner.cleanup();
2000
2001 if (!SCEVChecksUsed)
2002 SCEVCheckBlock->eraseFromParent();
2003 if (!MemChecksUsed)
2004 MemCheckBlock->eraseFromParent();
2005 }
2006
2007 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2008 /// outside VPlan.
2009 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2010 using namespace llvm::PatternMatch;
2011 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2012 return {nullptr, nullptr};
2013
2014 return {SCEVCheckCond, SCEVCheckBlock};
2015 }
2016
2017 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2018 /// outside VPlan.
2019 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2020 using namespace llvm::PatternMatch;
2021 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2022 return {nullptr, nullptr};
2023 return {MemRuntimeCheckCond, MemCheckBlock};
2024 }
2025
2026 /// Return true if any runtime checks have been added
2027 bool hasChecks() const {
2028 return getSCEVChecks().first || getMemRuntimeChecks().first;
2029 }
2030};
2031} // namespace
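// Illustrative shape of the emitted check blocks (simplified; the names and
// exact instruction mix are examples only, not taken from this file):
//   vector.scevcheck:              ; result of expanding the SCEV predicate
//     %scev.check = icmp ...
//   vector.memcheck:               ; pairwise pointer-overlap checks
//     %bound0 = icmp ult ptr %a, %b.end
//     %bound1 = icmp ult ptr %b, %a.end
//     %memcheck.conflict = and i1 %bound0, %bound1
// If either final condition is true at runtime, execution falls back to the
// scalar loop.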
2032
2038
2043
2044 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2045 // vectorization. The loop needs to be annotated with #pragma omp simd
2046 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
2047 // the vector length information is not provided, vectorization is not
2048 // considered explicit. Interleave hints are not allowed either. These
2049 // limitations will be relaxed in the future.
2050 // Please note that we are currently forced to abuse the pragma 'clang
2051 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2052 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2053 // provides *explicit vectorization hints* (LV can bypass legal checks and
2054 // assume that vectorization is legal). However, both hints are implemented
2055 // using the same metadata (llvm.loop.vectorize, processed by
2056 // LoopVectorizeHints). This will be fixed in the future when the native IR
2057 // representation for pragma 'omp simd' is introduced.
2058 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2059 OptimizationRemarkEmitter *ORE) {
2060 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2061 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2062
2063 // Only outer loops with an explicit vectorization hint are supported.
2064 // Unannotated outer loops are ignored.
2065 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2066 return false;
2067
2068 Function *Fn = OuterLp->getHeader()->getParent();
2069 if (!Hints.allowVectorization(Fn, OuterLp,
2070 true /*VectorizeOnlyWhenForced*/)) {
2071 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2072 return false;
2073 }
2074
2075 if (Hints.getInterleave() > 1) {
2076 // TODO: Interleave support is future work.
2077 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2078 "outer loops.\n");
2079 Hints.emitRemarkWithHints();
2080 return false;
2081 }
2082
2083 return true;
2084}
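// Example (illustrative) of an outer loop that satisfies the checks above:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)      // outer loop, explicitly annotated
//     for (int j = 0; j < m; ++j)    // inner loop
//       a[i][j] += b[i][j];
// A width hint without vectorize(enable), or any interleave hint, would make
// the function return false.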
2085 
2086 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2087 OptimizationRemarkEmitter *ORE,
2088 SmallVectorImpl<Loop *> &V) {
2089 // Collect inner loops and outer loops without irreducible control flow. For
2090 // now, only collect outer loops that have explicit vectorization hints. If we
2091 // are stress testing the VPlan H-CFG construction, we collect the outermost
2092 // loop of every loop nest.
2093 if (L.isInnermost() || VPlanBuildStressTest ||
2094 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2095 LoopBlocksRPO RPOT(&L);
2096 RPOT.perform(LI);
2097 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2098 V.push_back(&L);
2099 // TODO: Collect inner loops inside marked outer loops in case
2100 // vectorization fails for the outer loop. Do not invoke
2101 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2102 // already known to be reducible. We can use an inherited attribute for
2103 // that.
2104 return;
2105 }
2106 }
2107 for (Loop *InnerL : L)
2108 collectSupportedLoops(*InnerL, LI, ORE, V);
2109}
2110
2111//===----------------------------------------------------------------------===//
2112 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
2113 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2114//===----------------------------------------------------------------------===//
2115
2116/// Compute the transformed value of Index at offset StartValue using step
2117/// StepValue.
2118/// For integer induction, returns StartValue + Index * StepValue.
2119/// For pointer induction, returns StartValue[Index * StepValue].
2120/// FIXME: The newly created binary instructions should contain nsw/nuw
2121/// flags, which can be found from the original scalar operations.
2122 static Value *
2123 emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue,
2124 Value *Step,
2125 InductionDescriptor::InductionKind InductionKind,
2126 const BinaryOperator *InductionBinOp) {
2127 using namespace llvm::PatternMatch;
2128 Type *StepTy = Step->getType();
2129 Value *CastedIndex = StepTy->isIntegerTy()
2130 ? B.CreateSExtOrTrunc(Index, StepTy)
2131 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2132 if (CastedIndex != Index) {
2133 CastedIndex->setName(CastedIndex->getName() + ".cast");
2134 Index = CastedIndex;
2135 }
2136
2137 // Note: the IR at this point is broken. We cannot use SE to create any new
2138 // SCEV and then expand it, hoping that SCEV's simplification will give us
2139 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2140 // lead to various SCEV crashes. So all we can do is use the builder and rely
2141 // on InstCombine for future simplifications. Here we handle some trivial
2142 // cases only.
2143 auto CreateAdd = [&B](Value *X, Value *Y) {
2144 assert(X->getType() == Y->getType() && "Types don't match!");
2145 if (match(X, m_ZeroInt()))
2146 return Y;
2147 if (match(Y, m_ZeroInt()))
2148 return X;
2149 return B.CreateAdd(X, Y);
2150 };
2151
2152 // We allow X to be a vector type, in which case Y will potentially be
2153 // splatted into a vector with the same element count.
2154 auto CreateMul = [&B](Value *X, Value *Y) {
2155 assert(X->getType()->getScalarType() == Y->getType() &&
2156 "Types don't match!");
2157 if (match(X, m_One()))
2158 return Y;
2159 if (match(Y, m_One()))
2160 return X;
2161 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2162 if (XVTy && !isa<VectorType>(Y->getType()))
2163 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2164 return B.CreateMul(X, Y);
2165 };
2166
2167 switch (InductionKind) {
2168 case InductionDescriptor::IK_IntInduction: {
2169 assert(!isa<VectorType>(Index->getType()) &&
2170 "Vector indices not supported for integer inductions yet");
2171 assert(Index->getType() == StartValue->getType() &&
2172 "Index type does not match StartValue type");
2173 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2174 return B.CreateSub(StartValue, Index);
2175 auto *Offset = CreateMul(Index, Step);
2176 return CreateAdd(StartValue, Offset);
2177 }
2178 case InductionDescriptor::IK_PtrInduction:
2179 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2180 case InductionDescriptor::IK_FpInduction: {
2181 assert(!isa<VectorType>(Index->getType()) &&
2182 "Vector indices not supported for FP inductions yet");
2183 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2184 assert(InductionBinOp &&
2185 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2186 InductionBinOp->getOpcode() == Instruction::FSub) &&
2187 "Original bin op should be defined for FP induction");
2188
2189 Value *MulExp = B.CreateFMul(Step, Index);
2190 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2191 "induction");
2192 }
2193 case InductionDescriptor::IK_NoInduction:
2194 return nullptr;
2195 }
2196 llvm_unreachable("invalid enum");
2197}
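// Worked example (illustrative): for an integer induction with
// StartValue = 10 and Step = 3, an Index of %i yields
//   %offset = mul %i, 3
//   %result = add 10, %offset        ; i.e. 10 + 3 * %i
// For a pointer induction the same offset feeds a pointer add instead:
//   %result = ptradd %start, %offset ; i.e. &start[3 * %i]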
2198
2199static std::optional<unsigned> getMaxVScale(const Function &F,
2200 const TargetTransformInfo &TTI) {
2201 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2202 return MaxVScale;
2203
2204 if (F.hasFnAttribute(Attribute::VScaleRange))
2205 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2206
2207 return std::nullopt;
2208}
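// Example (illustrative): if the target reports no fixed maximum vscale but
// the function carries the attribute vscale_range(1,16), this returns 16;
// with neither source of information the result is std::nullopt and callers
// must be conservative.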
2209
2210 /// For the given VF and UF and maximum trip count computed for the loop, return
2211 /// true if the induction variable cannot overflow in the vectorized loop, i.e.
2212 /// the runtime overflow check is known to always evaluate to false and can be
2213 /// removed.
2214 static bool isIndvarOverflowCheckKnownFalse(
2215 const LoopVectorizationCostModel *Cost,
2216 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2217 // Always be conservative if we don't know the exact unroll factor.
2218 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2219
2220 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2221 APInt MaxUIntTripCount = IdxTy->getMask();
2222
2223 // We know the runtime overflow check is known false iff the (max) trip-count
2224 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2225 // the vector loop induction variable.
2226 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2227 uint64_t MaxVF = VF.getKnownMinValue();
2228 if (VF.isScalable()) {
2229 std::optional<unsigned> MaxVScale =
2230 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2231 if (!MaxVScale)
2232 return false;
2233 MaxVF *= *MaxVScale;
2234 }
2235
2236 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2237 }
2238
2239 return false;
2240}
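// Worked example (illustrative): for an i32 IV, a known maximum trip count of
// 1000, fixed VF = 4 and MaxUF = 8, the test is
//   (0xFFFFFFFF - 1000) ugt (4 * 8)
// which holds, so the function returns true and the overflow check is elided.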
2241
2242// Return whether we allow using masked interleave-groups (for dealing with
2243// strided loads/stores that reside in predicated blocks, or for dealing
2244// with gaps).
2245 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2246 // If an override option has been passed in for interleaved accesses, use it.
2247 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2248 return EnableMaskedInterleavedMemAccesses;
2249 
2250 return TTI.enableMaskedInterleavedAccessVectorization();
2251}
2252
2254 BasicBlock *CheckIRBB) {
2255 // Note: The block with the minimum trip-count check is already connected
2256 // during earlier VPlan construction.
2257 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2258 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2259 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2260 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2261 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2262 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2263 PreVectorPH = CheckVPIRBB;
2264 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2265 PreVectorPH->swapSuccessors();
2266
2267 // We just connected a new block to the scalar preheader. Update all
2268 // VPPhis by adding an incoming value for it, replicating the last value.
2269 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2270 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2271 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2272 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2273 "must have incoming values for all operands");
2274 R.addOperand(R.getOperand(NumPredecessors - 2));
2275 }
2276}
2277
2278 Value *InnerLoopVectorizer::createIterationCountCheck(
2279 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2280 // Generate code to check if the loop's trip count is less than VF * UF, or
2281 // equal to it in case a scalar epilogue is required; this implies that the
2282 // vector trip count is zero. This check also covers the case where adding one
2283 // to the backedge-taken count overflowed leading to an incorrect trip count
2284 // of zero. In this case we will also jump to the scalar loop.
2285 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2286 : ICmpInst::ICMP_ULT;
2287 
2288 // Reuse existing vector loop preheader for TC checks.
2289 // Note that new preheader block is generated for vector loop.
2290 BasicBlock *const TCCheckBlock = VectorPH;
2291 IRBuilder<InstSimplifyFolder> Builder(
2292 TCCheckBlock->getContext(),
2293 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2294 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2295
2296 // If tail is to be folded, vector loop takes care of all iterations.
2297 Value *Count = getTripCount();
2298 Type *CountTy = Count->getType();
2299 Value *CheckMinIters = Builder.getFalse();
2300 auto CreateStep = [&]() -> Value * {
2301 // Create step with max(MinProTripCount, UF * VF).
2302 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2303 return createStepForVF(Builder, CountTy, VF, UF);
2304
2305 Value *MinProfTC =
2306 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2307 if (!VF.isScalable())
2308 return MinProfTC;
2309 return Builder.CreateBinaryIntrinsic(
2310 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2311 };
2312
2313 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2314 if (Style == TailFoldingStyle::None) {
2315 Value *Step = CreateStep();
2316 ScalarEvolution &SE = *PSE.getSE();
2317 // TODO: Emit unconditional branch to vector preheader instead of
2318 // conditional branch with known condition.
2319 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2320 // Check if the trip count is < the step.
2321 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2322 // TODO: Ensure step is at most the trip count when determining max VF and
2323 // UF, w/o tail folding.
2324 CheckMinIters = Builder.getTrue();
2325 } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(P),
2326 TripCountSCEV, SE.getSCEV(Step))) {
2327 // Generate the minimum iteration check only if we cannot prove the
2328 // check is known to be true, or known to be false.
2329 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2330 } // else step known to be < trip count, use CheckMinIters preset to false.
2331 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2332 !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
2333 Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
2334 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2335 // an overflow to zero when updating induction variables and so an
2336 // additional overflow check is required before entering the vector loop.
2337
2338 // Get the maximum unsigned value for the type.
2339 Value *MaxUIntTripCount =
2340 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2341 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2342
2343 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2344 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2345 }
2346 return CheckMinIters;
2347}
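// Example (illustrative) of the emitted guard for VF = 4, UF = 2 when a
// scalar epilogue is required (names are examples only):
//   %min.iters.check = icmp ule i64 %n, 8   ; ICMP_ULE since VF * UF == 8
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph
// When the predicate is provable either way from SCEV, the compare is folded
// to a constant instead.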
2348
2349/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2350/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2351/// predecessors and successors of VPBB, if any, are rewired to the new
2352/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2353 static VPIRBasicBlock *replaceVPBBWithIRVPBB(VPBasicBlock *VPBB,
2354 BasicBlock *IRBB,
2355 VPlan *Plan = nullptr) {
2356 if (!Plan)
2357 Plan = VPBB->getPlan();
2358 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2359 auto IP = IRVPBB->begin();
2360 for (auto &R : make_early_inc_range(VPBB->phis()))
2361 R.moveBefore(*IRVPBB, IP);
2362
2363 for (auto &R :
2364 make_early_inc_range(make_range(VPBB->getFirstNonPhi(), VPBB->end())))
2365 R.moveBefore(*IRVPBB, IRVPBB->end());
2366
2367 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2368 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2369 return IRVPBB;
2370}
2371
2373 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2374 assert(VectorPH && "Invalid loop structure");
2375 assert((OrigLoop->getUniqueLatchExitBlock() ||
2376 Cost->requiresScalarEpilogue(VF.isVector())) &&
2377 "loops not exiting via the latch without required epilogue?");
2378
2379 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2380 // wrapping the newly created scalar preheader here at the moment, because the
2381 // Plan's scalar preheader may be unreachable at this point. Instead it is
2382 // replaced in executePlan.
2383 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2384 Twine(Prefix) + "scalar.ph");
2385}
2386
2387/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2388/// expansion results.
2390 const SCEV2ValueTy &ExpandedSCEVs) {
2391 const SCEV *Step = ID.getStep();
2392 if (auto *C = dyn_cast<SCEVConstant>(Step))
2393 return C->getValue();
2394 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2395 return U->getValue();
2396 Value *V = ExpandedSCEVs.lookup(Step);
2397 assert(V && "SCEV must be expanded at this point");
2398 return V;
2399}
2400
2401/// Knowing that loop \p L executes a single vector iteration, add instructions
2402/// that will get simplified and thus should not have any cost to \p
2403/// InstsToIgnore.
2406 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2407 auto *Cmp = L->getLatchCmpInst();
2408 if (Cmp)
2409 InstsToIgnore.insert(Cmp);
2410 for (const auto &KV : IL) {
2411 // Extract the key by hand so that it can be used in the lambda below. Note
2412 // that captured structured bindings are a C++20 extension.
2413 const PHINode *IV = KV.first;
2414
2415 // Get next iteration value of the induction variable.
2416 Instruction *IVInst =
2417 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2418 if (all_of(IVInst->users(),
2419 [&](const User *U) { return U == IV || U == Cmp; }))
2420 InstsToIgnore.insert(IVInst);
2421 }
2422}
2423
2425 // Create a new IR basic block for the scalar preheader.
2426 BasicBlock *ScalarPH = createScalarPreheader("");
2427 return ScalarPH->getSinglePredecessor();
2428}
2429
2430namespace {
2431
2432struct CSEDenseMapInfo {
2433 static bool canHandle(const Instruction *I) {
2434 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
2435 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
2436 }
2437
2438 static inline Instruction *getEmptyKey() {
2439 return DenseMapInfo<Instruction *>::getEmptyKey();
2440 }
2441
2442 static inline Instruction *getTombstoneKey() {
2443 return DenseMapInfo<Instruction *>::getTombstoneKey();
2444 }
2445
2446 static unsigned getHashValue(const Instruction *I) {
2447 assert(canHandle(I) && "Unknown instruction!");
2448 return hash_combine(I->getOpcode(),
2449 hash_combine_range(I->operand_values()));
2450 }
2451
2452 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2453 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2454 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2455 return LHS == RHS;
2456 return LHS->isIdenticalTo(RHS);
2457 }
2458};
2459
2460} // end anonymous namespace
2461
2462 /// Perform CSE of induction variable instructions.
2463 static void cse(BasicBlock *BB) {
2464 // Perform simple cse.
2465 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2466 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2467 if (!CSEDenseMapInfo::canHandle(&In))
2468 continue;
2469
2470 // Check if we can replace this instruction with any of the
2471 // visited instructions.
2472 if (Instruction *V = CSEMap.lookup(&In)) {
2473 In.replaceAllUsesWith(V);
2474 In.eraseFromParent();
2475 continue;
2476 }
2477
2478 CSEMap[&In] = &In;
2479 }
2480}
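// Example (illustrative): two identical GEPs produced by widening, e.g.
//   %g1 = getelementptr inbounds i32, ptr %base, i64 %off
//   %g2 = getelementptr inbounds i32, ptr %base, i64 %off
// hash to the same key, so %g2 is replaced by %g1 and erased.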
2481
2482/// This function attempts to return a value that represents the ElementCount
2483/// at runtime. For fixed-width VFs we know this precisely at compile
2484/// time, but for scalable VFs we calculate it based on an estimate of the
2485/// vscale value.
2487 std::optional<unsigned> VScale) {
2488 unsigned EstimatedVF = VF.getKnownMinValue();
2489 if (VF.isScalable())
2490 if (VScale)
2491 EstimatedVF *= *VScale;
2492 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2493 return EstimatedVF;
2494}
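// Example (illustrative): for VF = vscale x 4 with an estimated vscale of 2,
// the returned estimate is 8; for a fixed VF = 8 it is exactly 8.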
2495
2496 InstructionCost
2497 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
2498 ElementCount VF) const {
2499 // We only need to calculate a cost if the VF is scalar; for actual vectors
2500 // we should already have a pre-calculated cost at each VF.
2501 if (!VF.isScalar())
2502 return getCallWideningDecision(CI, VF).Cost;
2503
2504 Type *RetTy = CI->getType();
2506 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2507 return *RedCost;
2508
2510 for (auto &ArgOp : CI->args())
2511 Tys.push_back(ArgOp->getType());
2512
2513 InstructionCost ScalarCallCost =
2514 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2515
2516 // If this is an intrinsic we may have a lower cost for it.
2519 return std::min(ScalarCallCost, IntrinsicCost);
2520 }
2521 return ScalarCallCost;
2522}
2523
2524 static Type *maybeVectorizeType(Type *Ty, ElementCount VF) {
2525 if (VF.isScalar() || !canVectorizeTy(Ty))
2526 return Ty;
2527 return toVectorizedTy(Ty, VF);
2528}
2529
2530 InstructionCost
2531 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
2532 ElementCount VF) const {
2533 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2534 assert(ID && "Expected intrinsic call!");
2535 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2536 FastMathFlags FMF;
2537 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2538 FMF = FPMO->getFastMathFlags();
2539 
2540 SmallVector<const Value *> Arguments(CI->args());
2541 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
2542 SmallVector<Type *> ParamTys;
2543 std::transform(FTy->param_begin(), FTy->param_end(),
2544 std::back_inserter(ParamTys),
2545 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2546
2547 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2548 dyn_cast<IntrinsicInst>(CI),
2549 InstructionCost::getInvalid(), TLI);
2550 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2551}
2552
2553 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
2554 // Fix widened non-induction PHIs by setting up the PHI operands.
2555 fixNonInductionPHIs(State);
2556
2557 // Don't apply optimizations below when no (vector) loop remains, as they all
2558 // require one at the moment.
2559 VPBasicBlock *HeaderVPBB =
2560 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2561 if (!HeaderVPBB)
2562 return;
2563
2564 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2565
2566 // Remove redundant induction instructions.
2567 cse(HeaderBB);
2568}
2569
2571 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2572 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
2573 for (VPRecipeBase &P : VPBB->phis()) {
2574 auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
2575 if (!VPPhi)
2576 continue;
2577 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2578 // Make sure the builder has a valid insert point.
2579 Builder.SetInsertPoint(NewPhi);
2580 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2581 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2582 }
2583 }
2584}
2585
2586void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2587 // We should not collect Scalars more than once per VF. Right now, this
2588 // function is called from collectUniformsAndScalars(), which already does
2589 // this check. Collecting Scalars for VF=1 does not make any sense.
2590 assert(VF.isVector() && !Scalars.contains(VF) &&
2591 "This function should not be visited twice for the same VF");
2592
2593 // This avoids any chances of creating a REPLICATE recipe during planning
2594 // since that would result in generation of scalarized code during execution,
2595 // which is not supported for scalable vectors.
2596 if (VF.isScalable()) {
2597 Scalars[VF].insert_range(Uniforms[VF]);
2598 return;
2599 }
2600
2601 SetVector<Instruction *> Worklist;
2602 
2603 // These sets are used to seed the analysis with pointers used by memory
2604 // accesses that will remain scalar.
2605 SmallSetVector<Instruction *, 8> ScalarPtrs;
2606 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2607 auto *Latch = TheLoop->getLoopLatch();
2608
2609 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2610 // The pointer operands of loads and stores will be scalar as long as the
2611 // memory access is not a gather or scatter operation. The value operand of a
2612 // store will remain scalar if the store is scalarized.
2613 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2614 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2615 assert(WideningDecision != CM_Unknown &&
2616 "Widening decision should be ready at this moment");
2617 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2618 if (Ptr == Store->getValueOperand())
2619 return WideningDecision == CM_Scalarize;
2620 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2621 "Ptr is neither a value or pointer operand");
2622 return WideningDecision != CM_GatherScatter;
2623 };
2624
2625 // A helper that returns true if the given value is a getelementptr
2626 // instruction contained in the loop.
2627 auto IsLoopVaryingGEP = [&](Value *V) {
2628 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2629 };
2630
2631 // A helper that evaluates a memory access's use of a pointer. If the use will
2632 // be a scalar use and the pointer is only used by memory accesses, we place
2633 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2634 // PossibleNonScalarPtrs.
2635 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2636 // We only care about bitcast and getelementptr instructions contained in
2637 // the loop.
2638 if (!IsLoopVaryingGEP(Ptr))
2639 return;
2640
2641 // If the pointer has already been identified as scalar (e.g., if it was
2642 // also identified as uniform), there's nothing to do.
2643 auto *I = cast<Instruction>(Ptr);
2644 if (Worklist.count(I))
2645 return;
2646
2647 // If the use of the pointer will be a scalar use, and all users of the
2648 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2649 // place the pointer in PossibleNonScalarPtrs.
2650 if (IsScalarUse(MemAccess, Ptr) &&
2652 ScalarPtrs.insert(I);
2653 else
2654 PossibleNonScalarPtrs.insert(I);
2655 };
2656
2657 // We seed the scalars analysis with two classes of instructions: (1)
2658 // instructions marked uniform-after-vectorization and (2) bitcast,
2659 // getelementptr and (pointer) phi instructions used by memory accesses
2660 // requiring a scalar use.
2661 //
2662 // (1) Add to the worklist all instructions that have been identified as
2663 // uniform-after-vectorization.
2664 Worklist.insert_range(Uniforms[VF]);
2665
2666 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2667 // memory accesses requiring a scalar use. The pointer operands of loads and
2668 // stores will be scalar unless the operation is a gather or scatter.
2669 // The value operand of a store will remain scalar if the store is scalarized.
2670 for (auto *BB : TheLoop->blocks())
2671 for (auto &I : *BB) {
2672 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2673 EvaluatePtrUse(Load, Load->getPointerOperand());
2674 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2675 EvaluatePtrUse(Store, Store->getPointerOperand());
2676 EvaluatePtrUse(Store, Store->getValueOperand());
2677 }
2678 }
2679 for (auto *I : ScalarPtrs)
2680 if (!PossibleNonScalarPtrs.count(I)) {
2681 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2682 Worklist.insert(I);
2683 }
2684
2685 // Insert the forced scalars.
2686 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2687 // induction variable when the PHI user is scalarized.
2688 auto ForcedScalar = ForcedScalars.find(VF);
2689 if (ForcedScalar != ForcedScalars.end())
2690 for (auto *I : ForcedScalar->second) {
2691 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2692 Worklist.insert(I);
2693 }
2694
2695 // Expand the worklist by looking through any bitcasts and getelementptr
2696 // instructions we've already identified as scalar. This is similar to the
2697 // expansion step in collectLoopUniforms(); however, here we're only
2698 // expanding to include additional bitcasts and getelementptr instructions.
2699 unsigned Idx = 0;
2700 while (Idx != Worklist.size()) {
2701 Instruction *Dst = Worklist[Idx++];
2702 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2703 continue;
2704 auto *Src = cast<Instruction>(Dst->getOperand(0));
2705 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2706 auto *J = cast<Instruction>(U);
2707 return !TheLoop->contains(J) || Worklist.count(J) ||
2708 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2709 IsScalarUse(J, Src));
2710 })) {
2711 Worklist.insert(Src);
2712 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2713 }
2714 }
2715
2716 // An induction variable will remain scalar if all users of the induction
2717 // variable and induction variable update remain scalar.
2718 for (const auto &Induction : Legal->getInductionVars()) {
2719 auto *Ind = Induction.first;
2720 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2721
2722 // If tail-folding is applied, the primary induction variable will be used
2723 // to feed a vector compare.
2724 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2725 continue;
2726
2727 // Returns true if \p Indvar is a pointer induction that is used directly by
2728 // load/store instruction \p I.
2729 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2730 Instruction *I) {
2731 return Induction.second.getKind() ==
2732 InductionDescriptor::IK_PtrInduction &&
2733 (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
2734 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2735 };
2736
2737 // Determine if all users of the induction variable are scalar after
2738 // vectorization.
2739 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2740 auto *I = cast<Instruction>(U);
2741 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2742 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2743 });
2744 if (!ScalarInd)
2745 continue;
2746
2747 // If the induction variable update is a fixed-order recurrence, neither the
2748 // induction variable nor its update should be marked scalar after
2749 // vectorization.
2750 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2751 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2752 continue;
2753
2754 // Determine if all users of the induction variable update instruction are
2755 // scalar after vectorization.
2756 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2757 auto *I = cast<Instruction>(U);
2758 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2759 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2760 });
2761 if (!ScalarIndUpdate)
2762 continue;
2763
2764 // The induction variable and its update instruction will remain scalar.
2765 Worklist.insert(Ind);
2766 Worklist.insert(IndUpdate);
2767 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2768 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2769 << "\n");
2770 }
2771
2772 Scalars[VF].insert_range(Worklist);
2773}
2774
2775 bool LoopVectorizationCostModel::isScalarWithPredication(
2776 Instruction *I, ElementCount VF) const {
2777 if (!isPredicatedInst(I))
2778 return false;
2779
2780 // Do we have a non-scalar lowering for this predicated
2781 // instruction? No - it is scalar with predication.
2782 switch(I->getOpcode()) {
2783 default:
2784 return true;
2785 case Instruction::Call:
2786 if (VF.isScalar())
2787 return true;
2788 return !VFDatabase::hasMaskedVariant(*(cast<CallInst>(I)), VF);
2789 case Instruction::Load:
2790 case Instruction::Store: {
2791 auto *Ptr = getLoadStorePointerOperand(I);
2792 auto *Ty = getLoadStoreType(I);
2793 unsigned AS = getLoadStoreAddressSpace(I);
2794 Type *VTy = Ty;
2795 if (VF.isVector())
2796 VTy = VectorType::get(Ty, VF);
2797 const Align Alignment = getLoadStoreAlignment(I);
2798 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2799 TTI.isLegalMaskedGather(VTy, Alignment))
2800 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2801 TTI.isLegalMaskedScatter(VTy, Alignment));
2802 }
2803 case Instruction::UDiv:
2804 case Instruction::SDiv:
2805 case Instruction::SRem:
2806 case Instruction::URem: {
2807 // We have the option to use the safe-divisor idiom to avoid predication.
2808 // The cost based decision here will always select safe-divisor for
2809 // scalable vectors as scalarization isn't legal.
2810 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2811 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2812 }
2813 }
2814}
2815
2816// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2817 bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
2818 // TODO: We can use the loop-preheader as context point here and get
2819 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2820 if (isSafeToSpeculativelyExecute(I) ||
2821 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2822 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
2823 return false;
2824
2825 // If the instruction was executed conditionally in the original scalar loop,
2826 // predication is needed with a mask whose lanes are all possibly inactive.
2827 if (Legal->blockNeedsPredication(I->getParent()))
2828 return true;
2829
2830 // If we're not folding the tail by masking, predication is unnecessary.
2831 if (!foldTailByMasking())
2832 return false;
2833
2834 // All that remain are instructions with side-effects originally executed in
2835 // the loop unconditionally, but now execute under a tail-fold mask (only)
2836 // having at least one active lane (the first). If the side-effects of the
2837 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2838 // - it will cause the same side-effects as when masked.
2839 switch(I->getOpcode()) {
2840 default:
2841 llvm_unreachable(
2842 "instruction should have been considered by earlier checks");
2843 case Instruction::Call:
2844 // Side-effects of a Call are assumed to be non-invariant, needing a
2845 // (fold-tail) mask.
2846 assert(Legal->isMaskRequired(I) &&
2847 "should have returned earlier for calls not needing a mask");
2848 return true;
2849 case Instruction::Load:
2850 // If the address is loop invariant no predication is needed.
2851 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2852 case Instruction::Store: {
2853 // For stores, we need to prove both speculation safety (which follows from
2854 // the same argument as loads), and that the value being stored
2855 // is correct. The easiest form of the latter is to require that all values
2856 // stored are the same.
2857 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2858 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2859 }
2860 case Instruction::UDiv:
2861 case Instruction::SDiv:
2862 case Instruction::SRem:
2863 case Instruction::URem:
2864 // If the divisor is loop-invariant no predication is needed.
2865 return !Legal->isInvariant(I->getOperand(1));
2866 }
2867}
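// Example (illustrative): under tail folding, "store i32 %inv, ptr %p" with
// loop-invariant %p and %inv needs no mask per the Store case above: the
// unmasked execution stores the same value to the same address as the
// original loop's final iteration would.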
2868
2869std::pair<InstructionCost, InstructionCost>
2870 LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
2871 ElementCount VF) const {
2872 assert(I->getOpcode() == Instruction::UDiv ||
2873 I->getOpcode() == Instruction::SDiv ||
2874 I->getOpcode() == Instruction::SRem ||
2875 I->getOpcode() == Instruction::URem);
2877
2878 // Scalarization isn't legal for scalable vector types
2879 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2880 if (!VF.isScalable()) {
2881 // Get the scalarization cost and scale this amount by the probability of
2882 // executing the predicated block. If the instruction is not predicated,
2883 // we fall through to the next case.
2884 ScalarizationCost = 0;
2885
2886 // These instructions have a non-void type, so account for the phi nodes
2887 // that we will create. This cost is likely to be zero. The phi node
2888 // cost, if any, should be scaled by the block probability because it
2889 // models a copy at the end of each predicated block.
2890 ScalarizationCost +=
2891 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2892
2893 // The cost of the non-predicated instruction.
2894 ScalarizationCost +=
2895 VF.getFixedValue() *
2896 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2897
2898 // The cost of insertelement and extractelement instructions needed for
2899 // scalarization.
2900 ScalarizationCost += getScalarizationOverhead(I, VF);
2901
2902 // Scale the cost by the probability of executing the predicated blocks.
2903 // This assumes the predicated block for each vector lane is equally
2904 // likely.
2905 ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind);
2906 }
2907
2908 InstructionCost SafeDivisorCost = 0;
2909 auto *VecTy = toVectorTy(I->getType(), VF);
2910 // The cost of the select guard to ensure all lanes are well defined
2911 // after we speculate above any internal control flow.
2912 SafeDivisorCost +=
2913 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2914 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2915 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2916 
2917 SmallVector<const Value *, 4> Operands(I->operand_values());
2918 SafeDivisorCost += TTI.getArithmeticInstrCost(
2919 I->getOpcode(), VecTy, CostKind,
2920 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2921 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2922 Operands, I);
2923 return {ScalarizationCost, SafeDivisorCost};
2924}
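// Illustrative safe-divisor form (simplified) for a predicated udiv with
// VF = 4: lanes with an inactive mask get a neutral divisor of 1, making the
// speculated divide well defined:
//   %d.safe = select <4 x i1> %mask, <4 x i32> %d, <4 x i32> splat (i32 1)
//   %q      = udiv <4 x i32> %x, %d.safe
// Its cost (select + vector divide) is compared against the scalarization
// cost computed above.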
2925
2926 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
2927 Instruction *I, ElementCount VF) const {
2928 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2930 "Decision should not be set yet.");
2931 auto *Group = getInterleavedAccessGroup(I);
2932 assert(Group && "Must have a group.");
2933 unsigned InterleaveFactor = Group->getFactor();
2934
2935 // If the instruction's allocated size doesn't equal its type size, it
2936 // requires padding and will be scalarized.
2937 auto &DL = I->getDataLayout();
2938 auto *ScalarTy = getLoadStoreType(I);
2939 if (hasIrregularType(ScalarTy, DL))
2940 return false;
2941
2942 // For scalable vectors, the interleave factors must be <= 8 since we require
2943 // the (de)interleaveN intrinsics instead of shufflevectors.
2944 if (VF.isScalable() && InterleaveFactor > 8)
2945 return false;
2946
2947 // If the group involves a non-integral pointer, we may not be able to
2948 // losslessly cast all values to a common type.
2949 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2950 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2951 Instruction *Member = Group->getMember(Idx);
2952 if (!Member)
2953 continue;
2954 auto *MemberTy = getLoadStoreType(Member);
2955 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2956 // Don't coerce non-integral pointers to integers or vice versa.
2957 if (MemberNI != ScalarNI)
2958 // TODO: Consider adding special nullptr value case here
2959 return false;
2960 if (MemberNI && ScalarNI &&
2961 ScalarTy->getPointerAddressSpace() !=
2962 MemberTy->getPointerAddressSpace())
2963 return false;
2964 }
2965
2966 // Check if masking is required.
2967 // A Group may need masking for one of two reasons: it resides in a block that
2968 // needs predication, or it was decided to use masking to deal with gaps
2969 // (either a gap at the end of a load-access that may result in a speculative
2970 // load, or any gaps in a store-access).
2971 bool PredicatedAccessRequiresMasking =
2972 blockNeedsPredicationForAnyReason(I->getParent()) &&
2973 Legal->isMaskRequired(I);
2974 bool LoadAccessWithGapsRequiresEpilogMasking =
2975 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2976 !isScalarEpilogueAllowed();
2977 bool StoreAccessWithGapsRequiresMasking =
2978 isa<StoreInst>(I) && !Group->isFull();
2979 if (!PredicatedAccessRequiresMasking &&
2980 !LoadAccessWithGapsRequiresEpilogMasking &&
2981 !StoreAccessWithGapsRequiresMasking)
2982 return true;
2983
2984 // If masked interleaving is required, we expect that the user/target had
2985 // enabled it, because otherwise it either wouldn't have been created or
2986 // it should have been invalidated by the CostModel.
2988 "Masked interleave-groups for predicated accesses are not enabled.");
2989
2990 if (Group->isReverse())
2991 return false;
2992
2993 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2994 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2995 StoreAccessWithGapsRequiresMasking;
2996 if (VF.isScalable() && NeedsMaskForGaps)
2997 return false;
2998
2999 auto *Ty = getLoadStoreType(I);
3000 const Align Alignment = getLoadStoreAlignment(I);
3001 unsigned AS = getLoadStoreAddressSpace(I);
3002 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
3003 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
3004}
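// Example (illustrative): for the pair "x = a[2*i]; y = a[2*i+1]" the group
// has factor 2 with no gaps. A store group writing only a[3*i] and a[3*i+2]
// leaves a gap at 3*i+1, so it needs a gap mask and is rejected above for
// scalable VFs.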
3005
3006 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
3007 Instruction *I, ElementCount VF) {
3008 // Get and ensure we have a valid memory instruction.
3009 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
3010
3011 auto *Ptr = getLoadStorePointerOperand(I);
3012 auto *ScalarTy = getLoadStoreType(I);
3013
3014 // In order to be widened, the pointer should be consecutive, first of all.
3015 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3016 return false;
3017
3018 // If the instruction is a store located in a predicated block, it will be
3019 // scalarized.
3020 if (isScalarWithPredication(I, VF))
3021 return false;
3022
3023 // If the instruction's allocated size doesn't equal its type size, it
3024 // requires padding and will be scalarized.
3025 auto &DL = I->getDataLayout();
3026 if (hasIrregularType(ScalarTy, DL))
3027 return false;
3028
3029 return true;
3030}
3031
3032void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3033 // We should not collect Uniforms more than once per VF. Right now,
3034 // this function is called from collectUniformsAndScalars(), which
3035 // already does this check. Collecting Uniforms for VF=1 does not make any
3036 // sense.
3037
3038 assert(VF.isVector() && !Uniforms.contains(VF) &&
3039 "This function should not be visited twice for the same VF");
3040
3041 // Initialize the entry for this VF up front: even if no uniform values are
3042 // found, Uniforms.count(VF) will then return 1, so we won't analyze again.
3043 Uniforms[VF].clear();
3044
3045 // Now we know that the loop is vectorizable!
3046 // Collect instructions inside the loop that will remain uniform after
3047 // vectorization.
3048
3049 // Global values, params and instructions outside of current loop are out of
3050 // scope.
3051 auto IsOutOfScope = [&](Value *V) -> bool {
3052 Instruction *I = dyn_cast<Instruction>(V);
3053 return (!I || !TheLoop->contains(I));
3054 };
3055
3056 // Worklist containing uniform instructions demanding lane 0.
3057 SetVector<Instruction *> Worklist;
3058
3059 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3060 // that require predication must not be considered uniform after
3061 // vectorization, because that would create an erroneous replicating region
3062 // where only a single instance out of VF should be formed.
3063 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3064 if (IsOutOfScope(I)) {
3065 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3066 << *I << "\n");
3067 return;
3068 }
3069 if (isPredicatedInst(I)) {
3070 LLVM_DEBUG(
3071 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3072 << "\n");
3073 return;
3074 }
3075 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3076 Worklist.insert(I);
3077 };
3078
3079 // Start with the conditional branches exiting the loop. If the branch
3080 // condition is an instruction contained in the loop that is only used by the
3081 // branch, it is uniform. Note conditions from uncountable early exits are not
3082 // uniform.
3083 SmallVector<BasicBlock *, 4> Exiting;
3084 TheLoop->getExitingBlocks(Exiting);
3085 for (BasicBlock *E : Exiting) {
3086 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3087 continue;
3088 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3089 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3090 AddToWorklistIfAllowed(Cmp);
3091 }
3092
3093 auto PrevVF = VF.divideCoefficientBy(2);
3094 // Return true if all lanes perform the same memory operation, and we can
3095 // thus choose to execute only one.
3096 auto IsUniformMemOpUse = [&](Instruction *I) {
3097 // If the value was already known to not be uniform for the previous
3098 // (smaller VF), it cannot be uniform for the larger VF.
3099 if (PrevVF.isVector()) {
3100 auto Iter = Uniforms.find(PrevVF);
3101 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3102 return false;
3103 }
3104 if (!Legal->isUniformMemOp(*I, VF))
3105 return false;
3106 if (isa<LoadInst>(I))
3107 // Loading the same address always produces the same result - at least
3108 // assuming aliasing and ordering which have already been checked.
3109 return true;
3110 // Storing the same value on every iteration.
3111 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3112 };
3113
3114 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3115 InstWidening WideningDecision = getWideningDecision(I, VF);
3116 assert(WideningDecision != CM_Unknown &&
3117 "Widening decision should be ready at this moment");
3118
3119 if (IsUniformMemOpUse(I))
3120 return true;
3121
3122 return (WideningDecision == CM_Widen ||
3123 WideningDecision == CM_Widen_Reverse ||
3124 WideningDecision == CM_Interleave);
3125 };
3126
3127 // Returns true if Ptr is the pointer operand of a memory access instruction
3128 // I, I is known to not require scalarization, and the pointer is not also
3129 // stored.
3130 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3131 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3132 return false;
3133 return getLoadStorePointerOperand(I) == Ptr &&
3134 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3135 };
3136
3137 // Holds a list of values which are known to have at least one uniform use.
3138 // Note that there may be other uses which aren't uniform. A "uniform use"
3139 // here is something which only demands lane 0 of the unrolled iterations;
3140 // it does not imply that all lanes produce the same value (e.g. this is not
3141 // the usual meaning of uniform).
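// For example, the address of a uniform load is demanded only at lane 0 by
// that load, while another user of the same address (say, a widened GEP)
// may still demand all lanes.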
3142 SetVector<Value *> HasUniformUse;
3143
3144 // Scan the loop for instructions which are either a) known to have only
3145 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3146 for (auto *BB : TheLoop->blocks())
3147 for (auto &I : *BB) {
3148 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3149 switch (II->getIntrinsicID()) {
3150 case Intrinsic::sideeffect:
3151 case Intrinsic::experimental_noalias_scope_decl:
3152 case Intrinsic::assume:
3153 case Intrinsic::lifetime_start:
3154 case Intrinsic::lifetime_end:
3155 if (TheLoop->hasLoopInvariantOperands(&I))
3156 AddToWorklistIfAllowed(&I);
3157 break;
3158 default:
3159 break;
3160 }
3161 }
3162
3163 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3164 if (IsOutOfScope(EVI->getAggregateOperand())) {
3165 AddToWorklistIfAllowed(EVI);
3166 continue;
3167 }
3168 // Only ExtractValue instructions where the aggregate value comes from a
3169 // call are allowed to be non-uniform.
3170 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3171 "Expected aggregate value to be call return value");
3172 }
3173
3174 // If there's no pointer operand, there's nothing to do.
3175 auto *Ptr = getLoadStorePointerOperand(&I);
3176 if (!Ptr)
3177 continue;
3178
3179 // If the pointer can be proven to be uniform, always add it to the
3180 // worklist.
3181 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3182 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3183
3184 if (IsUniformMemOpUse(&I))
3185 AddToWorklistIfAllowed(&I);
3186
3187 if (IsVectorizedMemAccessUse(&I, Ptr))
3188 HasUniformUse.insert(Ptr);
3189 }
3190
3191 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3192 // demanding) users. Since loops are assumed to be in LCSSA form, this
3193 // disallows uses outside the loop as well.
3194 for (auto *V : HasUniformUse) {
3195 if (IsOutOfScope(V))
3196 continue;
3197 auto *I = cast<Instruction>(V);
3198 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3199 auto *UI = cast<Instruction>(U);
3200 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3201 });
3202 if (UsersAreMemAccesses)
3203 AddToWorklistIfAllowed(I);
3204 }
3205
3206 // Expand Worklist in topological order: whenever a new instruction
3207 // is added, its users should already be inside Worklist. This ensures
3208 // that a uniform instruction will only be used by uniform instructions.
3209 unsigned Idx = 0;
3210 while (Idx != Worklist.size()) {
3211 Instruction *I = Worklist[Idx++];
3212
3213 for (auto *OV : I->operand_values()) {
3214 // IsOutOfScope operands cannot be uniform instructions.
3215 if (IsOutOfScope(OV))
3216 continue;
3217 // First order recurrence Phi's should typically be considered
3218 // non-uniform.
3219 auto *OP = dyn_cast<PHINode>(OV);
3220 if (OP && Legal->isFixedOrderRecurrence(OP))
3221 continue;
3222 // If all the users of the operand are uniform, then add the
3223 // operand into the uniform worklist.
3224 auto *OI = cast<Instruction>(OV);
3225 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3226 auto *J = cast<Instruction>(U);
3227 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3228 }))
3229 AddToWorklistIfAllowed(OI);
3230 }
3231 }
3232
3233 // For an instruction to be added into Worklist above, all its users inside
3234 // the loop should also be in Worklist. However, this condition cannot be
3235 // true for phi nodes that form a cyclic dependence. We must process phi
3236 // nodes separately. An induction variable will remain uniform if all users
3237 // of the induction variable and induction variable update remain uniform.
3238 // The code below handles both pointer and non-pointer induction variables.
3239 BasicBlock *Latch = TheLoop->getLoopLatch();
3240 for (const auto &Induction : Legal->getInductionVars()) {
3241 auto *Ind = Induction.first;
3242 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3243
3244 // Determine if all users of the induction variable are uniform after
3245 // vectorization.
3246 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3247 auto *I = cast<Instruction>(U);
3248 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3249 IsVectorizedMemAccessUse(I, Ind);
3250 });
3251 if (!UniformInd)
3252 continue;
3253
3254 // Determine if all users of the induction variable update instruction are
3255 // uniform after vectorization.
3256 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3257 auto *I = cast<Instruction>(U);
3258 return I == Ind || Worklist.count(I) ||
3259 IsVectorizedMemAccessUse(I, IndUpdate);
3260 });
3261 if (!UniformIndUpdate)
3262 continue;
3263
3264 // The induction variable and its update instruction will remain uniform.
3265 AddToWorklistIfAllowed(Ind);
3266 AddToWorklistIfAllowed(IndUpdate);
3267 }
3268
3269 Uniforms[VF].insert_range(Worklist);
3270}
3271
3272bool LoopVectorizationCostModel::runtimeChecksRequired() {
3273 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3274
3275 if (Legal->getRuntimePointerChecking()->Need) {
3276 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3277 "runtime pointer checks needed. Enable vectorization of this "
3278 "loop with '#pragma clang loop vectorize(enable)' when "
3279 "compiling with -Os/-Oz",
3280 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3281 return true;
3282 }
3283
3284 if (!PSE.getPredicate().isAlwaysTrue()) {
3285 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3286 "runtime SCEV checks needed. Enable vectorization of this "
3287 "loop with '#pragma clang loop vectorize(enable)' when "
3288 "compiling with -Os/-Oz",
3289 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3290 return true;
3291 }
3292
3293 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3294 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3295 reportVectorizationFailure("Runtime stride check for small trip count",
3296 "runtime stride == 1 checks needed. Enable vectorization of "
3297 "this loop without such check by compiling with -Os/-Oz",
3298 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3299 return true;
3300 }
3301
3302 return false;
3303}
3304
3305bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3306 if (IsScalableVectorizationAllowed)
3307 return *IsScalableVectorizationAllowed;
3308
3309 IsScalableVectorizationAllowed = false;
3310 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3311 return false;
3312
3313 if (Hints->isScalableVectorizationDisabled()) {
3314 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3315 "ScalableVectorizationDisabled", ORE, TheLoop);
3316 return false;
3317 }
3318
3319 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3320
3321 auto MaxScalableVF = ElementCount::getScalable(
3322 std::numeric_limits<ElementCount::ScalarTy>::max());
3323
3324 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3325 // FIXME: While for scalable vectors this is currently sufficient, this should
3326 // be replaced by a more detailed mechanism that filters out specific VFs,
3327 // instead of invalidating vectorization for a whole set of VFs based on the
3328 // MaxVF.
3329
3330 // Disable scalable vectorization if the loop contains unsupported reductions.
3331 if (!canVectorizeReductions(MaxScalableVF)) {
3332 reportVectorizationInfo(
3333 "Scalable vectorization not supported for the reduction "
3334 "operations found in this loop.",
3335 "ScalableVFUnfeasible", ORE, TheLoop);
3336 return false;
3337 }
3338
3339 // Disable scalable vectorization if the loop contains any instructions
3340 // with element types not supported for scalable vectors.
3341 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3342 return !Ty->isVoidTy() &&
3343 !this->TTI.isElementTypeLegalForScalableVector(Ty);
3344 })) {
3345 reportVectorizationInfo("Scalable vectorization is not supported "
3346 "for all element types found in this loop.",
3347 "ScalableVFUnfeasible", ORE, TheLoop);
3348 return false;
3349 }
3350
3351 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3352 reportVectorizationInfo("The target does not provide maximum vscale value "
3353 "for safe distance analysis.",
3354 "ScalableVFUnfeasible", ORE, TheLoop);
3355 return false;
3356 }
3357
3358 IsScalableVectorizationAllowed = true;
3359 return true;
3360}
3361
3362ElementCount
3363LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3364 if (!isScalableVectorizationAllowed())
3365 return ElementCount::getScalable(0);
3366
3367 auto MaxScalableVF = ElementCount::getScalable(
3368 std::numeric_limits<ElementCount::ScalarTy>::max());
3369 if (Legal->isSafeForAnyVectorWidth())
3370 return MaxScalableVF;
3371
3372 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3373 // Limit MaxScalableVF by the maximum safe dependence distance.
3374 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3375
3376 if (!MaxScalableVF)
3377 reportVectorizationInfo(
3378 "Max legal vector width too small, scalable vectorization "
3379 "unfeasible.",
3380 "ScalableVFUnfeasible", ORE, TheLoop);
3381
3382 return MaxScalableVF;
3383}
3384
3385FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3386 unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
3387 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3388 unsigned SmallestType, WidestType;
3389 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3390
3391 // Get the maximum safe dependence distance in bits computed by LAA.
3392 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3393 // the memory accesses that is most restrictive (involved in the smallest
3394 // dependence distance).
3395 unsigned MaxSafeElementsPowerOf2 =
3396 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3397 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3398 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3399 MaxSafeElementsPowerOf2 =
3400 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3401 }
3402 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3403 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3404
3405 if (!Legal->isSafeForAnyVectorWidth())
3406 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3407
3408 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3409 << ".\n");
3410 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3411 << ".\n");
3412
3413 // First analyze the UserVF, fall back if the UserVF should be ignored.
3414 if (UserVF) {
3415 auto MaxSafeUserVF =
3416 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3417
3418 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3419 // If `VF=vscale x N` is safe, then so is `VF=N`
3420 if (UserVF.isScalable())
3421 return FixedScalableVFPair(
3422 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3423
3424 return UserVF;
3425 }
3426
3427 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3428
3429 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3430 // is better to ignore the hint and let the compiler choose a suitable VF.
3431 if (!UserVF.isScalable()) {
3432 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3433 << " is unsafe, clamping to max safe VF="
3434 << MaxSafeFixedVF << ".\n");
3435 ORE->emit([&]() {
3436 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3437 TheLoop->getStartLoc(),
3438 TheLoop->getHeader())
3439 << "User-specified vectorization factor "
3440 << ore::NV("UserVectorizationFactor", UserVF)
3441 << " is unsafe, clamping to maximum safe vectorization factor "
3442 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3443 });
3444 return MaxSafeFixedVF;
3445 }
3446
3447 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
3448 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3449 << " is ignored because scalable vectors are not "
3450 "available.\n");
3451 ORE->emit([&]() {
3452 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3453 TheLoop->getStartLoc(),
3454 TheLoop->getHeader())
3455 << "User-specified vectorization factor "
3456 << ore::NV("UserVectorizationFactor", UserVF)
3457 << " is ignored because the target does not support scalable "
3458 "vectors. The compiler will pick a more suitable value.";
3459 });
3460 } else {
3461 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3462 << " is unsafe. Ignoring scalable UserVF.\n");
3463 ORE->emit([&]() {
3464 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3465 TheLoop->getStartLoc(),
3466 TheLoop->getHeader())
3467 << "User-specified vectorization factor "
3468 << ore::NV("UserVectorizationFactor", UserVF)
3469 << " is unsafe. Ignoring the hint to let the compiler pick a "
3470 "more suitable value.";
3471 });
3472 }
3473 }
3474
3475 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3476 << " / " << WidestType << " bits.\n");
3477
3478 FixedScalableVFPair Result(ElementCount::getFixed(1),
3479 ElementCount::getScalable(0));
3480 if (auto MaxVF =
3481 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3482 MaxSafeFixedVF, FoldTailByMasking))
3483 Result.FixedVF = MaxVF;
3484
3485 if (auto MaxVF =
3486 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3487 MaxSafeScalableVF, FoldTailByMasking))
3488 if (MaxVF.isScalable()) {
3489 Result.ScalableVF = MaxVF;
3490 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3491 << "\n");
3492 }
3493
3494 return Result;
3495}
3496
3497FixedScalableVFPair
3498LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3499 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3500 // TODO: It may be useful to do this, since it's still likely to be
3501 // dynamically uniform if the target can skip.
3502 reportVectorizationFailure(
3503 "Not inserting runtime ptr check for divergent target",
3504 "runtime pointer checks needed. Not enabled for divergent target",
3505 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3507 }
3508
3509 ScalarEvolution *SE = PSE.getSE();
3510 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3511 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3512 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3513 if (TC != ElementCount::getFixed(MaxTC))
3514 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3515 if (TC.isScalar()) {
3516 reportVectorizationFailure("Single iteration (non) loop",
3517 "loop trip count is one, irrelevant for vectorization",
3518 "SingleIterationLoop", ORE, TheLoop);
3520 }
3521
3522 // If BTC matches the widest induction type and is -1 then the trip count
3523 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3524 // to vectorize.
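// For example, with an i32 backedge-taken count of 0xFFFFFFFF (-1), adding
// one to form the trip count wraps the result to 0.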
3525 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3526 if (!isa<SCEVCouldNotCompute>(BTC) &&
3527 BTC->getType()->getScalarSizeInBits() >=
3528 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3529 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3530 SE->getMinusOne(BTC->getType()))) {
3531 reportVectorizationFailure(
3532 "Trip count computation wrapped",
3533 "backedge-taken count is -1, loop trip count wrapped to 0",
3534 "TripCountWrapped", ORE, TheLoop);
3536 }
3537
3538 switch (ScalarEpilogueStatus) {
3539 case CM_ScalarEpilogueAllowed:
3540 return computeFeasibleMaxVF(MaxTC, UserVF, false);
3541 case CM_ScalarEpilogueNotAllowedUsePredicate:
3542 [[fallthrough]];
3543 case CM_ScalarEpilogueNotNeededUsePredicate:
3544 LLVM_DEBUG(
3545 dbgs() << "LV: vector predicate hint/switch found.\n"
3546 << "LV: Not allowing scalar epilogue, creating predicated "
3547 << "vector loop.\n");
3548 break;
3549 case CM_ScalarEpilogueNotAllowedLowTripLoop:
3550 // fallthrough as a special case of OptForSize
3551 case CM_ScalarEpilogueNotAllowedOptSize:
3552 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3553 LLVM_DEBUG(
3554 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3555 else
3556 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3557 << "count.\n");
3558
3559 // Bail if runtime checks are required, which are not good when optimising
3560 // for size.
3561 if (runtimeChecksRequired())
3562 return FixedScalableVFPair::getNone();
3563
3564 break;
3565 }
3566
3567 // Now try the tail folding
3568
3569 // Invalidate interleave groups that require an epilogue if we can't mask
3570 // the interleave-group.
3571 if (!useMaskedInterleavedAccesses(TTI)) {
3572 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3573 "No decisions should have been taken at this point");
3574 // Note: There is no need to invalidate any cost modeling decisions here, as
3575 // none were taken so far.
3576 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3577 }
3578
3579 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true);
3580
3581 // Avoid tail folding if the trip count is known to be a multiple of any VF
3582 // we choose.
3583 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3584 MaxFactors.FixedVF.getFixedValue();
3585 if (MaxFactors.ScalableVF) {
3586 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3587 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3588 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3589 *MaxPowerOf2RuntimeVF,
3590 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3591 } else
3592 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3593 }
3594
3595 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3596 // Return false if the loop is neither a single-latch-exit loop nor an
3597 // early-exit loop, as tail-folding is not supported in that case.
3598 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3599 !Legal->hasUncountableEarlyExit())
3600 return false;
3601 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3602 ScalarEvolution *SE = PSE.getSE();
3603 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3604 // with uncountable exits. For countable loops, the symbolic maximum must
3605 // remain identical to the known back-edge taken count.
3606 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3607 assert((Legal->hasUncountableEarlyExit() ||
3608 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3609 "Invalid loop count");
3610 const SCEV *ExitCount = SE->getAddExpr(
3611 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3612 const SCEV *Rem = SE->getURemExpr(
3613 SE->applyLoopGuards(ExitCount, TheLoop),
3614 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3615 return Rem->isZero();
3616 };
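// For example, a loop with a known trip count of 64 needs no scalar epilogue
// for MaxVF = 8 and UserIC = 2, since 64 % (8 * 2) == 0.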
3617
3618 if (MaxPowerOf2RuntimeVF > 0u) {
3619 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3620 "MaxFixedVF must be a power of 2");
3621 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3622 // Accept MaxFixedVF if we do not have a tail.
3623 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3624 return MaxFactors;
3625 }
3626 }
3627
3628 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3629 if (ExpectedTC && ExpectedTC->isFixed() &&
3630 ExpectedTC->getFixedValue() <=
3631 TTI.getMinTripCountTailFoldingThreshold()) {
3632 if (MaxPowerOf2RuntimeVF > 0u) {
3633 // If we have a low trip count, and the fixed-width VF is known to divide
3634 // the trip count but the scalable factor does not, use the fixed-width
3635 // factor in preference to allow the generation of a non-predicated loop.
3636 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3637 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3638 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3639 "remain for any chosen VF.\n");
3640 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3641 return MaxFactors;
3642 }
3643 }
3644
3646 "The trip count is below the minial threshold value.",
3647 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3648 ORE, TheLoop);
3649 return FixedScalableVFPair::getNone();
3650 }
3651
3652 // If we don't know the precise trip count, or if the trip count that we
3653 // found modulo the vectorization factor is not zero, try to fold the tail
3654 // by masking.
3655 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3656 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3657 setTailFoldingStyles(ContainsScalableVF, UserIC);
3658 if (foldTailByMasking()) {
3659 if (foldTailWithEVL()) {
3660 LLVM_DEBUG(
3661 dbgs()
3662 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3663 "try to generate VP Intrinsics with scalable vector "
3664 "factors only.\n");
3665 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3666 // for now.
3667 // TODO: extend it for fixed vectors, if required.
3668 assert(ContainsScalableVF && "Expected scalable vector factor.");
3669
3670 MaxFactors.FixedVF = ElementCount::getFixed(1);
3671 }
3672 return MaxFactors;
3673 }
3674
3675 // If there was a tail-folding hint/switch, but we can't fold the tail by
3676 // masking, fallback to a vectorization with a scalar epilogue.
3677 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3678 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3679 "scalar epilogue instead.\n");
3680 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3681 return MaxFactors;
3682 }
3683
3684 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3685 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3686 return FixedScalableVFPair::getNone();
3687 }
3688
3689 if (TC.isZero()) {
3690 reportVectorizationFailure(
3691 "unable to calculate the loop count due to complex control flow",
3692 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3694 }
3695
3696 reportVectorizationFailure(
3697 "Cannot optimize for size and vectorize at the same time.",
3698 "cannot optimize for size and vectorize at the same time. "
3699 "Enable vectorization of this loop with '#pragma clang loop "
3700 "vectorize(enable)' when compiling with -Os/-Oz",
3701 "NoTailLoopWithOptForSize", ORE, TheLoop);
3703}
3704
3705bool LoopVectorizationCostModel::shouldConsiderRegPressureForVF(
3706 ElementCount VF) {
3707 if (ConsiderRegPressure.getNumOccurrences())
3708 return ConsiderRegPressure;
3709
3710 // TODO: We should eventually consider register pressure for all targets. The
3711 // TTI hook is temporary whilst target-specific issues are being fixed.
3712 if (TTI.shouldConsiderVectorizationRegPressure())
3713 return true;
3714
3715 if (!useMaxBandwidth(VF.isScalable()
3716 ? TargetTransformInfo::RGK_ScalableVector
3717 : TargetTransformInfo::RGK_FixedWidthVector))
3718 return false;
3719 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3720 return ElementCount::isKnownGT(
3721 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3722 : MaxPermissibleVFWithoutMaxBW.FixedVF);
3723}
3724
3725bool LoopVectorizationCostModel::useMaxBandwidth(
3726 TargetTransformInfo::RegisterKind RegKind) {
3727 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3728 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3730 Legal->hasVectorCallVariants())));
3731}
3732
3733ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3734 ElementCount VF, unsigned MaxTripCount, bool FoldTailByMasking) const {
3735 unsigned EstimatedVF = VF.getKnownMinValue();
3736 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3737 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3738 auto Min = Attr.getVScaleRangeMin();
3739 EstimatedVF *= Min;
3740 }
3741
3742 // When a scalar epilogue is required, at least one iteration of the scalar
3743 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3744 // max VF that results in a dead vector loop.
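// For example, a known maximum trip count of 17 is treated as 16 here,
// since at least the final iteration must execute in the scalar loop.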
3745 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3746 MaxTripCount -= 1;
3747
3748 if (MaxTripCount && MaxTripCount <= EstimatedVF &&
3749 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3750 // If upper bound loop trip count (TC) is known at compile time there is no
3751 // point in choosing VF greater than TC (as done in the loop below). Select
3752 // maximum power of two which doesn't exceed TC. If VF is
3753 // scalable, we only fall back on a fixed VF when the TC is less than or
3754 // equal to the known number of lanes.
3755 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount);
3756 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3757 "exceeding the constant trip count: "
3758 << ClampedUpperTripCount << "\n");
3759 return ElementCount::get(ClampedUpperTripCount,
3760 FoldTailByMasking ? VF.isScalable() : false);
3761 }
3762 return VF;
3763}
3764
3765ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3766 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3767 ElementCount MaxSafeVF, bool FoldTailByMasking) {
3768 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3769 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3770 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3771 : TargetTransformInfo::RGK_FixedWidthVector);
3772
3773 // Convenience function to return the minimum of two ElementCounts.
3774 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3775 assert((LHS.isScalable() == RHS.isScalable()) &&
3776 "Scalable flags must match");
3777 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3778 };
3779
3780 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3781 // Note that both WidestRegister and WidestType may not be powers of 2.
3782 auto MaxVectorElementCount = ElementCount::get(
3783 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3784 ComputeScalableMaxVF);
3785 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3786 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3787 << (MaxVectorElementCount * WidestType) << " bits.\n");
3788
3789 if (!MaxVectorElementCount) {
3790 LLVM_DEBUG(dbgs() << "LV: The target has no "
3791 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3792 << " vector registers.\n");
3793 return ElementCount::getFixed(1);
3794 }
3795
3796 ElementCount MaxVF = clampVFByMaxTripCount(MaxVectorElementCount,
3797 MaxTripCount, FoldTailByMasking);
3798 // If the MaxVF was already clamped, there's no point in trying to pick a
3799 // larger one.
3800 if (MaxVF != MaxVectorElementCount)
3801 return MaxVF;
3802
3804 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3805 : TargetTransformInfo::RGK_FixedWidthVector;
3806
3807 if (MaxVF.isScalable())
3808 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3809 else
3810 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3811
3812 if (useMaxBandwidth(RegKind)) {
3813 auto MaxVectorElementCountMaxBW = ElementCount::get(
3814 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3815 ComputeScalableMaxVF);
3816 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3817
3818 if (ElementCount MinVF =
3819 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3820 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3821 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3822 << ") with target's minimum: " << MinVF << '\n');
3823 MaxVF = MinVF;
3824 }
3825 }
3826
3827 MaxVF = clampVFByMaxTripCount(MaxVF, MaxTripCount, FoldTailByMasking);
3828
3829 if (MaxVectorElementCount != MaxVF) {
3830 // Invalidate any widening decisions we might have made, in case the loop
3831 // requires prediction (decided later), but we have already made some
3832 // load/store widening decisions.
3833 invalidateCostModelingDecisions();
3834 }
3835 }
3836 return MaxVF;
3837}
3838
3839bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3840 const VectorizationFactor &B,
3841 const unsigned MaxTripCount,
3842 bool HasTail,
3843 bool IsEpilogue) const {
3844 InstructionCost CostA = A.Cost;
3845 InstructionCost CostB = B.Cost;
3846
3847 // Improve estimate for the vector width if it is scalable.
3848 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3849 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3850 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3851 if (A.Width.isScalable())
3852 EstimatedWidthA *= *VScale;
3853 if (B.Width.isScalable())
3854 EstimatedWidthB *= *VScale;
3855 }
3856
3857 // When optimizing for size choose whichever is smallest, which will be the
3858 // one with the smallest cost for the whole loop. On a tie pick the larger
3859 // vector width, on the assumption that throughput will be greater.
3860 if (CM.CostKind == TTI::TCK_CodeSize)
3861 return CostA < CostB ||
3862 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3863
3864 // Assume vscale may be larger than 1 (or the value being tuned for),
3865 // so that scalable vectorization is slightly favorable over fixed-width
3866 // vectorization.
3867 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3868 A.Width.isScalable() && !B.Width.isScalable();
3869
3870 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3871 const InstructionCost &RHS) {
3872 return PreferScalable ? LHS <= RHS : LHS < RHS;
3873 };
3874
3875 // To avoid the need for FP division:
3876 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3877 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
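// For example, VF=4 at cost 10 (2.5 per lane) loses to VF=8 at cost 18
// (2.25 per lane), since 10 * 8 = 80 > 18 * 4 = 72.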
3878 if (!MaxTripCount)
3879 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3880
3881 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3882 InstructionCost VectorCost,
3883 InstructionCost ScalarCost) {
3884 // If the trip count is a known (possibly small) constant, the trip count
3885 // will be rounded up to an integer number of iterations under
3886 // FoldTailByMasking. The total cost in that case will be
3887 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3888 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3889 // some extra overheads, but for the purpose of comparing the costs of
3890 // different VFs we can use this to compare the total loop-body cost
3891 // expected after vectorization.
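// For example, with MaxTripCount = 10 and VF = 4, the estimate is
// 2 * VectorCost + 2 * ScalarCost with a scalar tail, and 3 * VectorCost
// when the tail is folded by masking.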
3892 if (HasTail)
3893 return VectorCost * (MaxTripCount / VF) +
3894 ScalarCost * (MaxTripCount % VF);
3895 return VectorCost * divideCeil(MaxTripCount, VF);
3896 };
3897
3898 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3899 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3900 return CmpFn(RTCostA, RTCostB);
3901}
3902
3903bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3904 const VectorizationFactor &B,
3905 bool HasTail,
3906 bool IsEpilogue) const {
3907 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3908 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3909 IsEpilogue);
3910}
3911
3912void LoopVectorizationPlanner::emitInvalidCostRemarks(
3913 OptimizationRemarkEmitter *ORE) {
3914 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3915 SmallVector<RecipeVFPair> InvalidCosts;
3916 for (const auto &Plan : VPlans) {
3917 for (ElementCount VF : Plan->vectorFactors()) {
3918 // The VPlan-based cost model is designed for computing vector cost.
3919 // Querying the VPlan-based cost model with a scalar VF will cause
3920 // errors because we expect the VF to be a vector for most of the widen
3921 // recipes.
3922 if (VF.isScalar())
3923 continue;
3924
3925 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
3926 precomputeCosts(*Plan, VF, CostCtx);
3927 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3928 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3929 for (auto &R : *VPBB) {
3930 if (!R.cost(VF, CostCtx).isValid())
3931 InvalidCosts.emplace_back(&R, VF);
3932 }
3933 }
3934 }
3935 }
3936 if (InvalidCosts.empty())
3937 return;
3938
3939 // Emit a report of VFs with invalid costs in the loop.
3940
3941 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3942 DenseMap<VPRecipeBase *, unsigned> Numbering;
3943 unsigned I = 0;
3944 for (auto &Pair : InvalidCosts)
3945 if (Numbering.try_emplace(Pair.first, I).second)
3946 ++I;
3947
3948 // Sort the list, first on recipe(number) then on VF.
3949 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3950 unsigned NA = Numbering[A.first];
3951 unsigned NB = Numbering[B.first];
3952 if (NA != NB)
3953 return NA < NB;
3954 return ElementCount::isKnownLT(A.second, B.second);
3955 });
3956
3957 // For a list of ordered recipe-VF pairs:
3958 // [(load, VF1), (load, VF2), (store, VF1)]
3959 // group the recipes together to emit separate remarks for:
3960 // load (VF1, VF2)
3961 // store (VF1)
3962 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3963 auto Subset = ArrayRef<RecipeVFPair>();
3964 do {
3965 if (Subset.empty())
3966 Subset = Tail.take_front(1);
3967
3968 VPRecipeBase *R = Subset.front().first;
3969
3970 unsigned Opcode =
3971 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3972 .Case<VPHeaderPHIRecipe, VPWidenPHIRecipe>(
3973 [](const auto *R) { return Instruction::PHI; })
3974 .Case<VPWidenSelectRecipe>(
3975 [](const auto *R) { return Instruction::Select; })
3976 .Case<VPWidenStoreRecipe>(
3977 [](const auto *R) { return Instruction::Store; })
3978 .Case<VPWidenLoadRecipe>(
3979 [](const auto *R) { return Instruction::Load; })
3980 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3981 [](const auto *R) { return Instruction::Call; })
3982 .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
3983 VPWidenCastRecipe>(
3984 [](const auto *R) { return R->getOpcode(); })
3985 .Case<VPInterleaveRecipe>([](const VPInterleaveRecipe *R) {
3986 return R->getStoredValues().empty() ? Instruction::Load
3987 : Instruction::Store;
3988 });
3989
3990 // If the next recipe is different, or if there are no other pairs,
3991 // emit a remark for the collated subset. e.g.
3992 // [(load, VF1), (load, VF2))]
3993 // to emit:
3994 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3995 if (Subset == Tail || Tail[Subset.size()].first != R) {
3996 std::string OutString;
3997 raw_string_ostream OS(OutString);
3998 assert(!Subset.empty() && "Unexpected empty range");
3999 OS << "Recipe with invalid costs prevented vectorization at VF=(";
4000 for (const auto &Pair : Subset)
4001 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
4002 OS << "):";
4003 if (Opcode == Instruction::Call) {
4004 StringRef Name = "";
4005 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
4006 Name = Int->getIntrinsicName();
4007 } else {
4008 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4009 Function *CalledFn =
4010 WidenCall ? WidenCall->getCalledScalarFunction()
4011 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4012 ->getLiveInIRValue());
4013 Name = CalledFn->getName();
4014 }
4015 OS << " call to " << Name;
4016 } else
4017 OS << " " << Instruction::getOpcodeName(Opcode);
4018 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4019 R->getDebugLoc());
4020 Tail = Tail.drop_front(Subset.size());
4021 Subset = {};
4022 } else
4023 // Grow the subset by one element
4024 Subset = Tail.take_front(Subset.size() + 1);
4025 } while (!Tail.empty());
4026}
4027
4028/// Check if any recipe of \p Plan will generate a vector value, which will be
4029/// assigned a vector register.
4030static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4031 const TargetTransformInfo &TTI) {
4032 assert(VF.isVector() && "Checking a scalar VF?");
4033 VPTypeAnalysis TypeInfo(Plan);
4034 DenseSet<VPRecipeBase *> EphemeralRecipes;
4035 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4036 // Set of already visited types.
4037 DenseSet<Type *> Visited;
4038 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4039 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
4040 for (VPRecipeBase &R : *VPBB) {
4041 if (EphemeralRecipes.contains(&R))
4042 continue;
4043 // Continue early if the recipe is considered to not produce a vector
4044 // result. Note that this includes VPInstruction where some opcodes may
4045 // produce a vector, to preserve existing behavior as VPInstructions model
4046 // aspects not directly mapped to existing IR instructions.
4047 switch (R.getVPDefID()) {
4048 case VPDef::VPDerivedIVSC:
4049 case VPDef::VPScalarIVStepsSC:
4050 case VPDef::VPReplicateSC:
4051 case VPDef::VPInstructionSC:
4052 case VPDef::VPCanonicalIVPHISC:
4053 case VPDef::VPVectorPointerSC:
4054 case VPDef::VPVectorEndPointerSC:
4055 case VPDef::VPExpandSCEVSC:
4056 case VPDef::VPEVLBasedIVPHISC:
4057 case VPDef::VPPredInstPHISC:
4058 case VPDef::VPBranchOnMaskSC:
4059 continue;
4060 case VPDef::VPReductionSC:
4061 case VPDef::VPActiveLaneMaskPHISC:
4062 case VPDef::VPWidenCallSC:
4063 case VPDef::VPWidenCanonicalIVSC:
4064 case VPDef::VPWidenCastSC:
4065 case VPDef::VPWidenGEPSC:
4066 case VPDef::VPWidenIntrinsicSC:
4067 case VPDef::VPWidenSC:
4068 case VPDef::VPWidenSelectSC:
4069 case VPDef::VPBlendSC:
4070 case VPDef::VPFirstOrderRecurrencePHISC:
4071 case VPDef::VPHistogramSC:
4072 case VPDef::VPWidenPHISC:
4073 case VPDef::VPWidenIntOrFpInductionSC:
4074 case VPDef::VPWidenPointerInductionSC:
4075 case VPDef::VPReductionPHISC:
4076 case VPDef::VPInterleaveEVLSC:
4077 case VPDef::VPInterleaveSC:
4078 case VPDef::VPWidenLoadEVLSC:
4079 case VPDef::VPWidenLoadSC:
4080 case VPDef::VPWidenStoreEVLSC:
4081 case VPDef::VPWidenStoreSC:
4082 break;
4083 default:
4084 llvm_unreachable("unhandled recipe");
4085 }
4086
4087 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4088 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4089 if (!NumLegalParts)
4090 return false;
4091 if (VF.isScalable()) {
4092 // <vscale x 1 x iN> is assumed to be profitable over iN because
4093 // scalable registers are a distinct register class from scalar
4094 // ones. If we ever find a target which wants to lower scalable
4095 // vectors back to scalars, we'll need to update this code to
4096 // explicitly ask TTI about the register class uses for each part.
4097 return NumLegalParts <= VF.getKnownMinValue();
4098 }
4099 // Two or more elements that share a register are vectorized.
4100 return NumLegalParts < VF.getFixedValue();
4101 };
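// For example, a <4 x i32> result on a target with 128-bit vector registers
// legalizes to a single part, and 1 < 4, so the recipe is considered to
// generate a vector value.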
4102
4103 // No defs and not a store (e.g., a branch): no value to check, continue.
4104 if (R.getNumDefinedValues() == 0 &&
4105 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveBase>(&R))
4106 continue;
4107 // For multi-def recipes (currently only interleaved loads), it suffices
4108 // to check the first def only.
4109 // For stores, check their stored value; for interleaved stores, it
4110 // suffices to check the first stored value only. In all cases this is
4111 // the second operand.
4112 VPValue *ToCheck =
4113 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4114 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4115 if (!Visited.insert({ScalarTy}).second)
4116 continue;
4117 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4118 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4119 return true;
4120 }
4121 }
4122
4123 return false;
4124}
4125
4126static bool hasReplicatorRegion(VPlan &Plan) {
4127 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_deep(
4128 Plan.getVectorLoopRegion()->getEntry())),
4129 [](auto *VPRB) { return VPRB->isReplicator(); });
4130}
4131
4132#ifndef NDEBUG
4133VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4134 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4135 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4136 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4137 assert(
4138 any_of(VPlans,
4139 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4140 "Expected Scalar VF to be a candidate");
4141
4142 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4143 ExpectedCost);
4144 VectorizationFactor ChosenFactor = ScalarCost;
4145
4146 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4147 if (ForceVectorization &&
4148 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4149 // Ignore scalar width, because the user explicitly wants vectorization.
4150 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4151 // evaluation.
4152 ChosenFactor.Cost = InstructionCost::getMax();
4153 }
4154
4155 for (auto &P : VPlans) {
4156 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4157 P->vectorFactors().end());
4158
4159 SmallVector<VPRegisterUsage, 8> RUs;
4160 if (any_of(VFs, [this](ElementCount VF) {
4161 return CM.shouldConsiderRegPressureForVF(VF);
4162 }))
4163 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4164
4165 for (unsigned I = 0; I < VFs.size(); I++) {
4166 ElementCount VF = VFs[I];
4167 // The cost for scalar VF=1 is already calculated, so ignore it.
4168 if (VF.isScalar())
4169 continue;
4170
4171 /// If the register pressure needs to be considered for VF,
4172 /// don't consider the VF as valid if it exceeds the number
4173 /// of registers for the target.
4174 if (CM.shouldConsiderRegPressureForVF(VF) &&
4175 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4176 continue;
4177
4178 InstructionCost C = CM.expectedCost(VF);
4179
4180 // Add on other costs that are modelled in VPlan, but not in the legacy
4181 // cost model.
4182 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind);
4183 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4184 assert(VectorRegion && "Expected to have a vector region!");
4185 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4186 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4187 for (VPRecipeBase &R : *VPBB) {
4188 auto *VPI = dyn_cast<VPInstruction>(&R);
4189 if (!VPI)
4190 continue;
4191 switch (VPI->getOpcode()) {
4192 // Selects are only modelled in the legacy cost model for safe
4193 // divisors.
4194 case Instruction::Select: {
4195 VPValue *VPV = VPI->getVPSingleValue();
4196 if (VPV->getNumUsers() == 1) {
4197 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPV->user_begin())) {
4198 switch (WR->getOpcode()) {
4199 case Instruction::UDiv:
4200 case Instruction::SDiv:
4201 case Instruction::URem:
4202 case Instruction::SRem:
4203 continue;
4204 default:
4205 break;
4206 }
4207 }
4208 }
4209 C += VPI->cost(VF, CostCtx);
4210 break;
4211 }
4212 case VPInstruction::ActiveLaneMask: {
4213 unsigned Multiplier =
4214 cast<ConstantInt>(VPI->getOperand(2)->getLiveInIRValue())
4215 ->getZExtValue();
4216 C += VPI->cost(VF * Multiplier, CostCtx);
4217 break;
4218 }
4219 case VPInstruction::ExtractPenultimateElement:
4220 C += VPI->cost(VF, CostCtx);
4221 break;
4222 default:
4223 break;
4224 }
4225 }
4226 }
4227
4228 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4229 unsigned Width =
4230 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4231 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4232 << " costs: " << (Candidate.Cost / Width));
4233 if (VF.isScalable())
4234 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4235 << CM.getVScaleForTuning().value_or(1) << ")");
4236 LLVM_DEBUG(dbgs() << ".\n");
4237
4238 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4239 LLVM_DEBUG(
4240 dbgs()
4241 << "LV: Not considering vector loop of width " << VF
4242 << " because it will not generate any vector instructions.\n");
4243 continue;
4244 }
4245
4246 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4247 LLVM_DEBUG(
4248 dbgs()
4249 << "LV: Not considering vector loop of width " << VF
4250 << " because it would cause replicated blocks to be generated,"
4251 << " which isn't allowed when optimizing for size.\n");
4252 continue;
4253 }
4254
4255 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4256 ChosenFactor = Candidate;
4257 }
4258 }
4259
4260 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4261 reportVectorizationFailure(
4262 "There are conditional stores.",
4263 "store that is conditionally executed prevents vectorization",
4264 "ConditionalStore", ORE, OrigLoop);
4265 ChosenFactor = ScalarCost;
4266 }
4267
4268 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4269 !isMoreProfitable(ChosenFactor, ScalarCost,
4270 !CM.foldTailByMasking())) dbgs()
4271 << "LV: Vectorization seems to be not beneficial, "
4272 << "but was forced by a user.\n");
4273 return ChosenFactor;
4274}
4275#endif
4276
4277bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4278 ElementCount VF) const {
4279 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4280 // reductions need special handling and are currently unsupported.
4281 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4282 if (!Legal->isReductionVariable(&Phi))
4283 return Legal->isFixedOrderRecurrence(&Phi);
4284 RecurKind RK = Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4285 return RK == RecurKind::FMinNum || RK == RecurKind::FMaxNum;
4286 }))
4287 return false;
4288
4289 // Phis with uses outside of the loop require special handling and are
4290 // currently unsupported.
4291 for (const auto &Entry : Legal->getInductionVars()) {
4292 // Look for uses of the value of the induction at the last iteration.
4293 Value *PostInc =
4294 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4295 for (User *U : PostInc->users())
4296 if (!OrigLoop->contains(cast<Instruction>(U)))
4297 return false;
4298 // Look for uses of penultimate value of the induction.
4299 for (User *U : Entry.first->users())
4300 if (!OrigLoop->contains(cast<Instruction>(U)))
4301 return false;
4302 }
4303
4304 // Epilogue vectorization code has not been audited to ensure it handles
4305 // non-latch exits properly. It may be fine, but it needs to be audited and
4306 // tested.
4307 // TODO: Add support for loops with an early exit.
4308 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4309 return false;
4310
4311 return true;
4312}
4313
4314bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4315 const ElementCount VF, const unsigned IC) const {
4316 // FIXME: We need a much better cost-model to take different parameters such
4317 // as register pressure, code size increase and cost of extra branches into
4318 // account. For now we apply a very crude heuristic and only consider loops
4319 // with vectorization factors larger than a certain value.
4320
4321 // Allow the target to opt out entirely.
4322 if (!TTI.preferEpilogueVectorization())
4323 return false;
4324
4325 // We also consider epilogue vectorization unprofitable for targets that don't
4326 // consider interleaving beneficial (eg. MVE).
4327 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4328 return false;
4329
4330 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4331 ? EpilogueVectorizationMinVF
4332 : TTI.getEpilogueVectorizationMinVF();
4333 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4334}
4335
4336VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
4337 const ElementCount MainLoopVF, unsigned IC) {
4338 VectorizationFactor Result = VectorizationFactor::Disabled();
4339 if (!EnableEpilogueVectorization) {
4340 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4341 return Result;
4342 }
4343
4344 if (!CM.isScalarEpilogueAllowed()) {
4345 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4346 "epilogue is allowed.\n");
4347 return Result;
4348 }
4349
4350 // Not really a cost consideration, but check for unsupported cases here to
4351 // simplify the logic.
4352 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4353 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4354 "is not a supported candidate.\n");
4355 return Result;
4356 }
4357
4358 if (EpilogueVectorizationForceVF > 1) {
4359 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4360 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4361 if (hasPlanWithVF(ForcedEC))
4362 return {ForcedEC, 0, 0};
4363
4364 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4365 "viable.\n");
4366 return Result;
4367 }
4368
4369 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4370 LLVM_DEBUG(
4371 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4372 return Result;
4373 }
4374
4375 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4376 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4377 "this loop\n");
4378 return Result;
4379 }
4380
4381 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4382 // the main loop handles 8 lanes per iteration. We could still benefit from
4383 // vectorizing the epilogue loop with VF=4.
4384 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4385 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4386
4387 ScalarEvolution &SE = *PSE.getSE();
4388 Type *TCType = Legal->getWidestInductionType();
4389 const SCEV *RemainingIterations = nullptr;
4390 unsigned MaxTripCount = 0;
4391 const SCEV *TC =
4392 vputils::getSCEVExprForVPValue(getPlanFor(MainLoopVF).getTripCount(), SE);
4393 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4394 RemainingIterations =
4395 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4396
4397 // No iterations left to process in the epilogue.
4398 if (RemainingIterations->isZero())
4399 return Result;
4400
4401 if (MainLoopVF.isFixed()) {
4402 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4403 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4404 SE.getConstant(TCType, MaxTripCount))) {
4405 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4406 }
4407 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4408 << MaxTripCount << "\n");
4409 }
4410
4411 for (auto &NextVF : ProfitableVFs) {
4412 // Skip candidate VFs without a corresponding VPlan.
4413 if (!hasPlanWithVF(NextVF.Width))
4414 continue;
4415
4416 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4417 // vectors) or > the VF of the main loop (fixed vectors).
4418 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4419 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4420 (NextVF.Width.isScalable() &&
4421 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4422 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4423 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4424 continue;
4425
4426 // If NextVF is greater than the number of remaining iterations, the
4427 // epilogue loop would be dead. Skip such factors.
4428 if (RemainingIterations && !NextVF.Width.isScalable()) {
4429 if (SE.isKnownPredicate(
4430 CmpInst::ICMP_UGT,
4431 SE.getConstant(TCType, NextVF.Width.getFixedValue()),
4432 RemainingIterations))
4433 continue;
4434 }
4435
4436 if (Result.Width.isScalar() ||
4437 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4438 /*IsEpilogue*/ true))
4439 Result = NextVF;
4440 }
4441
4442 if (Result != VectorizationFactor::Disabled())
4443 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4444 << Result.Width << "\n");
4445 return Result;
4446}
4447
4448std::pair<unsigned, unsigned>
4449LoopVectorizationCostModel::getSmallestAndWidestTypes() {
4450 unsigned MinWidth = -1U;
4451 unsigned MaxWidth = 8;
4452 const DataLayout &DL = TheFunction->getDataLayout();
4453 // For in-loop reductions, no element types are added to ElementTypesInLoop
4454 // if there are no loads/stores in the loop. In this case, check through the
4455 // reduction variables to determine the maximum width.
4456 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4457 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4458 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4459 // When finding the min width used by the recurrence we need to account
4460 // for casts on the input operands of the recurrence.
4461 MinWidth = std::min(
4462 MinWidth,
4463 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4464 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4465 MaxWidth = std::max(MaxWidth,
4466 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4467 }
4468 } else {
4469 for (Type *T : ElementTypesInLoop) {
4470 MinWidth = std::min<unsigned>(
4471 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4472 MaxWidth = std::max<unsigned>(
4473 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4474 }
4475 }
4476 return {MinWidth, MaxWidth};
4477}
4478
4479void LoopVectorizationCostModel::collectElementTypesForWidening() {
4480 ElementTypesInLoop.clear();
4481 // For each block.
4482 for (BasicBlock *BB : TheLoop->blocks()) {
4483 // For each instruction in the loop.
4484 for (Instruction &I : BB->instructionsWithoutDebug()) {
4485 Type *T = I.getType();
4486
4487 // Skip ignored values.
4488 if (ValuesToIgnore.count(&I))
4489 continue;
4490
4491 // Only examine Loads, Stores and PHINodes.
4492 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4493 continue;
4494
4495 // Examine PHI nodes that are reduction variables. Update the type to
4496 // account for the recurrence type.
4497 if (auto *PN = dyn_cast<PHINode>(&I)) {
4498 if (!Legal->isReductionVariable(PN))
4499 continue;
4500 const RecurrenceDescriptor &RdxDesc =
4501 Legal->getRecurrenceDescriptor(PN);
4502 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4503 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4504 RdxDesc.getRecurrenceType()))
4505 continue;
4506 T = RdxDesc.getRecurrenceType();
4507 }
4508
4509 // Examine the stored values.
4510 if (auto *ST = dyn_cast<StoreInst>(&I))
4511 T = ST->getValueOperand()->getType();
4512
4513 assert(T->isSized() &&
4514 "Expected the load/store/recurrence type to be sized");
4515
4516 ElementTypesInLoop.insert(T);
4517 }
4518 }
4519}
4520
4521unsigned
4522LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4523 InstructionCost LoopCost) {
4524 // -- The interleave heuristics --
4525 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4526 // There are many micro-architectural considerations that we can't predict
4527 // at this level. For example, frontend pressure (on decode or fetch) due to
4528 // code size, or the number and capabilities of the execution ports.
4529 //
4530 // We use the following heuristics to select the interleave count:
4531 // 1. If the code has reductions, then we interleave to break the cross
4532 // iteration dependency.
4533 // 2. If the loop is really small, then we interleave to reduce the loop
4534 // overhead.
4535 // 3. We don't interleave if we think that we will spill registers to memory
4536 // due to the increased register pressure.
4537
4538 if (!CM.isScalarEpilogueAllowed())
4539 return 1;
4540
4541 if (any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4542 IsaPred<VPEVLBasedIVPHIRecipe>)) {
4543 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4544 "Unroll factor forced to be 1.\n");
4545 return 1;
4546 }
4547
4548 // We used the distance for the interleave count.
4549 if (!Legal->isSafeForAnyVectorWidth())
4550 return 1;
4551
4552 // We don't attempt to perform interleaving for loops with uncountable early
4553 // exits because the VPInstruction::AnyOf code cannot currently handle
4554 // multiple parts.
4555 if (Plan.hasEarlyExit())
4556 return 1;
4557
4558 const bool HasReductions =
4559 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4560 IsaPred<VPReductionPHIRecipe>);
4561
4562 // If we did not calculate the cost for VF (because the user selected the VF)
4563 // then we calculate the cost of VF here.
4564 if (LoopCost == 0) {
4565 if (VF.isScalar())
4566 LoopCost = CM.expectedCost(VF);
4567 else
4568 LoopCost = cost(Plan, VF);
4569 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4570
4571 // Loop body is free and there is no need for interleaving.
4572 if (LoopCost == 0)
4573 return 1;
4574 }
4575
4576 VPRegisterUsage R =
4577 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4578 // We divide by these constants, so assume that we have at least one
4579 // instruction that uses at least one register.
4580 for (auto &Pair : R.MaxLocalUsers) {
4581 Pair.second = std::max(Pair.second, 1U);
4582 }
4583
4584 // We calculate the interleave count using the following formula.
4585 // Subtract the number of loop invariants from the number of available
4586 // registers. These registers are used by all of the interleaved instances.
4587 // Next, divide the remaining registers by the number of registers that is
4588 // required by the loop, in order to estimate how many parallel instances
4589 // fit without causing spills. All of this is rounded down if necessary to be
4590 // a power of two. We want power of two interleave count to simplify any
4591 // addressing operations or alignment considerations.
4592 // We also want power of two interleave counts to ensure that the induction
4593 // variable of the vector loop wraps to zero, when tail is folded by masking;
4594 // this currently happens when OptForSize, in which case IC is set to 1 above.
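// For example, with 32 registers in a class, 2 loop-invariant values and a
// maximum local usage of 10 registers, this yields
// IC = bit_floor((32 - 2) / 10) = 2.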
4595 unsigned IC = UINT_MAX;
4596
4597 for (const auto &Pair : R.MaxLocalUsers) {
4598 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4599 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4600 << " registers of "
4601 << TTI.getRegisterClassName(Pair.first)
4602 << " register class\n");
4603 if (VF.isScalar()) {
4604 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4605 TargetNumRegisters = ForceTargetNumScalarRegs;
4606 } else {
4607 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4608 TargetNumRegisters = ForceTargetNumVectorRegs;
4609 }
4610 unsigned MaxLocalUsers = Pair.second;
4611 unsigned LoopInvariantRegs = 0;
4612 if (R.LoopInvariantRegs.contains(Pair.first))
4613 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4614
4615 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4616 MaxLocalUsers);
4617 // Don't count the induction variable as interleaved.
4618 if (EnableIndVarRegisterHeur) {
4619 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4620 std::max(1U, (MaxLocalUsers - 1)));
4621 }
4622
4623 IC = std::min(IC, TmpIC);
4624 }
4625
4626 // Clamp the interleave ranges to reasonable counts.
4627 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4628
4629 // Check if the user has overridden the max.
4630 if (VF.isScalar()) {
4631 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4632 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4633 } else {
4634 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4635 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4636 }
4637
4638 // Try to get the exact trip count, or an estimate based on profiling data or
4639 // ConstantMax from PSE, failing that.
4640 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4641
4642 // For fixed length VFs treat a scalable trip count as unknown.
4643 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4644 // Re-evaluate trip counts and VFs to be in the same numerical space.
4645 unsigned AvailableTC =
4646 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4647 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4648
4649 // At least one iteration must be scalar when this constraint holds. So the
4650 // maximum available iterations for interleaving is one less.
4651 if (CM.requiresScalarEpilogue(VF.isVector()))
4652 --AvailableTC;
4653
4654 unsigned InterleaveCountLB = bit_floor(std::max(
4655 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4656
4657 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4658 // If the best known trip count is exact, we select between two
4659 // prospective ICs, where
4660 //
4661 // 1) the aggressive IC is capped by the trip count divided by VF
4662 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4663 //
4664 // The final IC is selected in a way that the epilogue loop trip count is
4665 // minimized while maximizing the IC itself, so that we either run the
4666 // vector loop at least once if it generates a small epilogue loop, or
4667 // else we run the vector loop at least twice.
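      // Worked example with a hypothetical exact trip count: for
      // AvailableTC = 64, EstimatedVF = 8 and a sufficiently large
      // MaxInterleaveCount, InterleaveCountLB = bit_floor(64 / 16) = 4 and
      // InterleaveCountUB = bit_floor(64 / 8) = 8. Both leave a scalar tail
      // of 0 iterations (64 % 64 == 64 % 32 == 0), so the larger count wins.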
4668
4669 unsigned InterleaveCountUB = bit_floor(std::max(
4670 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4671 MaxInterleaveCount = InterleaveCountLB;
4672
4673 if (InterleaveCountUB != InterleaveCountLB) {
4674 unsigned TailTripCountUB =
4675 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4676 unsigned TailTripCountLB =
4677 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4678         // If both produce the same scalar tail, maximize the IC to do the
4679         // same work in fewer vector loop iterations.
4680 if (TailTripCountUB == TailTripCountLB)
4681 MaxInterleaveCount = InterleaveCountUB;
4682 }
4683 } else {
4684 // If trip count is an estimated compile time constant, limit the
4685 // IC to be capped by the trip count divided by VF * 2, such that the
4686 // vector loop runs at least twice to make interleaving seem profitable
4687       // when there is an epilogue loop present. Since the exact trip count is
4688       // not known, we choose to be conservative in our IC estimate.
4689 MaxInterleaveCount = InterleaveCountLB;
4690 }
4691 }
4692
4693 assert(MaxInterleaveCount > 0 &&
4694 "Maximum interleave count must be greater than 0");
4695
4696   // Clamp the calculated IC to be between 1 and the max interleave count
4697   // that the target and trip count allow.
4698 if (IC > MaxInterleaveCount)
4699 IC = MaxInterleaveCount;
4700 else
4701 // Make sure IC is greater than 0.
4702 IC = std::max(1u, IC);
4703
4704 assert(IC > 0 && "Interleave count must be greater than 0.");
4705
4706 // Interleave if we vectorized this loop and there is a reduction that could
4707 // benefit from interleaving.
4708 if (VF.isVector() && HasReductions) {
4709 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4710 return IC;
4711 }
4712
4713 // For any scalar loop that either requires runtime checks or predication we
4714 // are better off leaving this to the unroller. Note that if we've already
4715 // vectorized the loop we will have done the runtime check and so interleaving
4716 // won't require further checks.
4717 bool ScalarInterleavingRequiresPredication =
4718 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4719 return Legal->blockNeedsPredication(BB);
4720 }));
4721 bool ScalarInterleavingRequiresRuntimePointerCheck =
4722 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4723
4724 // We want to interleave small loops in order to reduce the loop overhead and
4725 // potentially expose ILP opportunities.
4726 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4727 << "LV: IC is " << IC << '\n'
4728 << "LV: VF is " << VF << '\n');
4729 const bool AggressivelyInterleaveReductions =
4730 TTI.enableAggressiveInterleaving(HasReductions);
4731 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4732 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4733 // We assume that the cost overhead is 1 and we use the cost model
4734 // to estimate the cost of the loop and interleave until the cost of the
4735 // loop overhead is about 5% of the cost of the loop.
4736 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4737 SmallLoopCost / LoopCost.getValue()));
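    // For example, assuming the default SmallLoopCost of 20 (a tunable
    // option) and a computed LoopCost of 6, SmallIC =
    // min(IC, bit_floor(20 / 6)) = min(IC, 2), so the body is replicated
    // only until its overhead is roughly amortized.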
4738
4739 // Interleave until store/load ports (estimated by max interleave count) are
4740 // saturated.
4741 unsigned NumStores = 0;
4742 unsigned NumLoads = 0;
4743     for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4744              vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
4745       for (VPRecipeBase &R : *VPBB) {
4746         if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
4747           NumLoads++;
4748           continue;
4749         }
4750         if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
4751           NumStores++;
4752 continue;
4753 }
4754
4755 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4756 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4757 NumStores += StoreOps;
4758 else
4759 NumLoads += InterleaveR->getNumDefinedValues();
4760 continue;
4761 }
4762 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4763 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4764 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4765 continue;
4766 }
4767 if (isa<VPHistogramRecipe>(&R)) {
4768 NumLoads++;
4769 NumStores++;
4770 continue;
4771 }
4772 }
4773 }
4774 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4775 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
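    // E.g. with IC = 8, 2 stores and 3 loads in the plan, StoresIC = 4 and
    // LoadsIC = 2 (integer division), approximating the interleave level at
    // which the store and load ports would saturate.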
4776
4777 // There is little point in interleaving for reductions containing selects
4778 // and compares when VF=1 since it may just create more overhead than it's
4779 // worth for loops with small trip counts. This is because we still have to
4780 // do the final reduction after the loop.
4781 bool HasSelectCmpReductions =
4782 HasReductions &&
4783         any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4784                [](VPRecipeBase &R) {
4785 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4786 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4787 RedR->getRecurrenceKind()) ||
4788 RecurrenceDescriptor::isFindIVRecurrenceKind(
4789 RedR->getRecurrenceKind()));
4790 });
4791 if (HasSelectCmpReductions) {
4792 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4793 return 1;
4794 }
4795
4796 // If we have a scalar reduction (vector reductions are already dealt with
4797 // by this point), we can increase the critical path length if the loop
4798 // we're interleaving is inside another loop. For tree-wise reductions
4799 // set the limit to 2, and for ordered reductions it's best to disable
4800 // interleaving entirely.
4801 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4802 bool HasOrderedReductions =
4803           any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4804                  [](VPRecipeBase &R) {
4805 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4806
4807 return RedR && RedR->isOrdered();
4808 });
4809 if (HasOrderedReductions) {
4810 LLVM_DEBUG(
4811 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4812 return 1;
4813 }
4814
4815 unsigned F = MaxNestedScalarReductionIC;
4816 SmallIC = std::min(SmallIC, F);
4817 StoresIC = std::min(StoresIC, F);
4818 LoadsIC = std::min(LoadsIC, F);
4819 }
4820
4821     if (EnableLoadStoreRuntimeInterleave &&
4822         std::max(StoresIC, LoadsIC) > SmallIC) {
4823 LLVM_DEBUG(
4824 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4825 return std::max(StoresIC, LoadsIC);
4826 }
4827
4828 // If there are scalar reductions and TTI has enabled aggressive
4829 // interleaving for reductions, we will interleave to expose ILP.
4830 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4831 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4832 // Interleave no less than SmallIC but not as aggressive as the normal IC
4833 // to satisfy the rare situation when resources are too limited.
4834 return std::max(IC / 2, SmallIC);
4835 }
4836
4837 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4838 return SmallIC;
4839 }
4840
4841 // Interleave if this is a large loop (small loops are already dealt with by
4842 // this point) that could benefit from interleaving.
4843 if (AggressivelyInterleaveReductions) {
4844 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4845 return IC;
4846 }
4847
4848 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4849 return 1;
4850}
4851
4852bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4853 ElementCount VF) {
4854 // TODO: Cost model for emulated masked load/store is completely
4855 // broken. This hack guides the cost model to use an artificially
4856 // high enough value to practically disable vectorization with such
4857 // operations, except where previously deployed legality hack allowed
4858 // using very low cost values. This is to avoid regressions coming simply
4859 // from moving "masked load/store" check from legality to cost model.
4860 // Masked Load/Gather emulation was previously never allowed.
4861 // Limited number of Masked Store/Scatter emulation was allowed.
4862   assert(isPredicatedInst(I) &&
4863          "Expecting a scalar emulated instruction");
4864 return isa<LoadInst>(I) ||
4865 (isa<StoreInst>(I) &&
4866 NumPredStores > NumberOfStoresToPredicate);
4867}
4868
4869 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4870   assert(VF.isVector() && "Expected VF >= 2");
4871
4872 // If we've already collected the instructions to scalarize or the predicated
4873 // BBs after vectorization, there's nothing to do. Collection may already have
4874 // occurred if we have a user-selected VF and are now computing the expected
4875 // cost for interleaving.
4876 if (InstsToScalarize.contains(VF) ||
4877 PredicatedBBsAfterVectorization.contains(VF))
4878 return;
4879
4880   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4881 // not profitable to scalarize any instructions, the presence of VF in the
4882 // map will indicate that we've analyzed it already.
4883 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4884
4885 // Find all the instructions that are scalar with predication in the loop and
4886 // determine if it would be better to not if-convert the blocks they are in.
4887 // If so, we also record the instructions to scalarize.
4888 for (BasicBlock *BB : TheLoop->blocks()) {
4889     if (!blockNeedsPredicationForAnyReason(BB))
4890       continue;
4891 for (Instruction &I : *BB)
4892 if (isScalarWithPredication(&I, VF)) {
4893 ScalarCostsTy ScalarCosts;
4894 // Do not apply discount logic for:
4895 // 1. Scalars after vectorization, as there will only be a single copy
4896 // of the instruction.
4897 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4898 // 3. Emulated masked memrefs, if a hacked cost is needed.
4899 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4900 !useEmulatedMaskMemRefHack(&I, VF) &&
4901 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4902 for (const auto &[I, IC] : ScalarCosts)
4903 ScalarCostsVF.insert({I, IC});
4904 // Check if we decided to scalarize a call. If so, update the widening
4905 // decision of the call to CM_Scalarize with the computed scalar cost.
4906 for (const auto &[I, Cost] : ScalarCosts) {
4907 auto *CI = dyn_cast<CallInst>(I);
4908 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4909 continue;
4910 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4911 CallWideningDecisions[{CI, VF}].Cost = Cost;
4912 }
4913 }
4914 // Remember that BB will remain after vectorization.
4915 PredicatedBBsAfterVectorization[VF].insert(BB);
4916 for (auto *Pred : predecessors(BB)) {
4917 if (Pred->getSingleSuccessor() == BB)
4918 PredicatedBBsAfterVectorization[VF].insert(Pred);
4919 }
4920 }
4921 }
4922}
4923
4924InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4925 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4926 assert(!isUniformAfterVectorization(PredInst, VF) &&
4927 "Instruction marked uniform-after-vectorization will be predicated");
4928
4929 // Initialize the discount to zero, meaning that the scalar version and the
4930 // vector version cost the same.
4931 InstructionCost Discount = 0;
4932
4933 // Holds instructions to analyze. The instructions we visit are mapped in
4934 // ScalarCosts. Those instructions are the ones that would be scalarized if
4935 // we find that the scalar version costs less.
4936   SmallVector<Instruction *, 8> Worklist;
4937
4938 // Returns true if the given instruction can be scalarized.
4939 auto CanBeScalarized = [&](Instruction *I) -> bool {
4940 // We only attempt to scalarize instructions forming a single-use chain
4941 // from the original predicated block that would otherwise be vectorized.
4942 // Although not strictly necessary, we give up on instructions we know will
4943 // already be scalar to avoid traversing chains that are unlikely to be
4944 // beneficial.
4945 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4946         isScalarAfterVectorization(I, VF))
4947       return false;
4948
4949 // If the instruction is scalar with predication, it will be analyzed
4950 // separately. We ignore it within the context of PredInst.
4951 if (isScalarWithPredication(I, VF))
4952 return false;
4953
4954 // If any of the instruction's operands are uniform after vectorization,
4955 // the instruction cannot be scalarized. This prevents, for example, a
4956 // masked load from being scalarized.
4957 //
4958 // We assume we will only emit a value for lane zero of an instruction
4959 // marked uniform after vectorization, rather than VF identical values.
4960 // Thus, if we scalarize an instruction that uses a uniform, we would
4961 // create uses of values corresponding to the lanes we aren't emitting code
4962 // for. This behavior can be changed by allowing getScalarValue to clone
4963 // the lane zero values for uniforms rather than asserting.
4964 for (Use &U : I->operands())
4965 if (auto *J = dyn_cast<Instruction>(U.get()))
4966 if (isUniformAfterVectorization(J, VF))
4967 return false;
4968
4969 // Otherwise, we can scalarize the instruction.
4970 return true;
4971 };
4972
4973 // Compute the expected cost discount from scalarizing the entire expression
4974 // feeding the predicated instruction. We currently only consider expressions
4975 // that are single-use instruction chains.
4976 Worklist.push_back(PredInst);
4977 while (!Worklist.empty()) {
4978 Instruction *I = Worklist.pop_back_val();
4979
4980 // If we've already analyzed the instruction, there's nothing to do.
4981 if (ScalarCosts.contains(I))
4982 continue;
4983
4984 // Cannot scalarize fixed-order recurrence phis at the moment.
4985 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4986 continue;
4987
4988 // Compute the cost of the vector instruction. Note that this cost already
4989 // includes the scalarization overhead of the predicated instruction.
4990 InstructionCost VectorCost = getInstructionCost(I, VF);
4991
4992 // Compute the cost of the scalarized instruction. This cost is the cost of
4993 // the instruction as if it wasn't if-converted and instead remained in the
4994 // predicated block. We will scale this cost by block probability after
4995 // computing the scalarization overhead.
4996 InstructionCost ScalarCost =
4997         VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4998
4999 // Compute the scalarization overhead of needed insertelement instructions
5000 // and phi nodes.
5001 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
5002 Type *WideTy = toVectorizedTy(I->getType(), VF);
5003 for (Type *VectorTy : getContainedTypes(WideTy)) {
5004 ScalarCost += TTI.getScalarizationOverhead(
5005             cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5006             /*Insert=*/true,
5007 /*Extract=*/false, CostKind);
5008 }
5009 ScalarCost +=
5010 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
5011 }
5012
5013 // Compute the scalarization overhead of needed extractelement
5014 // instructions. For each of the instruction's operands, if the operand can
5015 // be scalarized, add it to the worklist; otherwise, account for the
5016 // overhead.
5017 for (Use &U : I->operands())
5018 if (auto *J = dyn_cast<Instruction>(U.get())) {
5019 assert(canVectorizeTy(J->getType()) &&
5020 "Instruction has non-scalar type");
5021 if (CanBeScalarized(J))
5022 Worklist.push_back(J);
5023 else if (needsExtract(J, VF)) {
5024 Type *WideTy = toVectorizedTy(J->getType(), VF);
5025 for (Type *VectorTy : getContainedTypes(WideTy)) {
5026 ScalarCost += TTI.getScalarizationOverhead(
5027 cast<VectorType>(VectorTy),
5028 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5029 /*Extract*/ true, CostKind);
5030 }
5031 }
5032 }
5033
5034 // Scale the total scalar cost by block probability.
5035 ScalarCost /= getPredBlockCostDivisor(CostKind);
5036
5037 // Compute the discount. A non-negative discount means the vector version
5038 // of the instruction costs more, and scalarizing would be beneficial.
5039 Discount += VectorCost - ScalarCost;
5040 ScalarCosts[I] = ScalarCost;
5041 }
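  // Sketch of the arithmetic with hypothetical costs: if a chain costs 10
  // when vectorized and 12 when scalarized, a block-cost divisor of 2 scales
  // the scalar estimate to 6, so the chain adds 10 - 6 = 4 to the discount
  // and scalarization is considered beneficial.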
5042
5043 return Discount;
5044}
5045
5046 InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
5047   InstructionCost Cost;
5048
5049 // If the vector loop gets executed exactly once with the given VF, ignore the
5050 // costs of comparison and induction instructions, as they'll get simplified
5051 // away.
5052 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5053 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5054 if (TC == VF && !foldTailByMasking())
5055     addFullyUnrolledInstructionsToIgnore(TheLoop, Legal->getInductionVars(),
5056                                          ValuesToIgnoreForVF);
5057
5058 // For each block.
5059 for (BasicBlock *BB : TheLoop->blocks()) {
5060 InstructionCost BlockCost;
5061
5062 // For each instruction in the old loop.
5063 for (Instruction &I : BB->instructionsWithoutDebug()) {
5064 // Skip ignored values.
5065 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5066 (VF.isVector() && VecValuesToIgnore.count(&I)))
5067 continue;
5068
5069       InstructionCost C = getInstructionCost(&I, VF);
5070
5071 // Check if we should override the cost.
5072 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
5073         C = InstructionCost(ForceTargetInstructionCost);
5074
5075 BlockCost += C;
5076 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5077 << VF << " For instruction: " << I << '\n');
5078 }
5079
5080 // If we are vectorizing a predicated block, it will have been
5081 // if-converted. This means that the block's instructions (aside from
5082 // stores and instructions that may divide by zero) will now be
5083 // unconditionally executed. For the scalar case, we may not always execute
5084 // the predicated block, if it is an if-else block. Thus, scale the block's
5085 // cost by the probability of executing it. blockNeedsPredication from
5086 // Legal is used so as to not include all blocks in tail folded loops.
5087 if (VF.isScalar() && Legal->blockNeedsPredication(BB))
5088 BlockCost /= getPredBlockCostDivisor(CostKind);
5089
5090 Cost += BlockCost;
5091 }
5092
5093 return Cost;
5094}
5095
5096/// Gets Address Access SCEV after verifying that the access pattern
5097/// is loop invariant except the induction variable dependence.
5098///
5099/// This SCEV can be sent to the Target in order to estimate the address
5100/// calculation cost.
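/// For example, an access through a (hypothetical) GEP such as
///   %gep = getelementptr inbounds [64 x i32], ptr %base, i64 %inv, i64 %iv
/// qualifies when %inv is loop invariant and %iv is an induction variable;
/// any other loop-variant index disqualifies the access.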
5101 static const SCEV *getAddressAccessSCEV(
5102               Value *Ptr,
5103               LoopVectorizationLegality *Legal,
5104               PredicatedScalarEvolution &PSE,
5105               const Loop *TheLoop) {
5106
5107 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5108 if (!Gep)
5109 return nullptr;
5110
5111 // We are looking for a gep with all loop invariant indices except for one
5112 // which should be an induction variable.
5113 auto *SE = PSE.getSE();
5114 unsigned NumOperands = Gep->getNumOperands();
5115 for (unsigned Idx = 1; Idx < NumOperands; ++Idx) {
5116 Value *Opd = Gep->getOperand(Idx);
5117 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5118 !Legal->isInductionVariable(Opd))
5119 return nullptr;
5120 }
5121
5122   // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5123 return PSE.getSCEV(Ptr);
5124}
5125
5127LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5128 ElementCount VF) {
5129 assert(VF.isVector() &&
5130 "Scalarization cost of instruction implies vectorization.");
5131 if (VF.isScalable())
5132     return InstructionCost::getInvalid();
5133
5134 Type *ValTy = getLoadStoreType(I);
5135 auto *SE = PSE.getSE();
5136
5137 unsigned AS = getLoadStoreAddressSpace(I);
5138   Value *Ptr = getLoadStorePointerOperand(I);
5139   Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5140 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5141 // that it is being called from this specific place.
5142
5143 // Figure out whether the access is strided and get the stride value
5144   // if it's known at compile time.
5145 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5146
5147 // Get the cost of the scalar memory instruction and address computation.
5148 InstructionCost Cost = VF.getFixedValue() * TTI.getAddressComputationCost(
5149 PtrTy, SE, PtrSCEV, CostKind);
5150
5151 // Don't pass *I here, since it is scalar but will actually be part of a
5152 // vectorized loop where the user of it is a vectorized instruction.
5153 const Align Alignment = getLoadStoreAlignment(I);
5154 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5155 Cost += VF.getFixedValue() *
5156 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5157 AS, CostKind, OpInfo);
5158
5159 // Get the overhead of the extractelement and insertelement instructions
5160 // we might create due to scalarization.
5161 Cost += getScalarizationOverhead(I, VF);
5162
5163 // If we have a predicated load/store, it will need extra i1 extracts and
5164 // conditional branches, but may not be executed for each vector lane. Scale
5165 // the cost by the probability of executing the predicated block.
5166 if (isPredicatedInst(I)) {
5167     Cost /= getPredBlockCostDivisor(CostKind);
5168
5169 // Add the cost of an i1 extract and a branch
5170     auto *VecI1Ty =
5171         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5172 Cost += TTI.getScalarizationOverhead(
5173 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5174 /*Insert=*/false, /*Extract=*/true, CostKind);
5175 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5176
5177 if (useEmulatedMaskMemRefHack(I, VF))
5178 // Artificially setting to a high enough value to practically disable
5179 // vectorization with such operations.
5180 Cost = 3000000;
5181 }
5182
5183 return Cost;
5184}
5185
5187LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5188 ElementCount VF) {
5189 Type *ValTy = getLoadStoreType(I);
5190 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5191   Value *Ptr = getLoadStorePointerOperand(I);
5192   unsigned AS = getLoadStoreAddressSpace(I);
5193 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5194
5195 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5196 "Stride should be 1 or -1 for consecutive memory access");
5197 const Align Alignment = getLoadStoreAlignment(I);
5198   InstructionCost Cost = 0;
5199   if (Legal->isMaskRequired(I)) {
5200 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5201 CostKind);
5202 } else {
5203 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5204 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5205 CostKind, OpInfo, I);
5206 }
5207
5208 bool Reverse = ConsecutiveStride < 0;
5209 if (Reverse)
5210 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5211 VectorTy, {}, CostKind, 0);
5212 return Cost;
5213}
5214
5216LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5217 ElementCount VF) {
5218 assert(Legal->isUniformMemOp(*I, VF));
5219
5220 Type *ValTy = getLoadStoreType(I);
5221   Type *PtrTy = getLoadStorePointerOperand(I)->getType();
5222   auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5223 const Align Alignment = getLoadStoreAlignment(I);
5224 unsigned AS = getLoadStoreAddressSpace(I);
5225 if (isa<LoadInst>(I)) {
5226 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5227 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5228 CostKind) +
5229 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
5230 VectorTy, {}, CostKind);
5231 }
5232 StoreInst *SI = cast<StoreInst>(I);
5233
5234 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5235 // TODO: We have existing tests that request the cost of extracting element
5236 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5237 // the actual generated code, which involves extracting the last element of
5238 // a scalable vector where the lane to extract is unknown at compile time.
5239   InstructionCost Cost =
5240       TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5241 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5242 if (!IsLoopInvariantStoreValue)
5243 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5244 VectorTy, CostKind, 0);
5245 return Cost;
5246}
5247
5249LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5250 ElementCount VF) {
5251 Type *ValTy = getLoadStoreType(I);
5252 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5253 const Align Alignment = getLoadStoreAlignment(I);
5254   const Value *Ptr = getLoadStorePointerOperand(I);
5255   Type *PtrTy = Ptr->getType();
5256
5257 if (!Legal->isUniform(Ptr, VF))
5258 PtrTy = toVectorTy(PtrTy, VF);
5259
5260 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5261 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5262 Legal->isMaskRequired(I), Alignment,
5263 CostKind, I);
5264}
5265
5267LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5268 ElementCount VF) {
5269 const auto *Group = getInterleavedAccessGroup(I);
5270 assert(Group && "Fail to get an interleaved access group.");
5271
5272 Instruction *InsertPos = Group->getInsertPos();
5273 Type *ValTy = getLoadStoreType(InsertPos);
5274 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5275 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5276
5277 unsigned InterleaveFactor = Group->getFactor();
5278 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5279
5280 // Holds the indices of existing members in the interleaved group.
5281 SmallVector<unsigned, 4> Indices;
5282 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5283 if (Group->getMember(IF))
5284 Indices.push_back(IF);
5285
5286 // Calculate the cost of the whole interleaved group.
5287 bool UseMaskForGaps =
5288 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5289 (isa<StoreInst>(I) && !Group->isFull());
5290 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
5291 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5292 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5293 UseMaskForGaps);
5294
5295 if (Group->isReverse()) {
5296 // TODO: Add support for reversed masked interleaved access.
5297 assert(!Legal->isMaskRequired(I) &&
5298 "Reverse masked interleaved access not supported.");
5299 Cost += Group->getNumMembers() *
5300 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5301 VectorTy, {}, CostKind, 0);
5302 }
5303 return Cost;
5304}
5305
5306std::optional<InstructionCost>
5307 LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
5308                                                     ElementCount VF,
5309 Type *Ty) const {
5310 using namespace llvm::PatternMatch;
5311 // Early exit for no inloop reductions
5312 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5313 return std::nullopt;
5314 auto *VectorTy = cast<VectorType>(Ty);
5315
5316 // We are looking for a pattern of, and finding the minimal acceptable cost:
5317 // reduce(mul(ext(A), ext(B))) or
5318 // reduce(mul(A, B)) or
5319 // reduce(ext(A)) or
5320 // reduce(A).
5321 // The basic idea is that we walk down the tree to do that, finding the root
5322 // reduction instruction in InLoopReductionImmediateChains. From there we find
5323 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5324 // of the components. If the reduction cost is lower then we return it for the
5325 // reduction instruction and 0 for the other instructions in the pattern. If
5326 // it is not we return an invalid cost specifying the original cost method
5327 // should be used.
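  // As a concrete shape, with i8 inputs a pattern like
  //   reduce.add(mul(zext(<16 x i8> A), zext(<16 x i8> B)))
  // may map to a single dot-product-style instruction on some targets; in
  // that case getMulAccReductionCost reports less than the summed
  // ext + mul + reduce costs and the cheaper estimate is returned.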
5328 Instruction *RetI = I;
5329 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5330 if (!RetI->hasOneUser())
5331 return std::nullopt;
5332 RetI = RetI->user_back();
5333 }
5334
5335 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5336 RetI->user_back()->getOpcode() == Instruction::Add) {
5337 RetI = RetI->user_back();
5338 }
5339
5340 // Test if the found instruction is a reduction, and if not return an invalid
5341 // cost specifying the parent to use the original cost modelling.
5342 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5343 if (!LastChain)
5344 return std::nullopt;
5345
5346 // Find the reduction this chain is a part of and calculate the basic cost of
5347 // the reduction on its own.
5348 Instruction *ReductionPhi = LastChain;
5349 while (!isa<PHINode>(ReductionPhi))
5350 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5351
5352 const RecurrenceDescriptor &RdxDesc =
5353 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5354
5355 InstructionCost BaseCost;
5356 RecurKind RK = RdxDesc.getRecurrenceKind();
5357   if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
5358     Intrinsic::ID MinMaxID = getMinMaxReductionIntrinsicOp(RK);
5359     BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5360 RdxDesc.getFastMathFlags(), CostKind);
5361 } else {
5362 BaseCost = TTI.getArithmeticReductionCost(
5363 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5364 }
5365
5366 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5367 // normal fmul instruction to the cost of the fadd reduction.
5368 if (RK == RecurKind::FMulAdd)
5369 BaseCost +=
5370 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5371
5372 // If we're using ordered reductions then we can just return the base cost
5373 // here, since getArithmeticReductionCost calculates the full ordered
5374 // reduction cost when FP reassociation is not allowed.
5375 if (useOrderedReductions(RdxDesc))
5376 return BaseCost;
5377
5378 // Get the operand that was not the reduction chain and match it to one of the
5379 // patterns, returning the better cost if it is found.
5380 Instruction *RedOp = RetI->getOperand(1) == LastChain
5381                            ? dyn_cast<Instruction>(RetI->getOperand(0))
5382                            : dyn_cast<Instruction>(RetI->getOperand(1));
5383
5384 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5385
5386 Instruction *Op0, *Op1;
5387 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5388 match(RedOp,
5389             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
5390       match(Op0, m_ZExtOrSExt(m_Value())) &&
5391 Op0->getOpcode() == Op1->getOpcode() &&
5392 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5393 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5394 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5395
5396 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5397 // Note that the extend opcodes need to all match, or if A==B they will have
5398 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5399 // which is equally fine.
5400 bool IsUnsigned = isa<ZExtInst>(Op0);
5401 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5402 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5403
5404 InstructionCost ExtCost =
5405 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5406                              TTI::CastContextHint::None, CostKind, Op0);
5407     InstructionCost MulCost =
5408 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5409 InstructionCost Ext2Cost =
5410 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5411                              TTI::CastContextHint::None, CostKind, RedOp);
5412
5413 InstructionCost RedCost = TTI.getMulAccReductionCost(
5414 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5415 CostKind);
5416
5417 if (RedCost.isValid() &&
5418 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5419 return I == RetI ? RedCost : 0;
5420 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5421 !TheLoop->isLoopInvariant(RedOp)) {
5422 // Matched reduce(ext(A))
5423 bool IsUnsigned = isa<ZExtInst>(RedOp);
5424 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5425 InstructionCost RedCost = TTI.getExtendedReductionCost(
5426 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5427 RdxDesc.getFastMathFlags(), CostKind);
5428
5429 InstructionCost ExtCost =
5430 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5431                              TTI::CastContextHint::None, CostKind, RedOp);
5432     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5433 return I == RetI ? RedCost : 0;
5434 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5435 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5436 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5437 Op0->getOpcode() == Op1->getOpcode() &&
5438 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5439 bool IsUnsigned = isa<ZExtInst>(Op0);
5440 Type *Op0Ty = Op0->getOperand(0)->getType();
5441 Type *Op1Ty = Op1->getOperand(0)->getType();
5442 Type *LargestOpTy =
5443 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5444 : Op0Ty;
5445 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5446
5447 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5448 // different sizes. We take the largest type as the ext to reduce, and add
5449 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5450 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5451 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5452           TTI::CastContextHint::None, CostKind, Op0);
5453       InstructionCost ExtCost1 = TTI.getCastInstrCost(
5454 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5455           TTI::CastContextHint::None, CostKind, Op1);
5456       InstructionCost MulCost =
5457 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5458
5459 InstructionCost RedCost = TTI.getMulAccReductionCost(
5460 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5461 CostKind);
5462 InstructionCost ExtraExtCost = 0;
5463 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5464 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5465 ExtraExtCost = TTI.getCastInstrCost(
5466 ExtraExtOp->getOpcode(), ExtType,
5467 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5468             TTI::CastContextHint::None, CostKind, ExtraExtOp);
5469       }
5470
5471 if (RedCost.isValid() &&
5472 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5473 return I == RetI ? RedCost : 0;
5474 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5475 // Matched reduce.add(mul())
5476 InstructionCost MulCost =
5477 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5478
5479 InstructionCost RedCost = TTI.getMulAccReductionCost(
5480 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5481 CostKind);
5482
5483 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5484 return I == RetI ? RedCost : 0;
5485 }
5486 }
5487
5488 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5489}
5490
5492LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5493 ElementCount VF) {
5494 // Calculate scalar cost only. Vectorization cost should be ready at this
5495 // moment.
5496 if (VF.isScalar()) {
5497 Type *ValTy = getLoadStoreType(I);
5498     Type *PtrTy = getLoadStorePointerOperand(I)->getType();
5499     const Align Alignment = getLoadStoreAlignment(I);
5500 unsigned AS = getLoadStoreAddressSpace(I);
5501
5502 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5503 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5504 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5505 OpInfo, I);
5506 }
5507 return getWideningCost(I, VF);
5508}
5509
5511LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5512 ElementCount VF) const {
5513
5514 // There is no mechanism yet to create a scalable scalarization loop,
5515 // so this is currently Invalid.
5516 if (VF.isScalable())
5517     return InstructionCost::getInvalid();
5518
5519 if (VF.isScalar())
5520 return 0;
5521
5522   InstructionCost Cost = 0;
5523   Type *RetTy = toVectorizedTy(I->getType(), VF);
5524 if (!RetTy->isVoidTy() &&
5525 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
5526
5527 for (Type *VectorTy : getContainedTypes(RetTy)) {
5528 Cost += TTI.getScalarizationOverhead(
5529           cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5530           /*Insert=*/true,
5531 /*Extract=*/false, CostKind);
5532 }
5533 }
5534
5535 // Some targets keep addresses scalar.
5536 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5537 return Cost;
5538
5539 // Some targets support efficient element stores.
5540 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5541 return Cost;
5542
5543 // Collect operands to consider.
5544 CallInst *CI = dyn_cast<CallInst>(I);
5545 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5546
5547 // Skip operands that do not require extraction/scalarization and do not incur
5548 // any overhead.
5549   SmallVector<Type *> Tys;
5550   for (auto *V : filterExtractingOperands(Ops, VF))
5551 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5552 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind);
5553}
5554
5555 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
5556   if (VF.isScalar())
5557 return;
5558 NumPredStores = 0;
5559 for (BasicBlock *BB : TheLoop->blocks()) {
5560 // For each instruction in the old loop.
5561 for (Instruction &I : *BB) {
5562       Value *Ptr = getLoadStorePointerOperand(&I);
5563       if (!Ptr)
5564 continue;
5565
5566 // TODO: We should generate better code and update the cost model for
5567 // predicated uniform stores. Today they are treated as any other
5568 // predicated store (see added test cases in
5569 // invariant-store-vectorization.ll).
5570       if (isa<StoreInst>(&I) && isPredicatedInst(&I))
5571         NumPredStores++;
5572
5573 if (Legal->isUniformMemOp(I, VF)) {
5574 auto IsLegalToScalarize = [&]() {
5575 if (!VF.isScalable())
5576 // Scalarization of fixed length vectors "just works".
5577 return true;
5578
5579 // We have dedicated lowering for unpredicated uniform loads and
5580 // stores. Note that even with tail folding we know that at least
5581 // one lane is active (i.e. generalized predication is not possible
5582 // here), and the logic below depends on this fact.
5583 if (!foldTailByMasking())
5584 return true;
5585
5586 // For scalable vectors, a uniform memop load is always
5587 // uniform-by-parts and we know how to scalarize that.
5588 if (isa<LoadInst>(I))
5589 return true;
5590
5591           // A uniform store isn't necessarily uniform-by-parts
5592           // and we can't assume scalarization.
5593 auto &SI = cast<StoreInst>(I);
5594 return TheLoop->isLoopInvariant(SI.getValueOperand());
5595 };
5596
5597 const InstructionCost GatherScatterCost =
5598             isLegalGatherOrScatter(&I, VF) ?
5599             getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5600
5601 // Load: Scalar load + broadcast
5602 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5603 // FIXME: This cost is a significant under-estimate for tail folded
5604 // memory ops.
5605 const InstructionCost ScalarizationCost =
5606 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5607                                : InstructionCost::getInvalid();
5608
5609           // Choose the better solution for the current VF. Note that Invalid
5610           // costs compare as maximally large. If both are invalid, we get
5611           // scalable invalid which signals a failure and a vectorization abort.
5612 if (GatherScatterCost < ScalarizationCost)
5613 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5614 else
5615 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5616 continue;
5617 }
5618
5619 // We assume that widening is the best solution when possible.
5620 if (memoryInstructionCanBeWidened(&I, VF)) {
5621 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5622 int ConsecutiveStride = Legal->isConsecutivePtr(
5623             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
5624         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5625 "Expected consecutive stride.");
5626 InstWidening Decision =
5627 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5628 setWideningDecision(&I, VF, Decision, Cost);
5629 continue;
5630 }
5631
5632 // Choose between Interleaving, Gather/Scatter or Scalarization.
5633       InstructionCost InterleaveCost = InstructionCost::getInvalid();
5634       unsigned NumAccesses = 1;
5635 if (isAccessInterleaved(&I)) {
5636 const auto *Group = getInterleavedAccessGroup(&I);
5637 assert(Group && "Fail to get an interleaved access group.");
5638
5639 // Make one decision for the whole group.
5640 if (getWideningDecision(&I, VF) != CM_Unknown)
5641 continue;
5642
5643 NumAccesses = Group->getNumMembers();
5644         if (interleavedAccessCanBeWidened(&I, VF))
5645           InterleaveCost = getInterleaveGroupCost(&I, VF);
5646 }
5647
5648 InstructionCost GatherScatterCost =
5649           isLegalGatherOrScatter(&I, VF)
5650               ? getGatherScatterCost(&I, VF) * NumAccesses
5651               : InstructionCost::getInvalid();
5652
5653 InstructionCost ScalarizationCost =
5654 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5655
5656       // Choose the better solution for the current VF,
5657 // write down this decision and use it during vectorization.
5658       InstructionCost Cost;
5659       InstWidening Decision;
5660 if (InterleaveCost <= GatherScatterCost &&
5661 InterleaveCost < ScalarizationCost) {
5662 Decision = CM_Interleave;
5663 Cost = InterleaveCost;
5664 } else if (GatherScatterCost < ScalarizationCost) {
5665 Decision = CM_GatherScatter;
5666 Cost = GatherScatterCost;
5667 } else {
5668 Decision = CM_Scalarize;
5669 Cost = ScalarizationCost;
5670 }
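      // Example with hypothetical costs: InterleaveCost = 12,
      // GatherScatterCost = 10, ScalarizationCost = 14 selects
      // CM_GatherScatter, since interleaving must beat both alternatives to
      // be chosen.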
5671       // If the instruction belongs to an interleave group, the whole group
5672 // receives the same decision. The whole group receives the cost, but
5673 // the cost will actually be assigned to one instruction.
5674 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5675 if (Decision == CM_Scalarize) {
5676 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5677 if (auto *I = Group->getMember(Idx)) {
5678 setWideningDecision(I, VF, Decision,
5679 getMemInstScalarizationCost(I, VF));
5680 }
5681 }
5682 } else {
5683 setWideningDecision(Group, VF, Decision, Cost);
5684 }
5685 } else
5686 setWideningDecision(&I, VF, Decision, Cost);
5687 }
5688 }
5689
5690 // Make sure that any load of address and any other address computation
5691 // remains scalar unless there is gather/scatter support. This avoids
5692 // inevitable extracts into address registers, and also has the benefit of
5693 // activating LSR more, since that pass can't optimize vectorized
5694 // addresses.
5695 if (TTI.prefersVectorizedAddressing())
5696 return;
5697
5698 // Start with all scalar pointer uses.
5699   SmallPtrSet<Instruction *, 8> AddrDefs;
5700   for (BasicBlock *BB : TheLoop->blocks())
5701 for (Instruction &I : *BB) {
5702 Instruction *PtrDef =
5703           dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5704       if (PtrDef && TheLoop->contains(PtrDef) &&
5705           getWideningDecision(&I, VF) != CM_GatherScatter)
5706         AddrDefs.insert(PtrDef);
5707 }
5708
5709 // Add all instructions used to generate the addresses.
5710   SmallVector<Instruction *, 4> Worklist;
5711   append_range(Worklist, AddrDefs);
5712 while (!Worklist.empty()) {
5713 Instruction *I = Worklist.pop_back_val();
5714 for (auto &Op : I->operands())
5715 if (auto *InstOp = dyn_cast<Instruction>(Op))
5716 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5717 AddrDefs.insert(InstOp).second)
5718 Worklist.push_back(InstOp);
5719 }
5720
5721 for (auto *I : AddrDefs) {
5722 if (isa<LoadInst>(I)) {
5723       // Setting the desired widening decision should ideally be handled
5724       // by cost functions, but since this involves the task of finding out
5725 // if the loaded register is involved in an address computation, it is
5726 // instead changed here when we know this is the case.
5727 InstWidening Decision = getWideningDecision(I, VF);
5728 if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5729 (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5730 Decision == CM_Scalarize))
5731 // Scalarize a widened load of address or update the cost of a scalar
5732 // load of an address.
5733         setWideningDecision(
5734             I, VF, CM_Scalarize,
5735 (VF.getKnownMinValue() *
5736 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5737 else if (const auto *Group = getInterleavedAccessGroup(I)) {
5738 // Scalarize an interleave group of address loads.
5739 for (unsigned I = 0; I < Group->getFactor(); ++I) {
5740 if (Instruction *Member = Group->getMember(I))
5741           setWideningDecision(
5742               Member, VF, CM_Scalarize,
5743 (VF.getKnownMinValue() *
5744 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
5745 }
5746 }
5747 } else {
5748 // Cannot scalarize fixed-order recurrence phis at the moment.
5749 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5750 continue;
5751
5752 // Make sure I gets scalarized and a cost estimate without
5753 // scalarization overhead.
5754 ForcedScalars[VF].insert(I);
5755 }
5756 }
5757}
5758
5759 void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
5760   assert(!VF.isScalar() &&
5761 "Trying to set a vectorization decision for a scalar VF");
5762
5763 auto ForcedScalar = ForcedScalars.find(VF);
5764 for (BasicBlock *BB : TheLoop->blocks()) {
5765 // For each instruction in the old loop.
5766 for (Instruction &I : *BB) {
5767       CallInst *CI = dyn_cast<CallInst>(&I);
5768
5769 if (!CI)
5770 continue;
5771
5772       InstructionCost ScalarCost = InstructionCost::getInvalid();
5773       InstructionCost VectorCost = InstructionCost::getInvalid();
5774       InstructionCost IntrinsicCost = InstructionCost::getInvalid();
5775       Function *ScalarFunc = CI->getCalledFunction();
5776 Type *ScalarRetTy = CI->getType();
5777 SmallVector<Type *, 4> Tys, ScalarTys;
5778 for (auto &ArgOp : CI->args())
5779 ScalarTys.push_back(ArgOp->getType());
5780
5781 // Estimate cost of scalarized vector call. The source operands are
5782 // assumed to be vectors, so we need to extract individual elements from
5783 // there, execute VF scalar calls, and then gather the result into the
5784 // vector return value.
5785 if (VF.isFixed()) {
5786 InstructionCost ScalarCallCost =
5787 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5788
5789 // Compute costs of unpacking argument values for the scalar calls and
5790 // packing the return values to a vector.
5791 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5792 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5793 } else {
5794 // There is no point attempting to calculate the scalar cost for a
5795 // scalable VF as we know it will be Invalid.
5796 assert(!getScalarizationOverhead(CI, VF).isValid() &&
5797 "Unexpected valid cost for scalarizing scalable vectors");
5798 ScalarCost = InstructionCost::getInvalid();
5799 }
5800
5801 // Honor ForcedScalars and UniformAfterVectorization decisions.
5802 // TODO: For calls, it might still be more profitable to widen. Use
5803 // VPlan-based cost model to compare different options.
5804 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5805 ForcedScalar->second.contains(CI)) ||
5806 isUniformAfterVectorization(CI, VF))) {
5807 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5808 Intrinsic::not_intrinsic, std::nullopt,
5809 ScalarCost);
5810 continue;
5811 }
5812
5813 bool MaskRequired = Legal->isMaskRequired(CI);
5814 // Compute corresponding vector type for return value and arguments.
5815 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5816 for (Type *ScalarTy : ScalarTys)
5817 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5818
5819 // An in-loop reduction using an fmuladd intrinsic is a special case;
5820 // we don't want the normal cost for that intrinsic.
5821       if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
5822         if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5823           setCallWideningDecision(CI, VF, CM_IntrinsicCall, nullptr,
5824                                   getVectorIntrinsicIDForCall(CI, TLI),
5825                                   std::nullopt, *RedCost);
5826 continue;
5827 }
5828
5829 // Find the cost of vectorizing the call, if we can find a suitable
5830 // vector variant of the function.
5831 VFInfo FuncInfo;
5832 Function *VecFunc = nullptr;
5833 // Search through any available variants for one we can use at this VF.
5834 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5835 // Must match requested VF.
5836 if (Info.Shape.VF != VF)
5837 continue;
5838
5839 // Must take a mask argument if one is required
5840 if (MaskRequired && !Info.isMasked())
5841 continue;
5842
5843 // Check that all parameter kinds are supported
5844 bool ParamsOk = true;
5845 for (VFParameter Param : Info.Shape.Parameters) {
5846 switch (Param.ParamKind) {
5847           case VFParamKind::Vector:
5848             break;
5849           case VFParamKind::OMP_Uniform: {
5850             Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5851 // Make sure the scalar parameter in the loop is invariant.
5852 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5853 TheLoop))
5854 ParamsOk = false;
5855 break;
5856 }
5857           case VFParamKind::OMP_Linear: {
5858             Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5859 // Find the stride for the scalar parameter in this loop and see if
5860 // it matches the stride for the variant.
5861 // TODO: do we need to figure out the cost of an extract to get the
5862 // first lane? Or do we hope that it will be folded away?
5863 ScalarEvolution *SE = PSE.getSE();
5864 if (!match(SE->getSCEV(ScalarParam),
5865                      m_scev_AffineAddRec(
5866                          m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5867                          m_SpecificLoop(TheLoop))))
5868 ParamsOk = false;
5869 break;
5870 }
5871           case VFParamKind::GlobalPredicate:
5872             break;
5873 default:
5874 ParamsOk = false;
5875 break;
5876 }
5877 }
5878
5879 if (!ParamsOk)
5880 continue;
5881
5882 // Found a suitable candidate, stop here.
5883 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5884 FuncInfo = Info;
5885 break;
5886 }
5887
5888 if (TLI && VecFunc && !CI->isNoBuiltin())
5889 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5890
5891 // Find the cost of an intrinsic; some targets may have instructions that
5892 // perform the operation without needing an actual call.
5893       Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
5894       if (IID != Intrinsic::not_intrinsic)
5895         IntrinsicCost = getVectorIntrinsicCost(CI, VF);
5896
5897 InstructionCost Cost = ScalarCost;
5898 InstWidening Decision = CM_Scalarize;
5899
5900 if (VectorCost <= Cost) {
5901 Cost = VectorCost;
5902 Decision = CM_VectorCall;
5903 }
5904
5905 if (IntrinsicCost <= Cost) {
5906         Cost = IntrinsicCost;
5907         Decision = CM_IntrinsicCall;
5908 }
5909
5910 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5911                               FuncInfo.getParamIndexForOptionalMask(), Cost);
5912     }
5913 }
5914}
5915
5916 bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
5917   if (!Legal->isInvariant(Op))
5918 return false;
5919 // Consider Op invariant, if it or its operands aren't predicated
5920 // instruction in the loop. In that case, it is not trivially hoistable.
5921 auto *OpI = dyn_cast<Instruction>(Op);
5922 return !OpI || !TheLoop->contains(OpI) ||
5923 (!isPredicatedInst(OpI) &&
5924 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5925 all_of(OpI->operands(),
5926 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5927}
5928
5929 InstructionCost
5930 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5931                                                ElementCount VF) {
5932 // If we know that this instruction will remain uniform, check the cost of
5933 // the scalar version.
5934   if (isUniformAfterVectorization(I, VF))
5935     VF = ElementCount::getFixed(1);
5936
5937 if (VF.isVector() && isProfitableToScalarize(I, VF))
5938 return InstsToScalarize[VF][I];
5939
5940 // Forced scalars do not have any scalarization overhead.
5941 auto ForcedScalar = ForcedScalars.find(VF);
5942 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5943 auto InstSet = ForcedScalar->second;
5944 if (InstSet.count(I))
5945       return getInstructionCost(I, ElementCount::getFixed(1)) *
5946              VF.getKnownMinValue();
5947 }
5948
5949 Type *RetTy = I->getType();
5950   if (canTruncateToMinimalBitwidth(I, VF))
5951     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5952 auto *SE = PSE.getSE();
5953
5954 Type *VectorTy;
5955 if (isScalarAfterVectorization(I, VF)) {
5956 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5957 [this](Instruction *I, ElementCount VF) -> bool {
5958 if (VF.isScalar())
5959 return true;
5960
5961 auto Scalarized = InstsToScalarize.find(VF);
5962 assert(Scalarized != InstsToScalarize.end() &&
5963 "VF not yet analyzed for scalarization profitability");
5964 return !Scalarized->second.count(I) &&
5965 llvm::all_of(I->users(), [&](User *U) {
5966 auto *UI = cast<Instruction>(U);
5967 return !Scalarized->second.count(UI);
5968 });
5969 };
5970
5971 // With the exception of GEPs and PHIs, after scalarization there should
5972 // only be one copy of the instruction generated in the loop. This is
5973 // because the VF is either 1, or any instructions that need scalarizing
5974 // have already been dealt with by the time we get here. As a result,
5975 // it means we don't have to multiply the instruction cost by VF.
5976 assert(I->getOpcode() == Instruction::GetElementPtr ||
5977 I->getOpcode() == Instruction::PHI ||
5978 (I->getOpcode() == Instruction::BitCast &&
5979 I->getType()->isPointerTy()) ||
5980 HasSingleCopyAfterVectorization(I, VF));
5981 VectorTy = RetTy;
5982 } else
5983 VectorTy = toVectorizedTy(RetTy, VF);
5984
5985 if (VF.isVector() && VectorTy->isVectorTy() &&
5986 !TTI.getNumberOfParts(VectorTy))
5987     return InstructionCost::getInvalid();
5988
5989 // TODO: We need to estimate the cost of intrinsic calls.
5990 switch (I->getOpcode()) {
5991 case Instruction::GetElementPtr:
5992 // We mark this instruction as zero-cost because the cost of GEPs in
5993 // vectorized code depends on whether the corresponding memory instruction
5994 // is scalarized or not. Therefore, we handle GEPs with the memory
5995 // instruction cost.
5996 return 0;
5997 case Instruction::Br: {
5998 // In cases of scalarized and predicated instructions, there will be VF
5999 // predicated blocks in the vectorized loop. Each branch around these
6000 // blocks requires also an extract of its vector compare i1 element.
6001 // Note that the conditional branch from the loop latch will be replaced by
6002 // a single branch controlling the loop, so there is no extra overhead from
6003 // scalarization.
6004 bool ScalarPredicatedBB = false;
6005     BranchInst *BI = cast<BranchInst>(I);
6006     if (VF.isVector() && BI->isConditional() &&
6007 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6008 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
6009 BI->getParent() != TheLoop->getLoopLatch())
6010 ScalarPredicatedBB = true;
6011
6012 if (ScalarPredicatedBB) {
6013 // Not possible to scalarize scalable vector with predicated instructions.
6014 if (VF.isScalable())
6015         return InstructionCost::getInvalid();
6016       // Return cost for branches around scalarized and predicated blocks.
6017 auto *VecI1Ty =
6018           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6019       return (
6020 TTI.getScalarizationOverhead(
6021 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6022 /*Insert*/ false, /*Extract*/ true, CostKind) +
6023 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6024 }
6025
6026 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6027 // The back-edge branch will remain, as will all scalar branches.
6028 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6029
6030 // This branch will be eliminated by if-conversion.
6031 return 0;
6032 // Note: We currently assume zero cost for an unconditional branch inside
6033 // a predicated block since it will become a fall-through, although we
6034 // may decide in the future to call TTI for all branches.
6035 }
6036 case Instruction::Switch: {
6037 if (VF.isScalar())
6038 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6039 auto *Switch = cast<SwitchInst>(I);
6040 return Switch->getNumCases() *
6041 TTI.getCmpSelInstrCost(
6042 Instruction::ICmp,
6043 toVectorTy(Switch->getCondition()->getType(), VF),
6044 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6045                CmpInst::ICMP_EQ, CostKind);
6046   }
6047 case Instruction::PHI: {
6048 auto *Phi = cast<PHINode>(I);
6049
6050 // First-order recurrences are replaced by vector shuffles inside the loop.
6051 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6052       SmallVector<int> Mask(VF.getKnownMinValue());
6053       std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6054 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6055 cast<VectorType>(VectorTy),
6056 cast<VectorType>(VectorTy), Mask, CostKind,
6057 VF.getKnownMinValue() - 1);
6058 }
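    // E.g. for VF = 4 the splice mask is {3, 4, 5, 6}: the last lane of the
    // previous iteration's vector followed by the first three lanes of the
    // current one.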
6059
6060 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6061 // converted into select instructions. We require N - 1 selects per phi
6062 // node, where N is the number of incoming values.
6063 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6064 Type *ResultTy = Phi->getType();
6065
6066 // All instructions in an Any-of reduction chain are narrowed to bool.
6067 // Check if that is the case for this phi node.
6068 auto *HeaderUser = cast_if_present<PHINode>(
6069 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6070 auto *Phi = dyn_cast<PHINode>(U);
6071 if (Phi && Phi->getParent() == TheLoop->getHeader())
6072 return Phi;
6073 return nullptr;
6074 }));
6075 if (HeaderUser) {
6076 auto &ReductionVars = Legal->getReductionVars();
6077 auto Iter = ReductionVars.find(HeaderUser);
6078 if (Iter != ReductionVars.end() &&
6079           RecurrenceDescriptor::isAnyOfRecurrenceKind(
6080               Iter->second.getRecurrenceKind()))
6081 ResultTy = Type::getInt1Ty(Phi->getContext());
6082 }
6083 return (Phi->getNumIncomingValues() - 1) *
6084 TTI.getCmpSelInstrCost(
6085 Instruction::Select, toVectorTy(ResultTy, VF),
6086 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6087                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
6088     }
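    // E.g. a phi merging three incoming values lowers to two vector selects,
    // hence the (NumIncomingValues - 1) scaling above.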
6089
6090 // When tail folding with EVL, if the phi is part of an out of loop
6091 // reduction then it will be transformed into a wide vp_merge.
6092 if (VF.isVector() && foldTailWithEVL() &&
6093 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6094       IntrinsicCostAttributes ICA(
6095           Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6096 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6097 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6098 }
6099
6100 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6101 }
6102 case Instruction::UDiv:
6103 case Instruction::SDiv:
6104 case Instruction::URem:
6105 case Instruction::SRem:
6106 if (VF.isVector() && isPredicatedInst(I)) {
6107 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6108 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6109 ScalarCost : SafeDivisorCost;
6110 }
6111 // We've proven all lanes safe to speculate, fall through.
6112 [[fallthrough]];
6113 case Instruction::Add:
6114 case Instruction::Sub: {
6115 auto Info = Legal->getHistogramInfo(I);
6116 if (Info && VF.isVector()) {
6117 const HistogramInfo *HGram = Info.value();
6118 // Assume that a non-constant update value (or a constant != 1) requires
6119 // a multiply, and add that into the cost.
6121 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6122 if (!RHS || RHS->getZExtValue() != 1)
6123 MulCost =
6124 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6125
6126 // Find the cost of the histogram operation itself.
6127 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6128 Type *ScalarTy = I->getType();
6129 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6130 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6131 Type::getVoidTy(I->getContext()),
6132 {PtrTy, ScalarTy, MaskTy});
6133
6134 // Add the costs together with the add/sub operation.
6135 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6136 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6137 }
6138 [[fallthrough]];
6139 }
6140 case Instruction::FAdd:
6141 case Instruction::FSub:
6142 case Instruction::Mul:
6143 case Instruction::FMul:
6144 case Instruction::FDiv:
6145 case Instruction::FRem:
6146 case Instruction::Shl:
6147 case Instruction::LShr:
6148 case Instruction::AShr:
6149 case Instruction::And:
6150 case Instruction::Or:
6151 case Instruction::Xor: {
6152 // If we're speculating on the stride being 1, the multiplication may
6153 // fold away. We can generalize this for all operations using the notion
6154 // of neutral elements. (TODO)
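// For example (illustrative): for 'A[i * Stride]', once the loop is
// versioned on the assumption 'Stride == 1', the multiply 'i * Stride'
// folds to 'i' and is free.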
6155 if (I->getOpcode() == Instruction::Mul &&
6156 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6157 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6158 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6159 PSE.getSCEV(I->getOperand(1))->isOne())))
6160 return 0;
6161
6162 // Detect reduction patterns
6163 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6164 return *RedCost;
6165
6166 // Certain instructions can be cheaper to vectorize if they have a constant
6167 // second vector operand. One example of this is shifts on x86.
6168 Value *Op2 = I->getOperand(1);
6169 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6170 PSE.getSE()->isSCEVable(Op2->getType()) &&
6171 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6172 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6173 }
6174 auto Op2Info = TTI.getOperandInfo(Op2);
6175 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6176 Legal->isInvariant(Op2))
6177 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
6178
6179 SmallVector<const Value *, 4> Operands(I->operand_values());
6180 return TTI.getArithmeticInstrCost(
6181 I->getOpcode(), VectorTy, CostKind,
6182 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6183 Op2Info, Operands, I, TLI);
6184 }
6185 case Instruction::FNeg: {
6186 return TTI.getArithmeticInstrCost(
6187 I->getOpcode(), VectorTy, CostKind,
6188 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6189 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6190 I->getOperand(0), I);
6191 }
6192 case Instruction::Select: {
6193 auto *SI = cast<SelectInst>(I);
6194 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6195 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6196
6197 const Value *Op0, *Op1;
6198 using namespace llvm::PatternMatch;
6199 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6200 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6201 // select x, y, false --> x & y
6202 // select x, true, y --> x | y
6203 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6204 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6205 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6206 Op1->getType()->getScalarSizeInBits() == 1);
6207
6208 return TTI.getArithmeticInstrCost(
6209 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6210 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6211 }
6212
6213 Type *CondTy = SI->getCondition()->getType();
6214 if (!ScalarCond)
6215 CondTy = VectorType::get(CondTy, VF);
6216
6217 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
6218 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6219 Pred = Cmp->getPredicate();
6220 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6221 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6222 {TTI::OK_AnyValue, TTI::OP_None}, I);
6223 }
6224 case Instruction::ICmp:
6225 case Instruction::FCmp: {
6226 Type *ValTy = I->getOperand(0)->getType();
6227
6228 if (canTruncateToMinimalBitwidth(I, VF)) {
6229 [[maybe_unused]] Instruction *Op0AsInstruction =
6230 dyn_cast<Instruction>(I->getOperand(0));
6231 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6232 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6233 "if both the operand and the compare are marked for "
6234 "truncation, they must have the same bitwidth");
6235 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6236 }
6237
6238 VectorTy = toVectorTy(ValTy, VF);
6239 return TTI.getCmpSelInstrCost(
6240 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6241 cast<CmpInst>(I)->getPredicate(), CostKind,
6242 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6243 }
6244 case Instruction::Store:
6245 case Instruction::Load: {
6246 ElementCount Width = VF;
6247 if (Width.isVector()) {
6248 InstWidening Decision = getWideningDecision(I, Width);
6249 assert(Decision != CM_Unknown &&
6250 "CM decision should be taken at this point");
6251 if (getWideningCost(I, Width) == InstructionCost::getInvalid())
6252 return InstructionCost::getInvalid();
6253 if (Decision == CM_Scalarize)
6254 Width = ElementCount::getFixed(1);
6255 }
6256 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6257 return getMemoryInstructionCost(I, VF);
6258 }
6259 case Instruction::BitCast:
6260 if (I->getType()->isPointerTy())
6261 return 0;
6262 [[fallthrough]];
6263 case Instruction::ZExt:
6264 case Instruction::SExt:
6265 case Instruction::FPToUI:
6266 case Instruction::FPToSI:
6267 case Instruction::FPExt:
6268 case Instruction::PtrToInt:
6269 case Instruction::IntToPtr:
6270 case Instruction::SIToFP:
6271 case Instruction::UIToFP:
6272 case Instruction::Trunc:
6273 case Instruction::FPTrunc: {
6274 // Computes the CastContextHint from a Load/Store instruction.
6275 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6277 "Expected a load or a store!");
6278
6279 if (VF.isScalar() || !TheLoop->contains(I))
6280 return TTI::CastContextHint::Normal;
6281
6282 switch (getWideningDecision(I, VF)) {
6283 case LoopVectorizationCostModel::CM_GatherScatter:
6284 return TTI::CastContextHint::GatherScatter;
6285 case LoopVectorizationCostModel::CM_Interleave:
6286 return TTI::CastContextHint::Interleave;
6287 case LoopVectorizationCostModel::CM_Scalarize:
6288 case LoopVectorizationCostModel::CM_Widen:
6289 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
6290 : TTI::CastContextHint::Normal;
6291 case LoopVectorizationCostModel::CM_Widen_Reverse:
6292 return TTI::CastContextHint::Reversed;
6293 case LoopVectorizationCostModel::CM_Unknown:
6294 llvm_unreachable("Instr did not go through cost modelling?");
6295 case LoopVectorizationCostModel::CM_VectorCall:
6296 case LoopVectorizationCostModel::CM_IntrinsicCall:
6297 llvm_unreachable_internal("Instr has invalid widening decision");
6298 }
6299
6300 llvm_unreachable("Unhandled case!");
6301 };
6302
6303 unsigned Opcode = I->getOpcode();
6304 TTI::CastContextHint CCH = TTI::CastContextHint::None;
6305 // For Trunc, the context is the only user, which must be a StoreInst.
6306 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6307 if (I->hasOneUse())
6308 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6309 CCH = ComputeCCH(Store);
6310 }
6311 // For Z/Sext, the context is the operand, which must be a LoadInst.
6312 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6313 Opcode == Instruction::FPExt) {
6314 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6315 CCH = ComputeCCH(Load);
6316 }
6317
6318 // We optimize the truncation of induction variables having constant
6319 // integer steps. The cost of these truncations is the same as the scalar
6320 // operation.
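// For example (illustrative): 'trunc i64 %iv to i32', where %iv has a
// constant step, becomes an i32 induction with a truncated step, so only
// a scalar trunc is costed.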
6321 if (isOptimizableIVTruncate(I, VF)) {
6322 auto *Trunc = cast<TruncInst>(I);
6323 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6324 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6325 }
6326
6327 // Detect reduction patterns
6328 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6329 return *RedCost;
6330
6331 Type *SrcScalarTy = I->getOperand(0)->getType();
6332 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6333 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6334 SrcScalarTy =
6335 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6336 Type *SrcVecTy =
6337 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6338
6339 if (canTruncateToMinimalBitwidth(I, VF)) {
6340 // If the result type is <= the source type, there will be no extend
6341 // after truncating the users to the minimal required bitwidth.
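// For example (illustrative): 'zext i8 %x to i32' is free when its users
// were all narrowed back to i8, because no actual extend remains after
// truncation.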
6342 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6343 (I->getOpcode() == Instruction::ZExt ||
6344 I->getOpcode() == Instruction::SExt))
6345 return 0;
6346 }
6347
6348 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6349 }
6350 case Instruction::Call:
6351 return getVectorCallCost(cast<CallInst>(I), VF);
6352 case Instruction::ExtractValue:
6353 return TTI.getInstructionCost(I, CostKind);
6354 case Instruction::Alloca:
6355 // We cannot easily widen alloca to a scalable alloca, as
6356 // the result would need to be a vector of pointers.
6357 if (VF.isScalable())
6358 return InstructionCost::getInvalid();
6359 [[fallthrough]];
6360 default:
6361 // This opcode is unknown. Assume that it is the same as 'mul'.
6362 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6363 } // end of switch.
6364}
6365
6366 void LoopVectorizationCostModel::collectValuesToIgnore() {
6367 // Ignore ephemeral values.
6368 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6369
6370 SmallVector<Value *, 4> DeadInterleavePointerOps;
6371 SmallVector<Value *, 4> DeadOps;
6372
6373 // If a scalar epilogue is required, users outside the loop won't use
6374 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6375 // that is the case.
6376 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6377 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6378 return RequiresScalarEpilogue &&
6379 !TheLoop->contains(cast<Instruction>(U)->getParent());
6380 };
6381
6382 LoopBlocksDFS DFS(TheLoop);
6383 DFS.perform(LI);
6384 MapVector<Value *, SmallVector<Value *>> DeadInvariantStoreOps;
6385 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6386 for (Instruction &I : reverse(*BB)) {
6387 // Find all stores to invariant variables. Since they are going to sink
6388 // outside the loop, we do not need to calculate their cost.
6389 StoreInst *SI;
6390 if ((SI = dyn_cast<StoreInst>(&I)) &&
6391 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
6392 ValuesToIgnore.insert(&I);
6393 DeadInvariantStoreOps[SI->getPointerOperand()].push_back(
6394 SI->getValueOperand());
6395 }
6396
6397 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6398 continue;
6399
6400 // Add instructions that would be trivially dead and are only used by
6401 // values already ignored to DeadOps to seed worklist.
6402 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
6403 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6404 return VecValuesToIgnore.contains(U) ||
6405 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6406 }))
6407 DeadOps.push_back(&I);
6408
6409 // For interleave groups, we only create a pointer for the start of the
6410 // interleave group. Queue up addresses of group members except the insert
6411 // position for further processing.
6412 if (isAccessInterleaved(&I)) {
6413 auto *Group = getInterleavedAccessGroup(&I);
6414 if (Group->getInsertPos() == &I)
6415 continue;
6416 Value *PointerOp = getLoadStorePointerOperand(&I);
6417 DeadInterleavePointerOps.push_back(PointerOp);
6418 }
6419
6420 // Queue branches for analysis. They are dead, if their successors only
6421 // contain dead instructions.
6422 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6423 if (Br->isConditional())
6424 DeadOps.push_back(&I);
6425 }
6426 }
6427
6428 // Mark ops feeding interleave group members as free, if they are only used
6429 // by other dead computations.
6430 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6431 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6432 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6433 Instruction *UI = cast<Instruction>(U);
6434 return !VecValuesToIgnore.contains(U) &&
6435 (!isAccessInterleaved(UI) ||
6436 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6437 }))
6438 continue;
6439 VecValuesToIgnore.insert(Op);
6440 append_range(DeadInterleavePointerOps, Op->operands());
6441 }
6442
6443 for (const auto &[_, Ops] : DeadInvariantStoreOps)
6444 llvm::append_range(DeadOps, drop_end(Ops));
6445
6446 // Mark ops that would be trivially dead and are only used by ignored
6447 // instructions as free.
6448 BasicBlock *Header = TheLoop->getHeader();
6449
6450 // Returns true if the block contains only dead instructions. Such blocks will
6451 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6452 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6453 auto IsEmptyBlock = [this](BasicBlock *BB) {
6454 return all_of(*BB, [this](Instruction &I) {
6455 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6456 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6457 });
6458 };
6459 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6460 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6461
6462 // Check if the branch should be considered dead.
6463 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6464 BasicBlock *ThenBB = Br->getSuccessor(0);
6465 BasicBlock *ElseBB = Br->getSuccessor(1);
6466 // Don't consider branches leaving the loop for simplification.
6467 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6468 continue;
6469 bool ThenEmpty = IsEmptyBlock(ThenBB);
6470 bool ElseEmpty = IsEmptyBlock(ElseBB);
6471 if ((ThenEmpty && ElseEmpty) ||
6472 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6473 ElseBB->phis().empty()) ||
6474 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6475 ThenBB->phis().empty())) {
6476 VecValuesToIgnore.insert(Br);
6477 DeadOps.push_back(Br->getCondition());
6478 }
6479 continue;
6480 }
6481
6482 // Skip any op that shouldn't be considered dead.
6483 if (!Op || !TheLoop->contains(Op) ||
6484 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6485 !wouldInstructionBeTriviallyDead(Op, TLI) ||
6486 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6487 return !VecValuesToIgnore.contains(U) &&
6488 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6489 }))
6490 continue;
6491
6492 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6493 // which applies for both scalar and vector versions. Otherwise it is only
6494 // dead in vector versions, so only add it to VecValuesToIgnore.
6495 if (all_of(Op->users(),
6496 [this](User *U) { return ValuesToIgnore.contains(U); }))
6497 ValuesToIgnore.insert(Op);
6498
6499 VecValuesToIgnore.insert(Op);
6500 append_range(DeadOps, Op->operands());
6501 }
6502
6503 // Ignore type-promoting instructions we identified during reduction
6504 // detection.
6505 for (const auto &Reduction : Legal->getReductionVars()) {
6506 const RecurrenceDescriptor &RedDes = Reduction.second;
6507 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6508 VecValuesToIgnore.insert_range(Casts);
6509 }
6510 // Ignore type-casting instructions we identified during induction
6511 // detection.
6512 for (const auto &Induction : Legal->getInductionVars()) {
6513 const InductionDescriptor &IndDes = Induction.second;
6514 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6515 VecValuesToIgnore.insert_range(Casts);
6516 }
6517}
6518
6519 void LoopVectorizationCostModel::collectInLoopReductions() {
6520 // Avoid duplicating work finding in-loop reductions.
6521 if (!InLoopReductions.empty())
6522 return;
6523
6524 for (const auto &Reduction : Legal->getReductionVars()) {
6525 PHINode *Phi = Reduction.first;
6526 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6527
6528 // We don't collect reductions that are type promoted (yet).
6529 if (RdxDesc.getRecurrenceType() != Phi->getType())
6530 continue;
6531
6532 // If the target would prefer this reduction to happen "in-loop", then we
6533 // want to record it as such.
6534 RecurKind Kind = RdxDesc.getRecurrenceKind();
6535 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6536 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6537 continue;
6538
6539 // Check that we can correctly put the reductions into the loop, by
6540 // finding the chain of operations that leads from the phi to the loop
6541 // exit value.
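// For example (illustrative): for an integer sum reduction, the chain is
//   %red = phi [ 0, %ph ], [ %add, %loop ] ... %add = add %red, %x
// and %add is the link recorded from the phi to the loop-exit value.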
6542 SmallVector<Instruction *, 4> ReductionOperations =
6543 RdxDesc.getReductionOpChain(Phi, TheLoop);
6544 bool InLoop = !ReductionOperations.empty();
6545
6546 if (InLoop) {
6547 InLoopReductions.insert(Phi);
6548 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6549 Instruction *LastChain = Phi;
6550 for (auto *I : ReductionOperations) {
6551 InLoopReductionImmediateChains[I] = LastChain;
6552 LastChain = I;
6553 }
6554 }
6555 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6556 << " reduction for phi: " << *Phi << "\n");
6557 }
6558}
6559
6560// This function will select a scalable VF if the target supports scalable
6561// vectors and a fixed one otherwise.
6562// TODO: we could return a pair of values that specify the max VF and
6563// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6564// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6565// doesn't have a cost model that can choose which plan to execute if
6566// more than one is generated.
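// For example (illustrative): with 128-bit vector registers and i32 as the
// widest type in the loop, this returns VF = 4 for fixed-width vectors, or
// 'vscale x 4' when scalable vectorization is enabled.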
6567 static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
6568 LoopVectorizationCostModel &CM) {
6569 unsigned WidestType;
6570 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6571
6572 TargetTransformInfo::RegisterKind RegKind =
6573 TTI.enableScalableVectorization()
6574 ? TargetTransformInfo::RGK_ScalableVector
6575 : TargetTransformInfo::RGK_FixedWidthVector;
6576
6577 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6578 unsigned N = RegSize.getKnownMinValue() / WidestType;
6579 return ElementCount::get(N, RegSize.isScalable());
6580}
6581
6582 VectorizationFactor
6583 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6584 ElementCount VF = UserVF;
6585 // Outer loop handling: They may require CFG and instruction level
6586 // transformations before even evaluating whether vectorization is profitable.
6587 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6588 // the vectorization pipeline.
6589 if (!OrigLoop->isInnermost()) {
6590 // If the user doesn't provide a vectorization factor, determine a
6591 // reasonable one.
6592 if (UserVF.isZero()) {
6593 VF = determineVPlanVF(TTI, CM);
6594 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6595
6596 // Make sure we have a VF > 1 for stress testing.
6597 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6598 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6599 << "overriding computed VF.\n");
6600 VF = ElementCount::getFixed(4);
6601 }
6602 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6603 !ForceTargetSupportsScalableVectors) {
6604 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6605 << "not supported by the target.\n");
6607 "Scalable vectorization requested but not supported by the target",
6608 "the scalable user-specified vectorization width for outer-loop "
6609 "vectorization cannot be used because the target does not support "
6610 "scalable vectors.",
6611 "ScalableVFUnfeasible", ORE, OrigLoop);
6612 return VectorizationFactor::Disabled();
6613 }
6614 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6616 "VF needs to be a power of two");
6617 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6618 << "VF " << VF << " to build VPlans.\n");
6619 buildVPlans(VF, VF);
6620
6621 if (VPlans.empty())
6622 return VectorizationFactor::Disabled();
6623
6624 // For VPlan build stress testing, we bail out after VPlan construction.
6625 if (VPlanBuildStressTest)
6626 return VectorizationFactor::Disabled();
6627
6628 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6629 }
6630
6631 LLVM_DEBUG(
6632 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6633 "VPlan-native path.\n");
6634 return VectorizationFactor::Disabled();
6635 }
6636
6637void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6638 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6639 CM.collectValuesToIgnore();
6640 CM.collectElementTypesForWidening();
6641
6642 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6643 if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
6644 return;
6645
6646 // Invalidate interleave groups if all blocks of loop will be predicated.
6647 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6648 !useMaskedInterleavedAccesses(TTI)) {
6649 LLVM_DEBUG(
6650 dbgs()
6651 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6652 "which requires masked-interleaved support.\n");
6653 if (CM.InterleaveInfo.invalidateGroups())
6654 // Invalidating interleave groups also requires invalidating all decisions
6655 // based on them, which includes widening decisions and uniform and scalar
6656 // values.
6657 CM.invalidateCostModelingDecisions();
6658 }
6659
6660 if (CM.foldTailByMasking())
6661 Legal->prepareToFoldTailByMasking();
6662
6663 ElementCount MaxUserVF =
6664 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6665 if (UserVF) {
6666 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6668 "UserVF ignored because it may be larger than the maximal safe VF",
6669 "InvalidUserVF", ORE, OrigLoop);
6670 } else {
6672 "VF needs to be a power of two");
6673 // Collect the instructions (and their associated costs) that will be more
6674 // profitable to scalarize.
6675 CM.collectInLoopReductions();
6676 if (CM.selectUserVectorizationFactor(UserVF)) {
6677 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6678 buildVPlansWithVPRecipes(UserVF, UserVF);
6679 LLVM_DEBUG(printPlans(dbgs()));
6680 return;
6681 }
6682 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6683 "InvalidCost", ORE, OrigLoop);
6684 }
6685 }
6686
6687 // Collect the Vectorization Factor Candidates.
6688 SmallVector<ElementCount> VFCandidates;
6689 for (auto VF = ElementCount::getFixed(1);
6690 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6691 VFCandidates.push_back(VF);
6692 for (auto VF = ElementCount::getScalable(1);
6693 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6694 VFCandidates.push_back(VF);
6695
6696 CM.collectInLoopReductions();
6697 for (const auto &VF : VFCandidates) {
6698 // Collect Uniform and Scalar instructions after vectorization with VF.
6699 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6700 }
6701
6702 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6703 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6704
6705 LLVM_DEBUG(printPlans(dbgs()));
6706 }
6707
6708 InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
6709 ElementCount VF) const {
6710 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6711 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6712 return InstructionCost(ForceTargetInstructionCost);
6713 return Cost;
6714}
6715
6716 bool VPCostContext::isLegacyUniformAfterVectorization(Instruction *I,
6717 ElementCount VF) const {
6718 return CM.isUniformAfterVectorization(I, VF);
6719}
6720
6721bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6722 return CM.ValuesToIgnore.contains(UI) ||
6723 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6724 SkipCostComputation.contains(UI);
6725}
6726
6728LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6729 VPCostContext &CostCtx) const {
6730 InstructionCost Cost;
6731 // Cost modeling for inductions is inaccurate in the legacy cost model
6732 // compared to the recipes that are generated. To match here initially during
6733 // VPlan cost model bring up directly use the induction costs from the legacy
6734 // cost model. Note that we do this as pre-processing; the VPlan may not have
6735 // any recipes associated with the original induction increment instruction
6736 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6737 // the cost of induction phis and increments (both that are represented by
6738 // recipes and those that are not), to avoid distinguishing between them here,
6739 // and skip all recipes that represent induction phis and increments (the
6740 // former case) later on, if they exist, to avoid counting them twice.
6741 // Similarly we pre-compute the cost of any optimized truncates.
6742 // TODO: Switch to more accurate costing based on VPlan.
6743 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6744 Instruction *IVInc = cast<Instruction>(
6745 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6746 SmallVector<Instruction *> IVInsts = {IVInc};
6747 for (unsigned I = 0; I != IVInsts.size(); I++) {
6748 for (Value *Op : IVInsts[I]->operands()) {
6749 auto *OpI = dyn_cast<Instruction>(Op);
6750 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6751 continue;
6752 IVInsts.push_back(OpI);
6753 }
6754 }
6755 IVInsts.push_back(IV);
6756 for (User *U : IV->users()) {
6757 auto *CI = cast<Instruction>(U);
6758 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6759 continue;
6760 IVInsts.push_back(CI);
6761 }
6762
6763 // If the vector loop gets executed exactly once with the given VF, ignore
6764 // the costs of comparison and induction instructions, as they'll get
6765 // simplified away.
6766 // TODO: Remove this code after stepping away from the legacy cost model and
6767 // adding code to simplify VPlans before calculating their costs.
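// For example (illustrative): with a constant trip count of 8 and VF = 8,
// the vector body executes exactly once, so the IV increment and the latch
// compare are simplified away and must not be charged.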
6768 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6769 if (TC == VF && !CM.foldTailByMasking())
6770 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6771 CostCtx.SkipCostComputation);
6772
6773 for (Instruction *IVInst : IVInsts) {
6774 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6775 continue;
6776 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6777 LLVM_DEBUG({
6778 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6779 << ": induction instruction " << *IVInst << "\n";
6780 });
6781 Cost += InductionCost;
6782 CostCtx.SkipCostComputation.insert(IVInst);
6783 }
6784 }
6785
6786 /// Compute the cost of all exiting conditions of the loop using the legacy
6787 /// cost model. This is to match the legacy behavior, which adds the cost of
6788 /// all exit conditions. Note that this over-estimates the cost, as there will
6789 /// be a single condition to control the vector loop.
6790 SmallVector<BasicBlock *> Exiting;
6791 CM.TheLoop->getExitingBlocks(Exiting);
6792 SetVector<Instruction *> ExitInstrs;
6793 // Collect all exit conditions.
6794 for (BasicBlock *EB : Exiting) {
6795 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6796 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6797 continue;
6798 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6799 ExitInstrs.insert(CondI);
6800 }
6801 }
6802 // Compute the cost of all instructions only feeding the exit conditions.
6803 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6804 Instruction *CondI = ExitInstrs[I];
6805 if (!OrigLoop->contains(CondI) ||
6806 !CostCtx.SkipCostComputation.insert(CondI).second)
6807 continue;
6808 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6809 LLVM_DEBUG({
6810 dbgs() << "Cost of " << CondICost << " for VF " << VF
6811 << ": exit condition instruction " << *CondI << "\n";
6812 });
6813 Cost += CondICost;
6814 for (Value *Op : CondI->operands()) {
6815 auto *OpI = dyn_cast<Instruction>(Op);
6816 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6817 any_of(OpI->users(), [&ExitInstrs, this](User *U) {
6818 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
6819 !ExitInstrs.contains(cast<Instruction>(U));
6820 }))
6821 continue;
6822 ExitInstrs.insert(OpI);
6823 }
6824 }
6825
6826 // Pre-compute the costs for branches except for the backedge, as the number
6827 // of replicate regions in a VPlan may not directly match the number of
6828 // branches, which would lead to different decisions.
6829 // TODO: Compute cost of branches for each replicate region in the VPlan,
6830 // which is more accurate than the legacy cost model.
6831 for (BasicBlock *BB : OrigLoop->blocks()) {
6832 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6833 continue;
6834 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6835 if (BB == OrigLoop->getLoopLatch())
6836 continue;
6837 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6838 Cost += BranchCost;
6839 }
6840
6841 // Pre-compute costs for instructions that are forced-scalar or profitable to
6842 // scalarize. Their costs will be computed separately in the legacy cost
6843 // model.
6844 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6845 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6846 continue;
6847 CostCtx.SkipCostComputation.insert(ForcedScalar);
6848 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6849 LLVM_DEBUG({
6850 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6851 << ": forced scalar " << *ForcedScalar << "\n";
6852 });
6853 Cost += ForcedCost;
6854 }
6855 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6856 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6857 continue;
6858 CostCtx.SkipCostComputation.insert(Scalarized);
6859 LLVM_DEBUG({
6860 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6861 << ": profitable to scalarize " << *Scalarized << "\n";
6862 });
6863 Cost += ScalarCost;
6864 }
6865
6866 return Cost;
6867}
6868
6869InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
6870 ElementCount VF) const {
6871 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind);
6872 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6873
6874 // Now compute and add the VPlan-based cost.
6875 Cost += Plan.cost(VF, CostCtx);
6876#ifndef NDEBUG
6877 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6878 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6879 << " (Estimated cost per lane: ");
6880 if (Cost.isValid()) {
6881 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6882 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6883 } else /* No point dividing an invalid cost - it will still be invalid */
6884 LLVM_DEBUG(dbgs() << "Invalid");
6885 LLVM_DEBUG(dbgs() << ")\n");
6886#endif
6887 return Cost;
6888}
6889
6890#ifndef NDEBUG
6891 /// Return true if the original loop \p TheLoop contains any instructions that do
6892/// not have corresponding recipes in \p Plan and are not marked to be ignored
6893/// in \p CostCtx. This means the VPlan contains simplification that the legacy
6894/// cost-model did not account for.
6895 static bool planContainsAdditionalSimplifications(VPlan &Plan,
6896 VPCostContext &CostCtx,
6897 Loop *TheLoop,
6898 ElementCount VF) {
6899 // First collect all instructions for the recipes in Plan.
6900 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
6901 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
6902 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
6903 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
6904 return &WidenMem->getIngredient();
6905 return nullptr;
6906 };
6907
6908 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
6909 // the select doesn't need to be considered for the vector loop cost; go with
6910 // the more accurate VPlan-based cost model.
6911 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
6912 auto *VPI = dyn_cast<VPInstruction>(&R);
6913 if (!VPI || VPI->getOpcode() != Instruction::Select ||
6914 VPI->getNumUsers() != 1)
6915 continue;
6916
6917 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPI->user_begin())) {
6918 switch (WR->getOpcode()) {
6919 case Instruction::UDiv:
6920 case Instruction::SDiv:
6921 case Instruction::URem:
6922 case Instruction::SRem:
6923 return true;
6924 default:
6925 break;
6926 }
6927 }
6928 }
6929
6930 DenseSet<Instruction *> SeenInstrs;
6931 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
6932 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
6933 for (VPRecipeBase &R : *VPBB) {
6934 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
6935 auto *IG = IR->getInterleaveGroup();
6936 unsigned NumMembers = IG->getNumMembers();
6937 for (unsigned I = 0; I != NumMembers; ++I) {
6938 if (Instruction *M = IG->getMember(I))
6939 SeenInstrs.insert(M);
6940 }
6941 continue;
6942 }
6943 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
6944 // cost model won't cost it whilst the legacy will.
6945 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
6946 using namespace VPlanPatternMatch;
6947 if (none_of(FOR->users(),
6948 match_fn(m_VPInstruction<
6949 VPInstruction::FirstOrderRecurrenceSplice>())))
6950 return true;
6951 }
6952 // The VPlan-based cost model is more accurate for partial reduction and
6953 // comparing against the legacy cost isn't desirable.
6954 if (isa<VPPartialReductionRecipe>(&R))
6955 return true;
6956
6957 // The VPlan-based cost model can analyze if recipes are scalar
6958 // recursively, but the legacy cost model cannot.
6959 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
6960 auto *AddrI = dyn_cast<Instruction>(
6961 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
6962 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
6963 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
6964 return true;
6965 }
6966
6967 /// If a VPlan transform folded a recipe to one producing a single-scalar,
6968 /// but the original instruction wasn't uniform-after-vectorization in the
6969 /// legacy cost model, the legacy cost overestimates the actual cost.
6970 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
6971 if (RepR->isSingleScalar() &&
6972 !CostCtx.isLegacyUniformAfterVectorization(
6973 RepR->getUnderlyingInstr(), VF))
6974 return true;
6975 }
6976 if (Instruction *UI = GetInstructionForCost(&R)) {
6977 // If we adjusted the predicate of the recipe, the cost in the legacy
6978 // cost model may be different.
6979 using namespace VPlanPatternMatch;
6980 CmpPredicate Pred;
6981 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
6982 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
6983 cast<CmpInst>(UI)->getPredicate())
6984 return true;
6985 SeenInstrs.insert(UI);
6986 }
6987 }
6988 }
6989
6990 // Return true if the loop contains any instructions that are not also part of
6991 // the VPlan or are skipped for VPlan-based cost computations. This indicates
6992 // that the VPlan contains extra simplifications.
6993 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
6994 TheLoop](BasicBlock *BB) {
6995 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
6996 // Skip induction phis when checking for simplifications, as they may not
6997 // be lowered directly to a corresponding PHI recipe.
6998 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
6999 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
7000 return false;
7001 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
7002 });
7003 });
7004}
7005#endif
7006
7007 VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
7008 if (VPlans.empty())
7009 return VectorizationFactor::Disabled();
7010 // If there is a single VPlan with a single VF, return it directly.
7011 VPlan &FirstPlan = *VPlans[0];
7012 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
7013 return {*FirstPlan.vectorFactors().begin(), 0, 0};
7014
7015 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
7016 << (CM.CostKind == TTI::TCK_RecipThroughput
7017 ? "Reciprocal Throughput\n"
7018 : CM.CostKind == TTI::TCK_Latency
7019 ? "Instruction Latency\n"
7020 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
7021 : CM.CostKind == TTI::TCK_SizeAndLatency
7022 ? "Code Size and Latency\n"
7023 : "Unknown\n"));
7024
7025 ElementCount ScalarVF = ElementCount::getFixed(1);
7026 assert(hasPlanWithVF(ScalarVF) &&
7027 "More than a single plan/VF w/o any plan having scalar VF");
7028
7029 // TODO: Compute scalar cost using VPlan-based cost model.
7030 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
7031 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7032 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7033 VectorizationFactor BestFactor = ScalarFactor;
7034
7035 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7036 if (ForceVectorization) {
7037 // Ignore scalar width, because the user explicitly wants vectorization.
7038 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7039 // evaluation.
7040 BestFactor.Cost = InstructionCost::getMax();
7041 }
7042
7043 for (auto &P : VPlans) {
7044 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7045 P->vectorFactors().end());
7046
7047 SmallVector<VPRegisterUsage, 8> RUs;
7048 if (any_of(VFs, [this](ElementCount VF) {
7049 return CM.shouldConsiderRegPressureForVF(VF);
7050 }))
7051 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7052
7053 for (unsigned I = 0; I < VFs.size(); I++) {
7054 ElementCount VF = VFs[I];
7055 if (VF.isScalar())
7056 continue;
7057 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7058 LLVM_DEBUG(
7059 dbgs()
7060 << "LV: Not considering vector loop of width " << VF
7061 << " because it will not generate any vector instructions.\n");
7062 continue;
7063 }
7064 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7065 LLVM_DEBUG(
7066 dbgs()
7067 << "LV: Not considering vector loop of width " << VF
7068 << " because it would cause replicated blocks to be generated,"
7069 << " which isn't allowed when optimizing for size.\n");
7070 continue;
7071 }
7072
7073 InstructionCost Cost = cost(*P, VF);
7074 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7075
7076 if (CM.shouldConsiderRegPressureForVF(VF) &&
7077 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7078 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7079 << VF << " because it uses too many registers\n");
7080 continue;
7081 }
7082
7083 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7084 BestFactor = CurrentFactor;
7085
7086 // If profitable add it to ProfitableVF list.
7087 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7088 ProfitableVFs.push_back(CurrentFactor);
7089 }
7090 }
7091
7092#ifndef NDEBUG
7093 // Select the optimal vectorization factor according to the legacy cost-model.
7094 // This is now only used to verify the decisions by the new VPlan-based
7095 // cost-model and will be retired once the VPlan-based cost-model is
7096 // stabilized.
7097 VectorizationFactor LegacyVF = selectVectorizationFactor();
7098 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7099
7100 // Pre-compute the cost and use it to check if BestPlan contains any
7101 // simplifications not accounted for in the legacy cost model. If that's the
7102 // case, don't trigger the assertion, as the extra simplifications may cause a
7103 // different VF to be picked by the VPlan-based cost model.
7104 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind);
7105 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7106 // Verify that the VPlan-based and legacy cost models agree, except for VPlans
7107 // with early exits and plans with additional VPlan simplifications. The
7108 // legacy cost model doesn't properly model costs for such loops.
7109 assert((BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7110 planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width),
7111 CostCtx, OrigLoop,
7112 BestFactor.Width) ||
7113 planContainsAdditionalSimplifications(
7114 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7115 " VPlan cost model and legacy cost model disagreed");
7116 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7117 "when vectorizing, the scalar cost must be computed.");
7118#endif
7119
7120 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7121 return BestFactor;
7122}
7123
7124 static Value *getStartValueFromReductionResult(VPInstruction *RdxResult) {
7125 using namespace VPlanPatternMatch;
7127 "RdxResult must be ComputeFindIVResult");
7128 VPValue *StartVPV = RdxResult->getOperand(1);
7129 match(StartVPV, m_Freeze(m_VPValue(StartVPV)));
7130 return StartVPV->getLiveInIRValue();
7131}
7132
7133// If \p EpiResumePhiR is resume VPPhi for a reduction when vectorizing the
7134// epilog loop, fix the reduction's scalar PHI node by adding the incoming value
7135// from the main vector loop.
7136 static void fixReductionScalarResumeWhenVectorizingEpilog(
7137 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7138 // Get the VPInstruction computing the reduction result in the middle block.
7139 // The first operand may not be from the middle block if it is not connected
7140 // to the scalar preheader. In that case, there's nothing to fix.
7141 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7144 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7145 if (!EpiRedResult ||
7146 (EpiRedResult->getOpcode() != VPInstruction::ComputeAnyOfResult &&
7147 EpiRedResult->getOpcode() != VPInstruction::ComputeReductionResult &&
7148 EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
7149 return;
7150
7151 auto *EpiRedHeaderPhi =
7152 cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
7153 RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
7154 Value *MainResumeValue;
7155 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7156 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7157 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7158 "unexpected start recipe");
7159 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7160 } else
7161 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7162 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind)) {
7163 [[maybe_unused]] Value *StartV =
7164 EpiRedResult->getOperand(1)->getLiveInIRValue();
7165 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7166 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7167 "AnyOf expected to start with ICMP_NE");
7168 assert(Cmp->getOperand(1) == StartV &&
7169 "AnyOf expected to start by comparing main resume value to original "
7170 "start value");
7171 MainResumeValue = Cmp->getOperand(0);
7172 } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(Kind)) {
7173 Value *StartV = getStartValueFromReductionResult(EpiRedResult);
7174 Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
7175 using namespace llvm::PatternMatch;
7176 Value *Cmp, *OrigResumeV, *CmpOp;
7177 [[maybe_unused]] bool IsExpectedPattern =
7178 match(MainResumeValue,
7179 m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
7180 m_Value(OrigResumeV))) &&
7181 match(Cmp, m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(OrigResumeV),
7182 m_Value(CmpOp))) &&
7183 ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
7184 assert(IsExpectedPattern && "Unexpected reduction resume pattern");
7185 MainResumeValue = OrigResumeV;
7186 }
7187 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7188
7189 // When fixing reductions in the epilogue loop we should already have
7190 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7191 // over the incoming values correctly.
7192 EpiResumePhi.setIncomingValueForBlock(
7193 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7194}
7195
7196 DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
7197 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7198 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7199 assert(BestVPlan.hasVF(BestVF) &&
7200 "Trying to execute plan with unsupported VF");
7201 assert(BestVPlan.hasUF(BestUF) &&
7202 "Trying to execute plan with unsupported UF");
7203 if (BestVPlan.hasEarlyExit())
7204 ++LoopsEarlyExitVectorized;
7205 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7206 // cost model is complete for better cost estimates.
7211 bool HasBranchWeights =
7212 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7213 if (HasBranchWeights) {
7214 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7216 BestVPlan, BestVF, VScale);
7217 }
7218
7219 // Checks are the same for all VPlans, added to BestVPlan only for
7220 // compactness.
7221 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7222
7223 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7224 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7225
7226 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7229 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7230 BestVPlan.getScalarPreheader()) {
7231 // TODO: The vector loop would be dead, should not even try to vectorize.
7232 ORE->emit([&]() {
7233 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7234 OrigLoop->getStartLoc(),
7235 OrigLoop->getHeader())
7236 << "Created vector loop never executes due to insufficient trip "
7237 "count.";
7238 });
7239 return {};
7240 }
7241
7242 VPlanTransforms::narrowInterleaveGroups(
7243 BestVPlan, BestVF,
7244 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector));
7245 VPlanTransforms::cse(BestVPlan);
7247
7249 // Regions are dissolved after optimizing for VF and UF, which completely
7250 // removes unneeded loop regions first.
7251 VPlanTransforms::dissolveLoopRegions(BestVPlan);
7252 // Canonicalize EVL loops after regions are dissolved.
7253 VPlanTransforms::canonicalizeEVLLoops(BestVPlan);
7255 VPlanTransforms::materializeConstantVectorTripCount(
7256 BestVPlan, VectorPH, CM.foldTailByMasking(),
7257 CM.requiresScalarEpilogue(BestVF.isVector()));
7258 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7260
7261 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7262 // making any changes to the CFG.
7263 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7264 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7265 if (!ILV.getTripCount())
7266 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7267 else
7268 assert(VectorizingEpilogue && "should only re-use the existing trip "
7269 "count during epilogue vectorization");
7270
7271 // Perform the actual loop transformation.
7272 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7273 OrigLoop->getParentLoop(),
7274 Legal->getWidestInductionType());
7275
7276#ifdef EXPENSIVE_CHECKS
7277 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7278#endif
7279
7280 // 1. Set up the skeleton for vectorization, including vector pre-header and
7281 // middle block. The vector loop is created during VPlan execution.
7282 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7284 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7286
7287 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7288 "final VPlan is invalid");
7289
7290 // After vectorization, the exit blocks of the original loop will have
7291 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7292 // looked through single-entry phis.
7293 ScalarEvolution &SE = *PSE.getSE();
7294 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7295 if (!Exit->hasPredecessors())
7296 continue;
7297 for (VPRecipeBase &PhiR : Exit->phis())
7298 SE.forgetLcssaPhiWithNewPredecessor(
7299 OrigLoop, cast<PHINode>(&cast<VPIRPhi>(PhiR).getInstruction()));
7300 }
7301 // Forget the original loop and block dispositions.
7302 SE.forgetLoop(OrigLoop);
7303 SE.forgetBlockAndLoopDispositions();
7304
7306
7307 //===------------------------------------------------===//
7308 //
7309 // Notice: any optimization or new instruction that go
7310 // into the code below should also be implemented in
7311 // the cost-model.
7312 //
7313 //===------------------------------------------------===//
7314
7315 // Retrieve loop information before executing the plan, which may remove the
7316 // original loop, if it becomes unreachable.
7317 MDNode *LID = OrigLoop->getLoopID();
7318 unsigned OrigLoopInvocationWeight = 0;
7319 std::optional<unsigned> OrigAverageTripCount =
7320 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7321
7322 BestVPlan.execute(&State);
7323
7324 // 2.6. Maintain Loop Hints
7325 // Keep all loop hints from the original loop on the vector loop (we'll
7326 // replace the vectorizer-specific hints below).
7327 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7328 // Add metadata to disable runtime unrolling a scalar loop when there
7329 // are no runtime checks about strides and memory. A scalar loop that is
7330 // rarely used is not worth unrolling.
7331 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7333 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7334 : nullptr,
7335 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7336 OrigLoopInvocationWeight,
7337 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7338 DisableRuntimeUnroll);
7339
7340 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7341 // predication, updating analyses.
7342 ILV.fixVectorizedLoop(State);
7343
7344 ILV.printDebugTracesAtEnd();
7345
7346 return ExpandedSCEVs;
7347}
7348
7349//===--------------------------------------------------------------------===//
7350// EpilogueVectorizerMainLoop
7351//===--------------------------------------------------------------------===//
7352
7353/// This function is partially responsible for generating the control flow
7354/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7355 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7356 BasicBlock *ScalarPH = createScalarPreheader("");
7357 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7358
7359 // Generate the code to check the minimum iteration count of the vector
7360 // epilogue (see below).
7361 EPI.EpilogueIterationCountCheck =
7362 emitIterationCountCheck(VectorPH, ScalarPH, true);
7363 EPI.EpilogueIterationCountCheck->setName("iter.check");
7364
7365 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7366 ->getSuccessor(1);
7367 // Generate the iteration count check for the main loop, *after* the check
7368 // for the epilogue loop, so that the path-length is shorter for the case
7369 // that goes directly through the vector epilogue. The longer-path length for
7370 // the main loop is compensated for, by the gain from vectorizing the larger
7371 // trip count. Note: the branch will get updated later on when we vectorize
7372 // the epilogue.
7373 EPI.MainLoopIterationCountCheck =
7374 emitIterationCountCheck(VectorPH, ScalarPH, false);
7375
7376 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7377 ->getSuccessor(1);
7378}
7379
7380 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7381 LLVM_DEBUG({
7382 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7383 << "Main Loop VF:" << EPI.MainLoopVF
7384 << ", Main Loop UF:" << EPI.MainLoopUF
7385 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7386 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7387 });
7388}
7389
7390 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7391 DEBUG_WITH_TYPE(VerboseDebug, {
7392 dbgs() << "intermediate fn:\n"
7393 << *OrigLoop->getHeader()->getParent() << "\n";
7394 });
7395}
7396
7397 BasicBlock *EpilogueVectorizerMainLoop::emitIterationCountCheck(
7398 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7399 assert(Bypass && "Expected valid bypass basic block.");
7402 Value *CheckMinIters = createIterationCountCheck(
7403 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7404 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7405
7406 BasicBlock *const TCCheckBlock = VectorPH;
7407 if (!ForEpilogue)
7408 TCCheckBlock->setName("vector.main.loop.iter.check");
7409
7410 // Create new preheader for vector loop.
7411 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7412 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7413 "vector.ph");
7414 if (ForEpilogue) {
7415 // Save the trip count so we don't have to regenerate it in the
7416 // vec.epilog.iter.check. This is safe to do because the trip count
7417 // generated here dominates the vector epilog iter check.
7418 EPI.TripCount = Count;
7419 } else {
7421 }
7422
7423 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7424 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7425 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7426 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7427
7428 // When vectorizing the main loop, its trip-count check is placed in a new
7429 // block, whereas the overall trip-count check is placed in the VPlan entry
7430 // block. When vectorizing the epilogue loop, its trip-count check is placed
7431 // in the VPlan entry block.
7432 if (!ForEpilogue)
7433 introduceCheckBlockInVPlan(TCCheckBlock);
7434 return TCCheckBlock;
7435}
7436
7437//===--------------------------------------------------------------------===//
7438// EpilogueVectorizerEpilogueLoop
7439//===--------------------------------------------------------------------===//
7440
7441/// This function is partially responsible for generating the control flow
7442/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7443 BasicBlock *EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7444 BasicBlock *ScalarPH = createScalarPreheader("vec.epilog.");
7445 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7446 // Now, compare the remaining count and if there aren't enough iterations to
7447 // execute the vectorized epilogue, skip to the scalar part.
7448 VectorPH->setName("vec.epilog.ph");
7449 BasicBlock *VecEpilogueIterationCountCheck =
7450 SplitBlock(VectorPH, VectorPH->begin(), DT, LI, nullptr,
7451 "vec.epilog.iter.check", true);
7453
7454 emitMinimumVectorEpilogueIterCountCheck(VectorPH, ScalarPH,
7455 VecEpilogueIterationCountCheck);
7456 AdditionalBypassBlock = VecEpilogueIterationCountCheck;
7457
7458 // Adjust the control flow taking the state info from the main loop
7459 // vectorization into account.
7460 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7461 "expected this to be saved from the previous pass.");
7462 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7463 VecEpilogueIterationCountCheck, VectorPH);
7464
7465 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7466 VecEpilogueIterationCountCheck, ScalarPH);
7467
7468 // Adjust the terminators of runtime check blocks and phis using them.
7469 BasicBlock *SCEVCheckBlock = RTChecks.getSCEVChecks().second;
7470 BasicBlock *MemCheckBlock = RTChecks.getMemRuntimeChecks().second;
7471 if (SCEVCheckBlock)
7472 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
7473 VecEpilogueIterationCountCheck, ScalarPH);
7474 if (MemCheckBlock)
7475 MemCheckBlock->getTerminator()->replaceUsesOfWith(
7476 VecEpilogueIterationCountCheck, ScalarPH);
7477
7478 DT->changeImmediateDominator(ScalarPH, EPI.EpilogueIterationCountCheck);
7479
7480 // The vec.epilog.iter.check block may contain Phi nodes from inductions or
7481 // reductions which merge control-flow from the latch block and the middle
7482 // block. Update the incoming values here and move the Phi into the preheader.
7483 SmallVector<PHINode *, 4> PhisInBlock(
7484 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
7485
7486 for (PHINode *Phi : PhisInBlock) {
7487 Phi->moveBefore(VectorPH->getFirstNonPHIIt());
7488 Phi->replaceIncomingBlockWith(
7489 VecEpilogueIterationCountCheck->getSinglePredecessor(),
7490 VecEpilogueIterationCountCheck);
7491
7492 // If the phi doesn't have an incoming value from the
7493 // EpilogueIterationCountCheck, we are done. Otherwise remove the incoming
7494 // value and also those from other check blocks. This is needed for
7495 // reduction phis only.
7496 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
7497 return EPI.EpilogueIterationCountCheck == IncB;
7498 }))
7499 continue;
7500 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7501 if (SCEVCheckBlock)
7502 Phi->removeIncomingValue(SCEVCheckBlock);
7503 if (MemCheckBlock)
7504 Phi->removeIncomingValue(MemCheckBlock);
7505 }
7506
7507 return VectorPH;
7508}
7509
7510BasicBlock *
7512 BasicBlock *VectorPH, BasicBlock *Bypass, BasicBlock *Insert) {
7513
7514 assert(EPI.TripCount &&
7515 "Expected trip count to have been saved in the first pass.");
7516 Value *TC = EPI.TripCount;
7517 IRBuilder<> Builder(Insert->getTerminator());
7518 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7519
7520 // Generate code to check if the loop's trip count is less than VF * UF of the
7521 // vector epilogue loop.
7522 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF.isVector())
7523 ? ICmpInst::ICMP_ULE
7524 : ICmpInst::ICMP_ULT;
7525
7526 Value *CheckMinIters =
7527 Builder.CreateICmp(P, Count,
7528 createStepForVF(Builder, Count->getType(),
7529 EPI.EpilogueVF, EPI.EpilogueUF),
7530 "min.epilog.iters.check");
7531
7532 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7533 auto VScale = Cost->getVScaleForTuning();
7534 unsigned MainLoopStep =
7535 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
7536 unsigned EpilogueLoopStep =
7537 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
7538 // We assume the remaining `Count` is equally distributed in
7539 // [0, MainLoopStep)
7540 // So the probability for `Count < EpilogueLoopStep` should be
7541 // min(MainLoopStep, EpilogueLoopStep) / MainLoopStep
7542 // TODO: Improve the estimate by taking the estimated trip count into
7543 // consideration.
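// For example (illustrative): with MainLoopStep = 8 and EpilogueLoopStep = 4,
// EstimatedSkipCount = 4 and the weights below become {4, 4}, i.e. an
// estimated 50% probability of skipping the vector epilogue.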
7544 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
7545 const uint32_t Weights[] = {EstimatedSkipCount,
7546 MainLoopStep - EstimatedSkipCount};
7547 setBranchWeights(BI, Weights, /*IsExpected=*/false);
7548 ReplaceInstWithInst(Insert->getTerminator(), &BI);
7549
7550 // A new entry block has been created for the epilogue VPlan. Hook it in, as
7551 // otherwise we would try to modify the entry to the main vector loop.
7552 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(Insert);
7553 VPBasicBlock *OldEntry = Plan.getEntry();
7554 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7555 Plan.setEntry(NewEntry);
7556 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7557
7558 return Insert;
7559}
7560
7561 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7562 LLVM_DEBUG({
7563 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7564 << "Epilogue Loop VF:" << EPI.EpilogueVF
7565 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7566 });
7567}
7568
7569 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7570 DEBUG_WITH_TYPE(VerboseDebug, {
7571 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7572 });
7573}
7574
7575 VPWidenMemoryRecipe *
7576 VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
7577 VFRange &Range) {
7579 "Must be called with either a load or store");
7580
7581 auto WillWiden = [&](ElementCount VF) -> bool {
7582 LoopVectorizationCostModel::InstWidening Decision =
7583 CM.getWideningDecision(I, VF);
7585 "CM decision should be taken at this point.");
7586 if (Decision == LoopVectorizationCostModel::CM_Interleave)
7587 return true;
7588 if (CM.isScalarAfterVectorization(I, VF) ||
7589 CM.isProfitableToScalarize(I, VF))
7590 return false;
7591 return Decision != LoopVectorizationCostModel::CM_Scalarize;
7592 };
7593
7594 if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
7595 return nullptr;
7596
7597 VPValue *Mask = nullptr;
7598 if (Legal->isMaskRequired(I))
7599 Mask = getBlockInMask(Builder.getInsertBlock());
7600
7601 // Determine if the pointer operand of the access is either consecutive or
7602 // reverse consecutive.
7603 LoopVectorizationCostModel::InstWidening Decision =
7604 CM.getWideningDecision(I, Range.Start);
7605 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
7606 bool Consecutive =
7607 Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
7608
7609 VPValue *Ptr = isa<LoadInst>(I) ? Operands[0] : Operands[1];
7610 if (Consecutive) {
7611 auto *GEP = dyn_cast<GetElementPtrInst>(
7612 Ptr->getUnderlyingValue()->stripPointerCasts());
7613 VPSingleDefRecipe *VectorPtr;
7614 if (Reverse) {
7615 // When folding the tail, we may compute an address that we would not
7616 // compute in the original scalar loop, and it may not be inbounds. Drop
7617 // Inbounds in that case.
7618 GEPNoWrapFlags Flags =
7619 (CM.foldTailByMasking() || !GEP || !GEP->isInBounds())
7620 ? GEPNoWrapFlags::none()
7621 : GEPNoWrapFlags::inBounds();
7622 VectorPtr =
7623 new VPVectorEndPointerRecipe(Ptr, &Plan.getVF(), getLoadStoreType(I),
7624 /*Stride*/ -1, Flags, I->getDebugLoc());
7625 } else {
7626 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7627 GEP ? GEP->getNoWrapFlags()
7628 : GEPNoWrapFlags::none(),
7629 I->getDebugLoc());
7630 }
7631 Builder.insert(VectorPtr);
7632 Ptr = VectorPtr;
7633 }
7634 if (LoadInst *Load = dyn_cast<LoadInst>(I))
7635 return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7636 VPIRMetadata(*Load, LVer), I->getDebugLoc());
7637
7638 StoreInst *Store = cast<StoreInst>(I);
7639 return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
7640 Reverse, VPIRMetadata(*Store, LVer),
7641 I->getDebugLoc());
7642}
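// Illustrative examples of the decisions handled above (a sketch, assuming a
// unit-stride i32 array): a load of a[i] gets CM_Widen, so Consecutive is
// true and a VPVectorPointerRecipe feeds one wide load; a load of a[n - i]
// gets CM_Widen_Reverse, so a VPVectorEndPointerRecipe with stride -1 is
// built and the widened load reverses its result. Accesses that are neither
// stay non-consecutive and become gathers/scatters.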
7643
7644/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
7645/// insert a recipe to expand the step for the induction recipe.
7646static VPWidenIntOrFpInductionRecipe *
7647createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc,
7648 VPValue *Start, const InductionDescriptor &IndDesc,
7649 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) {
7650 assert(IndDesc.getStartValue() ==
7651 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
7652 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
7653 "step must be loop invariant");
7654
7655 VPValue *Step =
7656 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());
7657 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
7658 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7659 IndDesc, TruncI,
7660 TruncI->getDebugLoc());
7661 }
7662 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
7663 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7664 IndDesc, Phi->getDebugLoc());
7665}
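// Shorthand for what the recipe above produces (illustrative, VF = 4, start
// 0, step 1):
//   scalar: i    = phi [ 0, ph ], [ i + 1, latch ]
//   vector: v.iv = phi [ <0,1,2,3>, ph ], [ v.iv + splat(4 * step), latch ]
// When a TruncInst is passed, the same recipe also provides the vector IV
// truncated to the narrower type.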
7666
7667VPHeaderPHIRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
7668 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) {
7669
7670 // Check if this is an integer or fp induction. If so, build the recipe that
7671 // produces its scalar and vector values.
7672 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
7673 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, Plan,
7674 *PSE.getSE(), *OrigLoop);
7675
7676 // Check if this is pointer induction. If so, build the recipe for it.
7677 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
7678 VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep());
7679 return new VPWidenPointerInductionRecipe(
7680 Phi, Operands[0], Step, &Plan.getVFxUF(), *II,
7681 LoopVectorizationPlanner::getDecisionAndClampRange(
7682 [&](ElementCount VF) {
7683 return CM.isScalarAfterVectorization(Phi, VF);
7684 },
7685 Range),
7686 Phi->getDebugLoc());
7687 }
7688 return nullptr;
7689}
7690
7691VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
7692 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range) {
7693 // Optimize the special case where the source is a constant integer
7694 // induction variable. Notice that we can only optimize the 'trunc' case
7695 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7696 // (c) other casts depend on pointer size.
7697
7698 // Determine whether \p K is a truncation based on an induction variable that
7699 // can be optimized.
7700 auto IsOptimizableIVTruncate =
7701 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7702 return [=](ElementCount VF) -> bool {
7703 return CM.isOptimizableIVTruncate(K, VF);
7704 };
7705 };
7706
7707 if (LoopVectorizationPlanner::getDecisionAndClampRange(
7708 IsOptimizableIVTruncate(I), Range)) {
7709
7710 auto *Phi = cast<PHINode>(I->getOperand(0));
7711 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
7712 VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
7713 return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
7714 *OrigLoop);
7715 }
7716 return nullptr;
7717}
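// E.g. (illustrative): given
//   %iv = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
//   %t = trunc i64 %iv to i32
// the truncate is optimizable, so rather than widening %iv to <VF x i64> and
// truncating each lane, the recipe directly produces a <VF x i32> induction
// using trunc(start) and trunc(step).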
7718
7719VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
7720 ArrayRef<VPValue *> Operands,
7721 VFRange &Range) {
7722 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7723 [this, CI](ElementCount VF) {
7724 return CM.isScalarWithPredication(CI, VF);
7725 },
7726 Range);
7727
7728 if (IsPredicated)
7729 return nullptr;
7730
7731 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7732 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7733 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7734 ID == Intrinsic::pseudoprobe ||
7735 ID == Intrinsic::experimental_noalias_scope_decl))
7736 return nullptr;
7737
7738 SmallVector<VPValue *, 4> Ops(Operands.take_front(CI->arg_size()));
7739
7740 // Is it beneficial to perform intrinsic call compared to lib call?
7741 bool ShouldUseVectorIntrinsic =
7742 ID && LoopVectorizationPlanner::getDecisionAndClampRange(
7743 [&](ElementCount VF) -> bool {
7744 return CM.getCallWideningDecision(CI, VF).Kind ==
7745 LoopVectorizationCostModel::CM_IntrinsicCall;
7746 },
7747 Range);
7748 if (ShouldUseVectorIntrinsic)
7749 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(),
7750 CI->getDebugLoc());
7751
7752 Function *Variant = nullptr;
7753 std::optional<unsigned> MaskPos;
7754 // Is it better to call a vectorized version of the function than to
7755 // scalarize the call?
7756 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7757 [&](ElementCount VF) -> bool {
7758 // The following case may be scalarized depending on the VF.
7759 // The flag shows whether we can use a usual Call for the vectorized
7760 // version of the instruction.
7761
7762 // If we've found a variant at a previous VF, then stop looking. A
7763 // vectorized variant of a function expects input in a certain shape
7764 // -- basically the number of input registers, the number of lanes
7765 // per register, and whether there's a mask required.
7766 // We store a pointer to the variant in the VPWidenCallRecipe, so
7767 // once we have an appropriate variant it's only valid for that VF.
7768 // This will force a different vplan to be generated for each VF that
7769 // finds a valid variant.
7770 if (Variant)
7771 return false;
7772 LoopVectorizationCostModel::CallWideningDecision Decision =
7773 CM.getCallWideningDecision(CI, VF);
7774 if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
7775 Variant = Decision.Variant;
7776 MaskPos = Decision.MaskPos;
7777 return true;
7778 }
7779
7780 return false;
7781 },
7782 Range);
7783 if (ShouldUseVectorCall) {
7784 if (MaskPos.has_value()) {
7785 // We have 2 cases that would require a mask:
7786 // 1) The block needs to be predicated, either due to a conditional
7787 // in the scalar loop or use of an active lane mask with
7788 // tail-folding, and we use the appropriate mask for the block.
7789 // 2) No mask is required for the block, but the only available
7790 // vector variant at this VF requires a mask, so we synthesize an
7791 // all-true mask.
7792 VPValue *Mask = nullptr;
7793 if (Legal->isMaskRequired(CI))
7794 Mask = getBlockInMask(Builder.getInsertBlock());
7795 else
7796 Mask = Plan.getOrAddLiveIn(
7797 ConstantInt::getTrue(IntegerType::getInt1Ty(CI->getContext())));
7798
7799 Ops.insert(Ops.begin() + *MaskPos, Mask);
7800 }
7801
7802 Ops.push_back(Operands.back());
7803 return new VPWidenCallRecipe(CI, Variant, Ops, CI->getDebugLoc());
7804 }
7805
7806 return nullptr;
7807}
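// E.g. (a sketch with assumed costs): a call to llvm.sqrt.f32 may widen to a
// single llvm.sqrt.v4f32 (CM_IntrinsicCall), while a call with only a masked
// vector library variant becomes a VPWidenCallRecipe of that variant; if the
// block itself needs no mask, an all-true mask is synthesized and inserted at
// the variant's expected mask position.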
7808
7809bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7810 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7811 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7812 // Instruction should be widened, unless it is scalar after vectorization,
7813 // scalarization is profitable or it is predicated.
7814 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7815 return CM.isScalarAfterVectorization(I, VF) ||
7816 CM.isProfitableToScalarize(I, VF) ||
7817 CM.isScalarWithPredication(I, VF);
7818 };
7819 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7820 Range);
7821}
7822
7823VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
7824 ArrayRef<VPValue *> Operands) {
7825 switch (I->getOpcode()) {
7826 default:
7827 return nullptr;
7828 case Instruction::SDiv:
7829 case Instruction::UDiv:
7830 case Instruction::SRem:
7831 case Instruction::URem: {
7832 // If not provably safe, use a select to form a safe divisor before widening the
7833 // div/rem operation itself. Otherwise fall through to general handling below.
7834 if (CM.isPredicatedInst(I)) {
7835 SmallVector<VPValue *> Ops(Operands);
7836 VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7837 VPValue *One =
7838 Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
7839 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
7840 Ops[1] = SafeRHS;
7841 return new VPWidenRecipe(*I, Ops);
7842 }
7843 [[fallthrough]];
7844 }
7845 case Instruction::Add:
7846 case Instruction::And:
7847 case Instruction::AShr:
7848 case Instruction::FAdd:
7849 case Instruction::FCmp:
7850 case Instruction::FDiv:
7851 case Instruction::FMul:
7852 case Instruction::FNeg:
7853 case Instruction::FRem:
7854 case Instruction::FSub:
7855 case Instruction::ICmp:
7856 case Instruction::LShr:
7857 case Instruction::Mul:
7858 case Instruction::Or:
7859 case Instruction::Select:
7860 case Instruction::Shl:
7861 case Instruction::Sub:
7862 case Instruction::Xor:
7863 case Instruction::Freeze: {
7864 SmallVector<VPValue *> NewOps(Operands);
7865 if (Instruction::isBinaryOp(I->getOpcode())) {
7866 // The legacy cost model uses SCEV to check if some of the operands are
7867 // constants. To match the legacy cost model's behavior, use SCEV to try
7868 // to replace operands with constants.
7869 ScalarEvolution &SE = *PSE.getSE();
7870 auto GetConstantViaSCEV = [this, &SE](VPValue *Op) {
7871 if (!Op->isLiveIn())
7872 return Op;
7873 Value *V = Op->getUnderlyingValue();
7874 if (isa<Constant>(V) || !SE.isSCEVable(V->getType()))
7875 return Op;
7876 auto *C = dyn_cast<SCEVConstant>(SE.getSCEV(V));
7877 if (!C)
7878 return Op;
7879 return Plan.getOrAddLiveIn(C->getValue());
7880 };
7881 // For Mul, the legacy cost model checks both operands.
7882 if (I->getOpcode() == Instruction::Mul)
7883 NewOps[0] = GetConstantViaSCEV(NewOps[0]);
7884 // For other binops, the legacy cost model only checks the second operand.
7885 NewOps[1] = GetConstantViaSCEV(NewOps[1]);
7886 }
7887 return new VPWidenRecipe(*I, NewOps);
7888 }
7889 case Instruction::ExtractValue: {
7890 SmallVector<VPValue *> NewOps(Operands);
7891 Type *I32Ty = IntegerType::getInt32Ty(I->getContext());
7892 auto *EVI = cast<ExtractValueInst>(I);
7893 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7894 unsigned Idx = EVI->getIndices()[0];
7895 NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
7896 return new VPWidenRecipe(*I, NewOps);
7897 }
7898 };
7899}
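// Safe-divisor example for the SDiv/UDiv/SRem/URem case above (illustrative):
// for 'if (b[i] != 0) q = a[i] / b[i]' the recipes compute
//   safe.rhs = select(block.mask, b, 1)
// and widen 'a / safe.rhs', so masked-off lanes divide by 1 instead of
// trapping; their results are never used.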
7900
7901VPHistogramRecipe *
7902VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7903 ArrayRef<VPValue *> Operands) {
7904 // FIXME: Support other operations.
7905 unsigned Opcode = HI->Update->getOpcode();
7906 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7907 "Histogram update operation must be an Add or Sub");
7908
7909 SmallVector<VPValue *, 3> HGramOps;
7910 // Bucket address.
7911 HGramOps.push_back(Operands[1]);
7912 // Increment value.
7913 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7914
7915 // In case of predicated execution (due to tail-folding, or conditional
7916 // execution, or both), pass the relevant mask.
7917 if (Legal->isMaskRequired(HI->Store))
7918 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7919
7920 return new VPHistogramRecipe(Opcode, HGramOps, HI->Store->getDebugLoc());
7921}
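// E.g. (illustrative): this models update loops such as
//   for (i = 0; i < n; i++)
//     buckets[indices[i]] += inc;
// where multiple lanes of 'indices' may collide within a single vector
// iteration; the histogram recipe resolves such collisions, and the optional
// mask suppresses updates on predicated-off lanes.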
7922
7923VPReplicateRecipe *
7924VPRecipeBuilder::handleReplication(Instruction *I, ArrayRef<VPValue *> Operands,
7925 VFRange &Range) {
7926 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7927 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7928 Range);
7929
7930 bool IsPredicated = CM.isPredicatedInst(I);
7931
7932 // Even if the instruction is not marked as uniform, there are certain
7933 // intrinsic calls that can be effectively treated as such, so we check for
7934 // them here. Conservatively, we only do this for scalable vectors, since
7935 // for fixed-width VFs we can always fall back on full scalarization.
7936 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7937 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7938 case Intrinsic::assume:
7939 case Intrinsic::lifetime_start:
7940 case Intrinsic::lifetime_end:
7941 // For scalable vectors, if one of the operands is variant, we still want
7942 // to mark the call as uniform, which will generate one instruction for
7943 // just the first lane of the vector. We can't scalarize the call in the
7944 // same way as for fixed-width vectors because we don't know how many
7945 // lanes there are.
7946 //
7947 // The reasons for doing it this way for scalable vectors are:
7948 // 1. For the assume intrinsic, generating the instruction for the first
7949 // lane is still better than not generating any at all. For
7950 // example, the input may be a splat across all lanes.
7951 // 2. For the lifetime start/end intrinsics the pointer operand only
7952 // does anything useful when the input comes from a stack object,
7953 // which suggests it should always be uniform. For non-stack objects
7954 // the effect is to poison the object, which still allows us to
7955 // remove the call.
7956 IsUniform = true;
7957 break;
7958 default:
7959 break;
7960 }
7961 }
7962 VPValue *BlockInMask = nullptr;
7963 if (!IsPredicated) {
7964 // Finalize the recipe for Instr, first if it is not predicated.
7965 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7966 } else {
7967 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7968 // Instructions marked for predication are replicated and a mask operand is
7969 // added initially. Masked replicate recipes will later be placed under an
7970 // if-then construct to prevent side-effects. Generate recipes to compute
7971 // the block mask for this region.
7972 BlockInMask = getBlockInMask(Builder.getInsertBlock());
7973 }
7974
7975 // Note that there is some custom logic to mark some intrinsics as uniform
7976 // manually above for scalable vectors, which this assert needs to account for
7977 // as well.
7978 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7979 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
7980 "Should not predicate a uniform recipe");
7981 auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask,
7982 VPIRMetadata(*I, LVer));
7983 return Recipe;
7984}
7985
7986/// Find all possible partial reductions in the loop and track all of those that
7987/// are valid so recipes can be formed later.
7988void VPRecipeBuilder::collectScaledReductions(VFRange &Range) {
7989 // Find all possible partial reductions.
7990 SmallVector<std::pair<PartialReductionChain, unsigned>>
7991 PartialReductionChains;
7992 for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) {
7993 getScaledReductions(Phi, RdxDesc.getLoopExitInstr(), Range,
7994 PartialReductionChains);
7995 }
7996
7997 // A partial reduction is invalid if any of its extends are used by
7998 // something that isn't another partial reduction. This is because the
7999 // extends are intended to be lowered along with the reduction itself.
8000
8001 // Build up a set of partial reduction ops for efficient use checking.
8002 SmallPtrSet<User *, 4> PartialReductionOps;
8003 for (const auto &[PartialRdx, _] : PartialReductionChains)
8004 PartialReductionOps.insert(PartialRdx.ExtendUser);
8005
8006 auto ExtendIsOnlyUsedByPartialReductions =
8007 [&PartialReductionOps](Instruction *Extend) {
8008 return all_of(Extend->users(), [&](const User *U) {
8009 return PartialReductionOps.contains(U);
8010 });
8011 };
8012
8013 // Check if each use of a chain's two extends is a partial reduction
8014 // and only add those that don't have non-partial reduction users.
8015 for (auto Pair : PartialReductionChains) {
8016 PartialReductionChain Chain = Pair.first;
8017 if (ExtendIsOnlyUsedByPartialReductions(Chain.ExtendA) &&
8018 (!Chain.ExtendB || ExtendIsOnlyUsedByPartialReductions(Chain.ExtendB)))
8019 ScaledReductionMap.try_emplace(Chain.Reduction, Pair.second);
8020 }
8021}
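// E.g. (illustrative): in a dot-product loop
//   acc += (i32)(i8)a[i] * (i32)(i8)b[i]
// the multiply is the extend-user of the two sign-extends and the add forms a
// valid partial-reduction chain. If either extend were also used by, say, a
// store, the chain would be rejected here, since the extends could then not
// be folded into the partial reduction.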
8022
8023bool VPRecipeBuilder::getScaledReductions(
8024 Instruction *PHI, Instruction *RdxExitInstr, VFRange &Range,
8025 SmallVectorImpl<std::pair<PartialReductionChain, unsigned>> &Chains) {
8026 if (!CM.TheLoop->contains(RdxExitInstr))
8027 return false;
8028
8029 auto *Update = dyn_cast<BinaryOperator>(RdxExitInstr);
8030 if (!Update)
8031 return false;
8032
8033 Value *Op = Update->getOperand(0);
8034 Value *PhiOp = Update->getOperand(1);
8035 if (Op == PHI)
8036 std::swap(Op, PhiOp);
8037
8038 // Try to get a scaled reduction from the first non-phi operand.
8039 // If one is found, we use the discovered reduction instruction in
8040 // place of the accumulator for costing.
8041 if (auto *OpInst = dyn_cast<Instruction>(Op)) {
8042 if (getScaledReductions(PHI, OpInst, Range, Chains)) {
8043 PHI = Chains.rbegin()->first.Reduction;
8044
8045 Op = Update->getOperand(0);
8046 PhiOp = Update->getOperand(1);
8047 if (Op == PHI)
8048 std::swap(Op, PhiOp);
8049 }
8050 }
8051 if (PhiOp != PHI)
8052 return false;
8053
8054 using namespace llvm::PatternMatch;
8055
8056 // If the update is a binary operator, check both of its operands to see if
8057 // they are extends. Otherwise, see if the update comes directly from an
8058 // extend.
8059 Instruction *Exts[2] = {nullptr};
8060 BinaryOperator *ExtendUser = dyn_cast<BinaryOperator>(Op);
8061 std::optional<unsigned> BinOpc;
8062 Type *ExtOpTypes[2] = {nullptr};
8063
8064 auto CollectExtInfo = [this, &Exts,
8065 &ExtOpTypes](SmallVectorImpl<Value *> &Ops) -> bool {
8066 unsigned I = 0;
8067 for (Value *OpI : Ops) {
8068 Value *ExtOp;
8069 if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
8070 return false;
8071 Exts[I] = cast<Instruction>(OpI);
8072
8073 // TODO: We should be able to support live-ins.
8074 if (!CM.TheLoop->contains(Exts[I]))
8075 return false;
8076
8077 ExtOpTypes[I] = ExtOp->getType();
8078 I++;
8079 }
8080 return true;
8081 };
8082
8083 if (ExtendUser) {
8084 if (!ExtendUser->hasOneUse())
8085 return false;
8086
8087 // Use the side-effect of match to replace BinOp only if the pattern is
8088 // matched; we don't care at this point whether it actually matched.
8089 match(ExtendUser, m_Neg(m_BinOp(ExtendUser)));
8090
8091 SmallVector<Value *> Ops(ExtendUser->operands());
8092 if (!CollectExtInfo(Ops))
8093 return false;
8094
8095 BinOpc = std::make_optional(ExtendUser->getOpcode());
8096 } else if (match(Update, m_Add(m_Value(), m_Value()))) {
8097 // We already know the operands for Update are Op and PhiOp.
8098 SmallVector<Value *> Ops({Op, PhiOp});
8099 if (!CollectExtInfo(Ops))
8100 return false;
8101
8102 ExtendUser = Update;
8103 BinOpc = std::nullopt;
8104 } else
8105 return false;
8106
8107 TTI::PartialReductionExtendKind OpAExtend =
8108 TTI::getPartialReductionExtendKind(Exts[0]);
8109 TTI::PartialReductionExtendKind OpBExtend =
8110 Exts[1] ? TTI::getPartialReductionExtendKind(Exts[1]) : TTI::PR_None;
8111 PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser);
8112
8113 TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits();
8114 TypeSize ASize = ExtOpTypes[0]->getPrimitiveSizeInBits();
8115 if (!PHISize.hasKnownScalarFactor(ASize))
8116 return false;
8117 unsigned TargetScaleFactor = PHISize.getKnownScalarFactor(ASize);
8118
8120 [&](ElementCount VF) {
8121 InstructionCost Cost = TTI->getPartialReductionCost(
8122 Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1],
8123 PHI->getType(), VF, OpAExtend, OpBExtend, BinOpc, CM.CostKind);
8124 return Cost.isValid();
8125 },
8126 Range)) {
8127 Chains.emplace_back(Chain, TargetScaleFactor);
8128 return true;
8129 }
8130
8131 return false;
8132}
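// Scale-factor arithmetic for the code above (illustrative): with an i32
// accumulator PHI and i8 extend operands, PHISize = 32 and ASize = 8, so
// TargetScaleFactor = 32 / 8 = 4, i.e. four input lanes feed each lane of the
// partial-reduction result.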
8133
8134VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R,
8135 VFRange &Range) {
8136 // First, check for specific widening recipes that deal with inductions, Phi
8137 // nodes, calls and memory operations.
8138 VPRecipeBase *Recipe;
8139 Instruction *Instr = R->getUnderlyingInstr();
8140 SmallVector<VPValue *, 4> Operands(R->operands());
8141 if (auto *PhiR = dyn_cast<VPPhi>(R)) {
8142 VPBasicBlock *Parent = PhiR->getParent();
8143 [[maybe_unused]] VPRegionBlock *LoopRegionOf =
8144 Parent->getEnclosingLoopRegion();
8145 assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
8146 "Non-header phis should have been handled during predication");
8147 auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
8148 assert(Operands.size() == 2 && "Must have 2 operands for header phis");
8149 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8150 return Recipe;
8151
8152 VPHeaderPHIRecipe *PhiRecipe = nullptr;
8153 assert((Legal->isReductionVariable(Phi) ||
8154 Legal->isFixedOrderRecurrence(Phi)) &&
8155 "can only widen reductions and fixed-order recurrences here");
8156 VPValue *StartV = Operands[0];
8157 if (Legal->isReductionVariable(Phi)) {
8158 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(Phi);
8159 assert(RdxDesc.getRecurrenceStartValue() ==
8160 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8161
8162 // If the PHI is used by a partial reduction, set the scale factor.
8163 unsigned ScaleFactor =
8164 getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1);
8165 PhiRecipe = new VPReductionPHIRecipe(
8166 Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi),
8167 CM.useOrderedReductions(RdxDesc), ScaleFactor);
8168 } else {
8169 // TODO: Currently fixed-order recurrences are modeled as chains of
8170 // first-order recurrences. If there are no users of the intermediate
8171 // recurrences in the chain, the fixed order recurrence should be modeled
8172 // directly, enabling more efficient codegen.
8173 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8174 }
8175 // Add backedge value.
8176 PhiRecipe->addOperand(Operands[1]);
8177 return PhiRecipe;
8178 }
8179 assert(!R->isPhi() && "only VPPhi nodes expected at this point");
8180
8181 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8182 cast<TruncInst>(Instr), Operands, Range)))
8183 return Recipe;
8184
8185 // All widen recipes below deal only with VF > 1.
8186 if (LoopVectorizationPlanner::getDecisionAndClampRange(
8187 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8188 return nullptr;
8189
8190 if (auto *CI = dyn_cast<CallInst>(Instr))
8191 return tryToWidenCall(CI, Operands, Range);
8192
8193 if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
8194 if (auto HistInfo = Legal->getHistogramInfo(SI))
8195 return tryToWidenHistogram(*HistInfo, Operands);
8196
8197 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8198 return tryToWidenMemory(Instr, Operands, Range);
8199
8200 if (std::optional<unsigned> ScaleFactor = getScalingForReduction(Instr))
8201 return tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value());
8202
8203 if (!shouldWiden(Instr, Range))
8204 return nullptr;
8205
8206 if (auto *GEP = dyn_cast<GetElementPtrInst>(Instr))
8207 return new VPWidenGEPRecipe(GEP, Operands);
8208
8209 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8210 return new VPWidenSelectRecipe(*SI, Operands);
8211 }
8212
8213 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8214 return new VPWidenCastRecipe(CI->getOpcode(), Operands[0], CI->getType(),
8215 *CI);
8216 }
8217
8218 return tryToWiden(Instr, Operands);
8219}
8220
8221VPRecipeBase *
8222VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
8223 ArrayRef<VPValue *> Operands,
8224 unsigned ScaleFactor) {
8225 assert(Operands.size() == 2 &&
8226 "Unexpected number of operands for partial reduction");
8227
8228 VPValue *BinOp = Operands[0];
8229 VPValue *Accumulator = Operands[1];
8230 VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe();
8231 if (isa<VPReductionPHIRecipe>(BinOpRecipe) ||
8232 isa<VPPartialReductionRecipe>(BinOpRecipe))
8233 std::swap(BinOp, Accumulator);
8234
8235 unsigned ReductionOpcode = Reduction->getOpcode();
8236 if (ReductionOpcode == Instruction::Sub) {
8237 auto *const Zero = ConstantInt::get(Reduction->getType(), 0);
8238 SmallVector<VPValue *, 2> Ops;
8239 Ops.push_back(Plan.getOrAddLiveIn(Zero));
8240 Ops.push_back(BinOp);
8241 BinOp = new VPWidenRecipe(*Reduction, Ops);
8242 Builder.insert(BinOp->getDefiningRecipe());
8243 ReductionOpcode = Instruction::Add;
8244 }
8245
8246 VPValue *Cond = nullptr;
8247 if (CM.blockNeedsPredicationForAnyReason(Reduction->getParent())) {
8248 assert((ReductionOpcode == Instruction::Add ||
8249 ReductionOpcode == Instruction::Sub) &&
8250 "Expected an ADD or SUB operation for predicated partial "
8251 "reductions (because the neutral element in the mask is zero)!");
8252 Cond = getBlockInMask(Builder.getInsertBlock());
8253 VPValue *Zero =
8254 Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
8255 BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
8256 }
8257 return new VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond,
8258 ScaleFactor, Reduction);
8259}
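// E.g. (illustrative): for 'acc -= a[i] * b[i]' the Sub is rewritten via the
// widened 'sub 0, x' built above into 'acc += (0 - x)', and under predication
// the input is first masked with 'select(mask, x, 0)' so inactive lanes
// contribute the add's neutral element.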
8260
8261void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8262 ElementCount MaxVF) {
8263 if (ElementCount::isKnownGT(MinVF, MaxVF))
8264 return;
8265
8266 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8267
8268 const LoopAccessInfo *LAI = Legal->getLAI();
8269 LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
8270 OrigLoop, LI, DT, PSE.getSE());
8271 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8272 !LAI->getRuntimePointerChecking()->getDiffChecks()) {
8273 // Only use noalias metadata when using memory checks guaranteeing no
8274 // overlap across all iterations.
8275 LVer.prepareNoAliasMetadata();
8276 }
8277
8278 // Create initial base VPlan0, to serve as common starting point for all
8279 // candidates built later for specific VF ranges.
8280 auto VPlan0 = VPlanTransforms::buildVPlan0(
8281 OrigLoop, *LI, Legal->getWidestInductionType(),
8282 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8283
8284 auto MaxVFTimes2 = MaxVF * 2;
8285 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8286 VFRange SubRange = {VF, MaxVFTimes2};
8287 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8288 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8289 bool HasScalarVF = Plan->hasScalarVFOnly();
8290 // Now optimize the initial VPlan.
8291 if (!HasScalarVF)
8292 VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths,
8293 *Plan, CM.getMinimalBitwidths());
8294 VPlanTransforms::optimize(*Plan);
8295 // TODO: try to put it close to addActiveLaneMask().
8296 if (CM.foldTailWithEVL() && !HasScalarVF)
8297 VPlanTransforms::runPass(VPlanTransforms::addExplicitVectorLength,
8298 *Plan, CM.getMaxSafeElements());
8299 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8300 VPlans.push_back(std::move(Plan));
8301 }
8302 VF = SubRange.End;
8303 }
8304}
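// E.g. (illustrative): with MinVF = 4 and MaxVF = 16, the loop above starts
// from the half-open range [4, 32). Each tryToBuildVPlanWithVPRecipes call
// clamps SubRange.End down to the first VF whose widening decisions differ,
// so one VPlan might cover {4, 8} and a second one {16}.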
8305
8306/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the
8307/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute
8308/// the end value of the induction.
8309static VPInstruction *addResumePhiRecipeForInduction(
8310 VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder,
8311 VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) {
8312 auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
8313 // Truncated wide inductions resume from the last lane of their vector value
8314 // in the last vector iteration which is handled elsewhere.
8315 if (WideIntOrFp && WideIntOrFp->getTruncInst())
8316 return nullptr;
8317
8318 VPValue *Start = WideIV->getStartValue();
8319 VPValue *Step = WideIV->getStepValue();
8320 const InductionDescriptor &ID = WideIV->getInductionDescriptor();
8321 VPValue *EndValue = VectorTC;
8322 if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
8323 EndValue = VectorPHBuilder.createDerivedIV(
8324 ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
8325 Start, VectorTC, Step);
8326 }
8327
8328 // EndValue is derived from the vector trip count (which has the same type as
8329 // the widest induction) and thus may be wider than the induction here.
8330 Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV);
8331 if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) {
8332 EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue,
8333 ScalarTypeOfWideIV,
8334 WideIV->getDebugLoc());
8335 }
8336
8337 auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi(
8338 {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val");
8339 return ResumePhiRecipe;
8340}
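// Worked example (illustrative): for a non-canonical induction j = 7 + 3*i,
// the end value entering the scalar loop is computed as
//   derived.iv = 7 + 3 * vector.trip.count
// then truncated if j's type is narrower than the trip count, and merged with
// the start value 7 in the 'bc.resume.val' phi of the scalar preheader.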
8341
8342/// Create resume phis in the scalar preheader for first-order recurrences,
8343/// reductions and inductions, and update the VPIRInstructions wrapping the
8344/// original phis in the scalar header. End values for inductions are added to
8345/// \p IVEndValues.
8346static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan,
8347 DenseMap<VPValue *, VPValue *> &IVEndValues) {
8348 VPTypeAnalysis TypeInfo(Plan);
8349 auto *ScalarPH = Plan.getScalarPreheader();
8350 auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]);
8351 VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8352 VPBuilder VectorPHBuilder(
8353 cast<VPBasicBlock>(VectorRegion->getSinglePredecessor()));
8354 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8355 VPBuilder ScalarPHBuilder(ScalarPH);
8356 for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) {
8357 auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR);
8358
8359 // TODO: Extract final value from induction recipe initially, optimize to
8360 // pre-computed end value together in optimizeInductionExitUsers.
8361 auto *VectorPhiR =
8362 cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi()));
8363 if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) {
8364 if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction(
8365 WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo,
8366 &Plan.getVectorTripCount())) {
8367 assert(isa<VPPhi>(ResumePhi) && "Expected a phi");
8368 IVEndValues[WideIVR] = ResumePhi->getOperand(0);
8369 ScalarPhiIRI->addOperand(ResumePhi);
8370 continue;
8371 }
8372 // TODO: Also handle truncated inductions here. Computing end-values
8373 // separately should be done as VPlan-to-VPlan optimization, after
8374 // legalizing all resume values to use the last lane from the loop.
8375 assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() &&
8376 "should only skip truncated wide inductions");
8377 continue;
8378 }
8379
8380 // The backedge value provides the value to resume coming out of a loop,
8381 // which for FORs is a vector whose last element needs to be extracted. The
8382 // start value provides the value if the loop is bypassed.
8383 bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR);
8384 auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue();
8385 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8386 "Cannot handle loops with uncountable early exits");
8387 if (IsFOR)
8388 ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
8389 VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {},
8390 "vector.recur.extract");
8391 StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx";
8392 auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
8393 {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name);
8394 ScalarPhiIRI->addOperand(ResumePhiR);
8395 }
8396}
8397
8398/// Handle users in the exit block for first-order recurrences in the original
8399/// exit block. The penultimate value of recurrences is fed to their LCSSA phi
8400/// users in the original exit block using the VPIRInstruction wrapping the
8401/// LCSSA phi.
8402static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range) {
8403 VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8404 auto *ScalarPHVPBB = Plan.getScalarPreheader();
8405 auto *MiddleVPBB = Plan.getMiddleBlock();
8406 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8407 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8408
8409 auto IsScalableOne = [](ElementCount VF) -> bool {
8410 return VF == ElementCount::getScalable(1);
8411 };
8412
8413 for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) {
8414 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8415 if (!FOR)
8416 continue;
8417
8418 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8419 "Cannot handle loops with uncountable early exits");
8420
8421 // This is the second phase of vectorizing first-order recurrences, creating
8422 // extracts for users outside the loop. An overview of the transformation is
8423 // described below. Suppose we have the following loop with some use after
8424 // the loop of the last a[i-1],
8425 //
8426 // for (int i = 0; i < n; ++i) {
8427 // t = a[i - 1];
8428 // b[i] = a[i] - t;
8429 // }
8430 // use t;
8431 //
8432 // There is a first-order recurrence on "a". For this loop, the shorthand
8433 // scalar IR looks like:
8434 //
8435 // scalar.ph:
8436 // s.init = a[-1]
8437 // br scalar.body
8438 //
8439 // scalar.body:
8440 // i = phi [0, scalar.ph], [i+1, scalar.body]
8441 // s1 = phi [s.init, scalar.ph], [s2, scalar.body]
8442 // s2 = a[i]
8443 // b[i] = s2 - s1
8444 // br cond, scalar.body, exit.block
8445 //
8446 // exit.block:
8447 // use = lcssa.phi [s1, scalar.body]
8448 //
8449 // In this example, s1 is a recurrence because its value depends on the
8450 // previous iteration. In the first phase of vectorization, we created a
8451 // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts
8452 // for users in the scalar preheader and exit block.
8453 //
8454 // vector.ph:
8455 // v_init = vector(..., ..., ..., a[-1])
8456 // br vector.body
8457 //
8458 // vector.body
8459 // i = phi [0, vector.ph], [i+4, vector.body]
8460 // v1 = phi [v_init, vector.ph], [v2, vector.body]
8461 // v2 = a[i, i+1, i+2, i+3]
8462 // b[i] = v2 - v1
8463 // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2))
8464 // b[i, i+1, i+2, i+3] = v2 - v1
8465 // br cond, vector.body, middle.block
8466 //
8467 // middle.block:
8468 // vector.recur.extract.for.phi = v2(2)
8469 // vector.recur.extract = v2(3)
8470 // br cond, scalar.ph, exit.block
8471 //
8472 // scalar.ph:
8473 // scalar.recur.init = phi [vector.recur.extract, middle.block],
8474 // [s.init, otherwise]
8475 // br scalar.body
8476 //
8477 // scalar.body:
8478 // i = phi [0, scalar.ph], [i+1, scalar.body]
8479 // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body]
8480 // s2 = a[i]
8481 // b[i] = s2 - s1
8482 // br cond, scalar.body, exit.block
8483 //
8484 // exit.block:
8485 // lo = lcssa.phi [s1, scalar.body],
8486 // [vector.recur.extract.for.phi, middle.block]
8487 //
8488 // Now update VPIRInstructions modeling LCSSA phis in the exit block.
8489 // Extract the penultimate value of the recurrence and use it as operand for
8490 // the VPIRInstruction modeling the phi.
8491 for (VPUser *U : FOR->users()) {
8492 using namespace llvm::VPlanPatternMatch;
8493 if (!match(U, m_ExtractLastElement(m_Specific(FOR))))
8494 continue;
8495 // For VF vscale x 1, if vscale = 1, we are unable to extract the
8496 // penultimate value of the recurrence. Instead we rely on the existing
8497 // extract of the last element from the result of
8498 // VPInstruction::FirstOrderRecurrenceSplice.
8499 // TODO: Consider vscale_range info and UF.
8500 if (!LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne,
8501 Range))
8502 return;
8503 VPValue *PenultimateElement = MiddleBuilder.createNaryOp(
8504 VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()},
8505 {}, "vector.recur.extract.for.phi");
8506 cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement);
8507 }
8508 }
8509}
8510
8511VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8512 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8513
8514 using namespace llvm::VPlanPatternMatch;
8515 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8516
8517 // ---------------------------------------------------------------------------
8518 // Build initial VPlan: Scan the body of the loop in a topological order to
8519 // visit each basic block after having visited its predecessor basic blocks.
8520 // ---------------------------------------------------------------------------
8521
8522 bool RequiresScalarEpilogueCheck =
8523 LoopVectorizationPlanner::getDecisionAndClampRange(
8524 [this](ElementCount VF) {
8525 return !CM.requiresScalarEpilogue(VF.isVector());
8526 },
8527 Range);
8528 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8529 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8530 CM.foldTailByMasking());
8531
8532 VPlanTransforms::createLoopRegions(*Plan);
8533
8534 // Don't use getDecisionAndClampRange here, because we don't know the UF,
8535 // so it is better to be conservative here rather than to split the range
8536 // up into different VPlans.
8537 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8538 bool IVUpdateMayOverflow = false;
8539 for (ElementCount VF : Range)
8540 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8541
8542 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8543 // Use NUW for the induction increment if we proved that it won't overflow in
8544 // the vector loop or when not folding the tail. In the latter case, we know
8545 // that the canonical induction increment will not overflow as the vector trip
8546 // count is >= increment and a multiple of the increment.
8547 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8548 if (!HasNUW) {
8549 auto *IVInc = Plan->getVectorLoopRegion()
8550 ->getExitingBasicBlock()
8551 ->getTerminator()
8552 ->getOperand(0);
8553 assert(match(IVInc, m_VPInstruction<Instruction::Add>(
8554 m_Specific(Plan->getCanonicalIV()), m_VPValue())) &&
8555 "Did not find the canonical IV increment");
8556 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8557 }
8558
8559 // ---------------------------------------------------------------------------
8560 // Pre-construction: record ingredients whose recipes we'll need to further
8561 // process after constructing the initial VPlan.
8562 // ---------------------------------------------------------------------------
8563
8564 // For each interleave group which is relevant for this (possibly trimmed)
8565 // Range, add it to the set of groups to be later applied to the VPlan and add
8566 // placeholders for its members' Recipes which we'll be replacing with a
8567 // single VPInterleaveRecipe.
8568 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8569 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8570 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8571 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8572 LoopVectorizationCostModel::CM_Interleave);
8573 // For scalable vectors, the interleave factors must be <= 8 since we
8574 // require the (de)interleaveN intrinsics instead of shufflevectors.
8575 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8576 "Unsupported interleave factor for scalable vectors");
8577 return Result;
8578 };
8579 if (!getDecisionAndClampRange(ApplyIG, Range))
8580 continue;
8581 InterleaveGroups.insert(IG);
8582 }
8583
8584 // ---------------------------------------------------------------------------
8585 // Predicate and linearize the top-level loop region.
8586 // ---------------------------------------------------------------------------
8587 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8588 *Plan, CM.foldTailByMasking());
8589
8590 // ---------------------------------------------------------------------------
8591 // Construct wide recipes and apply predication for original scalar
8592 // VPInstructions in the loop.
8593 // ---------------------------------------------------------------------------
8594 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8595 Builder, BlockMaskCache, LVer);
8596 RecipeBuilder.collectScaledReductions(Range);
8597
8598 // Scan the body of the loop in a topological order to visit each basic block
8599 // after having visited its predecessor basic blocks.
8600 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8601 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8602 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8603 HeaderVPBB);
8604
8605 auto *MiddleVPBB = Plan->getMiddleBlock();
8606 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8607 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8608 // temporarily to update created block masks.
8609 DenseMap<VPValue *, VPValue *> Old2New;
8610 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8611 // Convert input VPInstructions to widened recipes.
8612 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
8613 auto *SingleDef = cast<VPSingleDefRecipe>(&R);
8614 auto *UnderlyingValue = SingleDef->getUnderlyingValue();
8615 // Skip recipes that do not need transforming, including canonical IV,
8616 // wide canonical IV and VPInstructions without underlying values. The
8617 // latter are added above for masking.
8618 // FIXME: Migrate code relying on the underlying instruction from VPlan0
8619 // to construct recipes below to not use the underlying instruction.
8620 if (isa<VPCanonicalIVPHIRecipe, VPWidenCanonicalIVRecipe>(
8621 &R) ||
8622 (isa<VPInstruction>(&R) && !UnderlyingValue))
8623 continue;
8624
8625 // FIXME: VPlan0, which models a copy of the original scalar loop, should
8626 // not use VPWidenPHIRecipe to model the phis.
8627 assert((isa<VPWidenPHIRecipe>(&R) || isa<VPInstruction>(&R)) &&
8628 UnderlyingValue && "unsupported recipe");
8629
8630 // TODO: Gradually replace uses of underlying instruction by analyses on
8631 // VPlan.
8632 Instruction *Instr = cast<Instruction>(UnderlyingValue);
8633 Builder.setInsertPoint(SingleDef);
8634
8635 // The stores with invariant address inside the loop will be deleted, and
8636 // in the exit block, a uniform store recipe will be created for the final
8637 // invariant store of the reduction.
8638 StoreInst *SI;
8639 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8640 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8641 // Only create recipe for the final invariant store of the reduction.
8642 if (Legal->isInvariantStoreOfReduction(SI)) {
8643 auto *Recipe =
8644 new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */,
8645 nullptr /*Mask*/, VPIRMetadata(*SI, LVer));
8646 Recipe->insertBefore(*MiddleVPBB, MBIP);
8647 }
8648 R.eraseFromParent();
8649 continue;
8650 }
8651
8652 VPRecipeBase *Recipe =
8653 RecipeBuilder.tryToCreateWidenRecipe(SingleDef, Range);
8654 if (!Recipe)
8655 Recipe = RecipeBuilder.handleReplication(Instr, R.operands(), Range);
8656
8657 RecipeBuilder.setRecipe(Instr, Recipe);
8658 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8659 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8660 // moved to the phi section in the header.
8661 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8662 } else {
8663 Builder.insert(Recipe);
8664 }
8665 if (Recipe->getNumDefinedValues() == 1) {
8666 SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue());
8667 Old2New[SingleDef] = Recipe->getVPSingleValue();
8668 } else {
8669 assert(Recipe->getNumDefinedValues() == 0 &&
8670 "Unexpected multidef recipe");
8671 R.eraseFromParent();
8672 }
8673 }
8674 }
8675
8676 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8677 // TODO: Include the masks as operands in the predicated VPlan directly
8678 // to remove the need to keep a map of masks beyond the predication
8679 // transform.
8680 RecipeBuilder.updateBlockMaskCache(Old2New);
8681 for (VPValue *Old : Old2New.keys())
8682 Old->getDefiningRecipe()->eraseFromParent();
8683
8684 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8685 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8686 "entry block must be set to a VPRegionBlock having a non-empty entry "
8687 "VPBasicBlock");
8688
8689 // Update wide induction increments to use the same step as the corresponding
8690 // wide induction. This enables detecting induction increments directly in
8691 // VPlan and removes redundant splats.
8692 for (const auto &[Phi, ID] : Legal->getInductionVars()) {
8693 auto *IVInc = cast<Instruction>(
8694 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
8695 if (IVInc->getOperand(0) != Phi || IVInc->getOpcode() != Instruction::Add)
8696 continue;
8697 VPWidenInductionRecipe *WideIV =
8698 cast<VPWidenInductionRecipe>(RecipeBuilder.getRecipe(Phi));
8699 VPRecipeBase *R = RecipeBuilder.getRecipe(IVInc);
8700 R->setOperand(1, WideIV->getStepValue());
8701 }
8702
8703 addExitUsersForFirstOrderRecurrences(*Plan, Range);
8704 DenseMap<VPValue *, VPValue *> IVEndValues;
8705 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8706
8707 // ---------------------------------------------------------------------------
8708 // Transform initial VPlan: Apply previously taken decisions, in order, to
8709 // bring the VPlan to its final state.
8710 // ---------------------------------------------------------------------------
8711
8712 // Adjust the recipes for any inloop reductions.
8713 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8714
8715 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8716 // NaNs if possible, bail out otherwise.
8717 if (!VPlanTransforms::runPass(VPlanTransforms::handleMaxMinNumReductions,
8718 *Plan))
8719 return nullptr;
8720
8721 // Transform recipes to abstract recipes if it is legal and beneficial and
8722 // clamp the range for better cost estimation.
8723 // TODO: Enable following transform when the EVL-version of extended-reduction
8724 // and mulacc-reduction are implemented.
8725 if (!CM.foldTailWithEVL()) {
8726 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
8727 VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
8728 CostCtx, Range);
8729 }
8730
8731 for (ElementCount VF : Range)
8732 Plan->addVF(VF);
8733 Plan->setName("Initial VPlan");
8734
8735 // Interleave memory: for each Interleave Group we marked earlier as relevant
8736 // for this VPlan, replace the Recipes widening its memory instructions with a
8737 // single VPInterleaveRecipe at its insertion point.
8738 VPlanTransforms::createInterleaveGroups(*Plan,
8739 InterleaveGroups, RecipeBuilder,
8740 CM.isScalarEpilogueAllowed());
8741
8742 // Replace VPValues for known constant strides.
8743 VPlanTransforms::runPass(VPlanTransforms::replaceSymbolicStrides, *Plan, PSE,
8744 Legal->getLAI()->getSymbolicStrides());
8745
8746 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8747 return Legal->blockNeedsPredication(BB);
8748 };
8749 VPlanTransforms::runPass(VPlanTransforms::dropPoisonGeneratingRecipes, *Plan,
8750 BlockNeedsPredication);
8751
8752 // Sink users of fixed-order recurrence past the recipe defining the previous
8753 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8754 if (!VPlanTransforms::runPass(VPlanTransforms::adjustFixedOrderRecurrences,
8755 *Plan, Builder))
8756 return nullptr;
8757
8758 if (useActiveLaneMask(Style)) {
8759 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8760 // TailFoldingStyle is visible there.
8761 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8762 bool WithoutRuntimeCheck =
8763 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
8765 WithoutRuntimeCheck);
8766 }
8767 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, *PSE.getSE());
8768
8769 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8770 return Plan;
8771}
8772
8773VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8774 // Outer loop handling: outer loops may require CFG and instruction level
8775 // transformations before even evaluating whether vectorization is
8776 // profitable. Since we cannot modify the incoming IR, we need to build
8777 // VPlan upfront in the vectorization pipeline.
8778 assert(!OrigLoop->isInnermost());
8779 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8780
8781 auto Plan = VPlanTransforms::buildVPlan0(
8782 OrigLoop, *LI, Legal->getWidestInductionType(),
8783 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8784 VPlanTransforms::handleEarlyExits(*Plan,
8785 /*HasUncountableExit*/ false);
8786 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8787 /*TailFolded*/ false);
8788
8789 VPlanTransforms::createLoopRegions(*Plan);
8790
8791 for (ElementCount VF : Range)
8792 Plan->addVF(VF);
8793
8794 if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
8795 Plan,
8796 [this](PHINode *P) {
8797 return Legal->getIntOrFpInductionDescriptor(P);
8798 },
8799 *TLI))
8800 return nullptr;
8801
8802 // Collect mapping of IR header phis to header phi recipes, to be used in
8803 // addScalarResumePhis.
8804 DenseMap<VPBasicBlock *, VPValue *> BlockMaskCache;
8805 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8806 Builder, BlockMaskCache, nullptr /*LVer*/);
8807 for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8808 if (isa<VPCanonicalIVPHIRecipe>(&R))
8809 continue;
8810 auto *HeaderR = cast<VPHeaderPHIRecipe>(&R);
8811 RecipeBuilder.setRecipe(HeaderR->getUnderlyingInstr(), HeaderR);
8812 }
8813 DenseMap<VPValue *, VPValue *> IVEndValues;
8814 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8815 // values.
8816 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8817
8818 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8819 return Plan;
8820}
8821
8822// Adjust the recipes for reductions. For in-loop reductions, the chain of
8823// instructions leading from the loop exit instr to the phi needs to be
8824// converted to reductions, with one operand being vector and the other being
8825// the scalar reduction chain. For other reductions, a select is introduced
8826// between the phi and users outside the vector region when folding the tail.
8827//
8828// A ComputeReductionResult recipe is added to the middle block, also for
8829// in-loop reductions which compute their result in-loop, because generating
8830// the subsequent bc.merge.rdx phi is driven by ComputeReductionResult recipes.
8831//
8832// Adjust AnyOf reductions; replace the reduction phi for the selected value
8833// with a boolean reduction phi node to check if the condition is true in any
8834// iteration. The final value is selected by the final ComputeReductionResult.
8835void LoopVectorizationPlanner::adjustRecipesForReductions(
8836 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8837 using namespace VPlanPatternMatch;
8838 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8839 VPBasicBlock *Header = VectorLoopRegion->getEntryBasicBlock();
8840 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8841 SmallVector<VPRecipeBase *> ToDelete;
8842
8843 for (VPRecipeBase &R : Header->phis()) {
8844 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8845 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8846 continue;
8847
8848 RecurKind Kind = PhiR->getRecurrenceKind();
8849 assert(
8850 !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
8851 !RecurrenceDescriptor::isFindIVRecurrenceKind(Kind) &&
8852 "AnyOf and FindIV reductions are not allowed for in-loop reductions");
8853
8854 // Collect the chain of "link" recipes for the reduction starting at PhiR.
8855 SetVector<VPSingleDefRecipe *> Worklist;
8856 Worklist.insert(PhiR);
8857 for (unsigned I = 0; I != Worklist.size(); ++I) {
8858 VPSingleDefRecipe *Cur = Worklist[I];
8859 for (VPUser *U : Cur->users()) {
8860 auto *UserRecipe = cast<VPSingleDefRecipe>(U);
8861 if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
8862 assert((UserRecipe->getParent() == MiddleVPBB ||
8863 UserRecipe->getParent() == Plan->getScalarPreheader()) &&
8864 "U must be either in the loop region, the middle block or the "
8865 "scalar preheader.");
8866 continue;
8867 }
8868 Worklist.insert(UserRecipe);
8869 }
8870 }
8871
8872 // Visit operation "Links" along the reduction chain top-down starting from
8873 // the phi until LoopExitValue. We keep track of the previous item
8874 // (PreviousLink) to tell which of the two operands of a Link will remain
8875 // scalar and which will be reduced. For minmax by select(cmp), Link will be
8876 // the select instructions. Blend recipes of in-loop reduction phis will
8877 // get folded to their non-phi operand, as the reduction recipe handles the
8878 // condition directly.
8879 VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
8880 for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
8881 if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
8882 assert(Blend->getNumIncomingValues() == 2 &&
8883 "Blend must have 2 incoming values");
8884 if (Blend->getIncomingValue(0) == PhiR) {
8885 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8886 } else {
8887 assert(Blend->getIncomingValue(1) == PhiR &&
8888 "PhiR must be an operand of the blend");
8889 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8890 }
8891 continue;
8892 }
8893
8894 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8895
8896 // Index of the first operand which holds a non-mask vector operand.
8897 unsigned IndexOfFirstOperand;
8898 // Recognize a call to the llvm.fmuladd intrinsic.
8899 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
8900 VPValue *VecOp;
8901 VPBasicBlock *LinkVPBB = CurrentLink->getParent();
8902 if (IsFMulAdd) {
8903 assert(
8904 RecurrenceDescriptor::isFMulAddIntrinsic(CurrentLinkI) &&
8905 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8906 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8907 isa<VPWidenIntrinsicRecipe>(CurrentLink)) &&
8908 CurrentLink->getOperand(2) == PreviousLink &&
8909 "expected a call where the previous link is the added operand");
8910
8911 // If the instruction is a call to the llvm.fmuladd intrinsic then we
8912 // need to create an fmul recipe (multiplying the first two operands of
8913 // the fmuladd together) to use as the vector operand for the fadd
8914 // reduction.
8915 VPInstruction *FMulRecipe = new VPInstruction(
8916 Instruction::FMul,
8917 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8918 CurrentLinkI->getFastMathFlags());
8919 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8920 VecOp = FMulRecipe;
8921 } else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
8922 CurrentLinkI->getOpcode() == Instruction::Sub) {
8923 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8924 auto *Zero = Plan->getOrAddLiveIn(ConstantInt::get(PhiTy, 0));
8925 VPWidenRecipe *Sub = new VPWidenRecipe(
8926 Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
8927 VPIRMetadata(), CurrentLinkI->getDebugLoc());
8928 Sub->setUnderlyingValue(CurrentLinkI);
8929 LinkVPBB->insert(Sub, CurrentLink->getIterator());
8930 VecOp = Sub;
8931 } else {
8932 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8933 if (isa<VPWidenRecipe>(CurrentLink)) {
8934 assert(isa<CmpInst>(CurrentLinkI) &&
8935 "need to have the compare of the select");
8936 continue;
8937 }
8938 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8939 "must be a select recipe");
8940 IndexOfFirstOperand = 1;
8941 } else {
8942 assert((MinVF.isScalar() || isa<VPWidenRecipe>(CurrentLink)) &&
8943 "Expected to replace a VPWidenSC");
8944 IndexOfFirstOperand = 0;
8945 }
8946 // Note that for non-commutable operands (cmp-selects), the semantics of
8947 // the cmp-select are captured in the recurrence kind.
8948 unsigned VecOpId =
8949 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8950 ? IndexOfFirstOperand + 1
8951 : IndexOfFirstOperand;
8952 VecOp = CurrentLink->getOperand(VecOpId);
8953 assert(VecOp != PreviousLink &&
8954 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8955 (VecOpId - IndexOfFirstOperand)) ==
8956 PreviousLink &&
8957 "PreviousLink must be the operand other than VecOp");
8958 }
8959
8960 VPValue *CondOp = nullptr;
8961 if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
8962 CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
8963
8964 // TODO: Retrieve FMFs from recipes directly.
8965 RecurrenceDescriptor RdxDesc = Legal->getRecurrenceDescriptor(
8966 cast<PHINode>(PhiR->getUnderlyingInstr()));
8967 // Non-FP RdxDescs will have all fast math flags set, so clear them.
8968 FastMathFlags FMFs = isa<FPMathOperator>(CurrentLinkI)
8969 ? RdxDesc.getFastMathFlags()
8970 : FastMathFlags();
8971 auto *RedRecipe = new VPReductionRecipe(
8972 Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
8973 PhiR->isOrdered(), CurrentLinkI->getDebugLoc());
8974 // Append the recipe to the end of the VPBasicBlock because we need to
8975 // ensure that it comes after all of its inputs, including CondOp.
8976 // Delete CurrentLink as it will be invalid if its operand is replaced
8977 // with a reduction defined at the bottom of the block in the next link.
8978 if (LinkVPBB->getNumSuccessors() == 0)
8979 RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
8980 else
8981 LinkVPBB->appendRecipe(RedRecipe);
8982
8983 CurrentLink->replaceAllUsesWith(RedRecipe);
8984 ToDelete.push_back(CurrentLink);
8985 PreviousLink = RedRecipe;
8986 }
8987 }
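// Shorthand for the in-loop transformation above (illustrative): for an
// in-loop add reduction 'sum += a[i]', the widened add link becomes a
// VPReductionRecipe computing 'sum.next = sum + reduce.add(vec.a)' (under a
// mask if the block is predicated), keeping the accumulator scalar in the
// loop instead of carrying a vector of partial sums.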
8988 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8989 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8990 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8991 for (VPRecipeBase &R :
8992 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8993 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8994 if (!PhiR)
8995 continue;
8996
8997 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8998 cast<PHINode>(PhiR->getUnderlyingInstr()));
8999 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
9000 // If tail is folded by masking, introduce selects between the phi
9001 // and the users outside the vector region of each reduction, at the
9002 // beginning of the dedicated latch block.
9003 auto *OrigExitingVPV = PhiR->getBackedgeValue();
9004 auto *NewExitingVPV = PhiR->getBackedgeValue();
9005 // Don't output selects for partial reductions because they have an output
9006 // with fewer lanes than the VF. So the operands of the select would have
9007 // different numbers of lanes. Partial reductions mask the input instead.
9008 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
9009 !isa<VPPartialReductionRecipe>(OrigExitingVPV->getDefiningRecipe())) {
9010 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
9011 std::optional<FastMathFlags> FMFs =
9012 PhiTy->isFloatingPointTy()
9013 ? std::make_optional(RdxDesc.getFastMathFlags())
9014 : std::nullopt;
9015 NewExitingVPV =
9016 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
9017 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
9018 return isa<VPInstruction>(&U) &&
9019 (cast<VPInstruction>(&U)->getOpcode() ==
9020 VPInstruction::ComputeReductionResult ||
9021 cast<VPInstruction>(&U)->getOpcode() ==
9022 VPInstruction::ComputeAnyOfResult ||
9023 cast<VPInstruction>(&U)->getOpcode() ==
9024 VPInstruction::ComputeFindIVResult);
9025 });
9026 if (CM.usePredicatedReductionSelect())
9027 PhiR->setOperand(1, NewExitingVPV);
9028 }
9029
9030 // We want code in the middle block to appear to execute on the location of
9031 // the scalar loop's latch terminator because: (a) it is all compiler
9032 // generated, (b) these instructions are always executed after evaluating
9033 // the latch conditional branch, and (c) other passes may add new
9034 // predecessors which terminate on this line. This is the easiest way to
9035 // ensure we don't accidentally cause an extra step back into the loop while
9036 // debugging.
9037 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
9038
9039 // TODO: At the moment ComputeReductionResult also drives creation of the
9040 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
9041 // even for in-loop reductions, until the reduction resume value handling is
9042 // also modeled in VPlan.
9043 VPInstruction *FinalReductionResult;
9044 VPBuilder::InsertPointGuard Guard(Builder);
9045 Builder.setInsertPoint(MiddleVPBB, IP);
9046 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
9047 if (RecurrenceDescriptor::isFindIVRecurrenceKind(RecurrenceKind)) {
9048 VPValue *Start = PhiR->getStartValue();
9049 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
9050 FinalReductionResult =
9051 Builder.createNaryOp(VPInstruction::ComputeFindIVResult,
9052 {PhiR, Start, Sentinel, NewExitingVPV}, ExitDL);
9053 } else if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9054 VPValue *Start = PhiR->getStartValue();
9055 FinalReductionResult =
9056 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
9057 {PhiR, Start, NewExitingVPV}, ExitDL);
9058 } else {
9059 VPIRFlags Flags =
9060 RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurrenceKind)
9061 ? VPIRFlags(RdxDesc.getFastMathFlags())
9062 : VPIRFlags();
9063 FinalReductionResult =
9064 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
9065 {PhiR, NewExitingVPV}, Flags, ExitDL);
9066 }
9067 // If the vector reduction can be performed in a smaller type, we truncate
9068 // then extend the loop exit value to enable InstCombine to evaluate the
9069 // entire expression in the smaller type.
9070 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
9071 !RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9072 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
9073 assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(RecurrenceKind) &&
9074 "Unexpected truncated min-max recurrence!");
9075 Type *RdxTy = RdxDesc.getRecurrenceType();
9076 auto *Trunc =
9077 new VPWidenCastRecipe(Instruction::Trunc, NewExitingVPV, RdxTy);
9078 Instruction::CastOps ExtendOpc =
9079 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
9080 auto *Extnd = new VPWidenCastRecipe(ExtendOpc, Trunc, PhiTy);
9081 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
9082 Extnd->insertAfter(Trunc);
9083 if (PhiR->getOperand(1) == NewExitingVPV)
9084 PhiR->setOperand(1, Extnd->getVPSingleValue());
9085
9086 // Update ComputeReductionResult with the truncated exiting value and
9087 // extend its result.
9088 FinalReductionResult->setOperand(1, Trunc);
9089 FinalReductionResult =
9090 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
9091 }
9092
9093 // Update all users outside the vector region. Also replace redundant
9094 // ExtractLastElement.
9095 for (auto *U : to_vector(OrigExitingVPV->users())) {
9096 auto *Parent = cast<VPRecipeBase>(U)->getParent();
9097 if (FinalReductionResult == U || Parent->getParent())
9098 continue;
9099 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
9100 if (match(U, m_ExtractLastElement(m_VPValue())))
9101 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
9102 }
9103
9104 // Adjust AnyOf reductions; replace the reduction phi for the selected value
9105 // with a boolean reduction phi node to check if the condition is true in
9106 // any iteration. The final value is selected by the final
9107 // ComputeReductionResult.
9108 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9109 auto *Select = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
9110 return isa<VPWidenSelectRecipe>(U) ||
9111 (isa<VPReplicateRecipe>(U) &&
9112 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
9113 Instruction::Select);
9114 }));
9115 VPValue *Cmp = Select->getOperand(0);
9116 // If the compare is checking the reduction PHI node, adjust it to check
9117 // the start value.
9118 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
9119 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
9120 Builder.setInsertPoint(Select);
9121
9122 // If the true value of the select is the reduction phi, the new value is
9123 // selected if the negated condition is true in any iteration.
9124 if (Select->getOperand(1) == PhiR)
9125 Cmp = Builder.createNot(Cmp);
9126 VPValue *Or = Builder.createOr(PhiR, Cmp);
9127 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9128 // Delete Select now that it has invalid types.
9129 ToDelete.push_back(Select);
9130
9131 // Convert the reduction phi to operate on bools.
9132 PhiR->setOperand(0, Plan->getOrAddLiveIn(ConstantInt::getFalse(
9133 OrigLoop->getHeader()->getContext())));
9134 continue;
9135 }
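// A minimal sketch of the rewrite above, with assumed (not source-derived)
// names and values: for a loop such as
//   int r = start;
//   for (int i = 0; i < n; ++i)
//     if (a[i] > 3) r = 42;
// the select feeding the phi ('r = (a[i] > 3) ? 42 : r') is replaced by a
// boolean reduction 'any = any | (a[i] > 3)' with a 'false' start value,
// and ComputeAnyOfResult then picks 42 or the start value based on 'any'.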
9136
9137 if (RecurrenceDescriptor::isFindIVRecurrenceKind(
9138 RdxDesc.getRecurrenceKind())) {
9139 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
9140 // the sentinel value after generating the ResumePhi recipe, which uses
9141 // the original start value.
9142 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
9143 }
9144 RecurKind RK = RdxDesc.getRecurrenceKind();
9145 if ((!RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) &&
9146 !RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
9147 !RecurrenceDescriptor::isMinMaxRecurrenceKind(RK))) {
9148 VPBuilder PHBuilder(Plan->getVectorPreheader());
9149 VPValue *Iden = Plan->getOrAddLiveIn(
9150 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
9151 // If the PHI is used by a partial reduction, set the scale factor.
9152 unsigned ScaleFactor =
9153 RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
9154 .value_or(1);
9155 Type *I32Ty = IntegerType::getInt32Ty(PhiTy->getContext());
9156 auto *ScaleFactorVPV =
9157 Plan->getOrAddLiveIn(ConstantInt::get(I32Ty, ScaleFactor));
9158 VPValue *StartV = PHBuilder.createNaryOp(
9159 VPInstruction::ReductionStartVector,
9160 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
9161 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
9162 : FastMathFlags());
9163 PhiR->setOperand(0, StartV);
9164 }
9165 }
9166 for (VPRecipeBase *R : ToDelete)
9167 R->eraseFromParent();
9168
9170}
9171
9172void LoopVectorizationPlanner::attachRuntimeChecks(
9173 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
9174 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
9175 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
9176 assert((!CM.OptForSize ||
9177 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
9178 "Cannot SCEV check stride or overflow when optimizing for size");
9179 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
9180 HasBranchWeights);
9181 }
9182 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
9183 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
9184 // VPlan-native path does not do any analysis for runtime checks
9185 // currently.
9186 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
9187 "Runtime checks are not supported for outer loops yet");
9188
9189 if (CM.OptForSize) {
9190 assert(
9191 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
9192 "Cannot emit memory checks when optimizing for size, unless forced "
9193 "to vectorize.");
9194 ORE->emit([&]() {
9195 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
9196 OrigLoop->getStartLoc(),
9197 OrigLoop->getHeader())
9198 << "Code-size may be reduced by not forcing "
9199 "vectorization, or by source-code modifications "
9200 "eliminating the need for runtime checks "
9201 "(e.g., adding 'restrict').";
9202 });
9203 }
9204 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
9205 HasBranchWeights);
9206 }
9207}
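// Illustrative example (not from this file) of the source change the remark
// above suggests: with 'restrict'-qualified pointers the compiler can prove
// the accesses don't alias, so no memory runtime checks are needed:
//   void saxpy(int n, float a, float *restrict x, float *restrict y) {
//     for (int i = 0; i < n; ++i)
//       y[i] = a * x[i] + y[i];
//   }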
9208
9209 void LoopVectorizationPlanner::addMinimumIterationCheck(
9210 VPlan &Plan, ElementCount VF, unsigned UF,
9211 ElementCount MinProfitableTripCount) const {
9212 // vscale is not necessarily a power-of-2, which means we cannot guarantee
9213 // an overflow to zero when updating induction variables and so an
9214 // additional overflow check is required before entering the vector loop.
9215 bool IsIndvarOverflowCheckNeededForVF =
9216 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
9217 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
9218 CM.getTailFoldingStyle() !=
9219 TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
9220 const uint32_t *BranchWeights =
9221 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
9222 ? MinItersBypassWeights
9223 : nullptr;
9224 VPlanTransforms::addMinimumIterationCheck(
9225 Plan, VF, UF, MinProfitableTripCount,
9226 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
9227 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeights,
9228 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
9229 *PSE.getSE());
9230}
9231
9232 void VPDerivedIVRecipe::execute(VPTransformState &State) {
9233 assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
9234
9235 // Fast-math-flags propagate from the original induction instruction.
9236 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9237 if (FPBinOp)
9238 State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
9239
9240 Value *Step = State.get(getStepValue(), VPLane(0));
9241 Value *Index = State.get(getOperand(1), VPLane(0));
9242 Value *DerivedIV = emitTransformedIndex(
9243 State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
9244 cast_if_present<BinaryOperator>(FPBinOp));
9245 DerivedIV->setName(Name);
9246 State.set(this, DerivedIV, VPLane(0));
9247}
9248
9249// Determine how to lower the scalar epilogue, which depends on 1) optimising
9250// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9251// predication, and 4) a TTI hook that analyses whether the loop is suitable
9252// for predication.
9253 static ScalarEpilogueLowering getScalarEpilogueLowering(
9254 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9255 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9256 LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI) {
9257 // 1) OptSize takes precedence over all other options, i.e. if this is set,
9258 // don't look at hints or options, and don't request a scalar epilogue.
9259 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9260 // LoopAccessInfo (due to code dependency and not being able to reliably get
9261 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9262 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9263 // versioning when the vectorization is forced, unlike hasOptSize. So revert
9264 // back to the old way and vectorize with versioning when forced. See D81345.)
9265 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9266 PGSOQueryType::IRPass) &&
9267 Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9268 return CM_ScalarEpilogueNotAllowedOptSize;
9269
9270 // 2) If set, obey the directives
9271 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9272 switch (PreferPredicateOverEpilogue) {
9273 case PreferPredicateTy::ScalarEpilogue:
9274 return CM_ScalarEpilogueAllowed;
9275 case PreferPredicateTy::PredicateElseScalarEpilogue:
9276 return CM_ScalarEpilogueNotNeededUsePredicate;
9277 case PreferPredicateTy::PredicateOrDontVectorize:
9278 return CM_ScalarEpilogueNotAllowedUsePredicate;
9279 };
9280 }
9281
9282 // 3) If set, obey the hints
9283 switch (Hints.getPredicate()) {
9284 case LoopVectorizeHints::FK_Enabled:
9285 return CM_ScalarEpilogueNotNeededUsePredicate;
9286 case LoopVectorizeHints::FK_Disabled:
9287 return CM_ScalarEpilogueAllowed;
9288 };
9289
9290 // 4) if the TTI hook indicates this is profitable, request predication.
9291 TailFoldingInfo TFI(TLI, &LVL, IAI);
9292 if (TTI->preferPredicateOverEpilogue(&TFI))
9293 return CM_ScalarEpilogueNotNeededUsePredicate;
9294
9295 return CM_ScalarEpilogueAllowed;
9296 }
9297
9298// Process the loop in the VPlan-native vectorization path. This path builds
9299// VPlan upfront in the vectorization pipeline, which allows to apply
9300// VPlan-to-VPlan transformations from the very beginning without modifying the
9301// input LLVM IR.
9302 static bool processLoopInVPlanNativePath(
9303 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9304 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9305 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9306 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9307 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9308 LoopVectorizationRequirements &Requirements) {
9309
9310 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9311 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9312 return false;
9313 }
9314 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9315 Function *F = L->getHeader()->getParent();
9316 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9317
9318 ScalarEpilogueLowering SEL =
9319 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, *LVL, &IAI);
9320
9321 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9322 &Hints, IAI, PSI, BFI);
9323 // Use the planner for outer loop vectorization.
9324 // TODO: CM is not used at this point inside the planner. Turn CM into an
9325 // optional argument if we don't need it in the future.
9326 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9327 ORE);
9328
9329 // Get user vectorization factor.
9330 ElementCount UserVF = Hints.getWidth();
9331
9332 CM.collectElementTypesForWidening();
9333
9334 // Plan how to best vectorize, return the best VF and its cost.
9335 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9336
9337 // If we are stress testing VPlan builds, do not attempt to generate vector
9338 // code. Masked vector code generation support will follow soon.
9339 // Also, do not attempt to vectorize if no vector code will be produced.
9340 if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
9341 return false;
9342
9343 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9344
9345 {
9346 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9347 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
9348 BFI, PSI, Checks, BestPlan);
9349 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9350 << L->getHeader()->getParent()->getName() << "\"\n");
9351 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
9352 VF.MinProfitableTripCount);
9353
9354 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
9355 }
9356
9357 reportVectorization(ORE, L, VF, 1);
9358
9359 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9360 return true;
9361}
9362
9363// Emit a remark if there are stores to floats that required a floating point
9364// extension. If the vectorized loop was generated with floating point there
9365// will be a performance penalty from the conversion overhead and the change in
9366// the vector width.
9367 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9368 SmallVector<Instruction *, 4> Worklist;
9369 for (BasicBlock *BB : L->getBlocks()) {
9370 for (Instruction &Inst : *BB) {
9371 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9372 if (S->getValueOperand()->getType()->isFloatTy())
9373 Worklist.push_back(S);
9374 }
9375 }
9376 }
9377
9378 // Traverse the floating point stores upwards, searching for floating point
9379 // conversions.
9380 SmallPtrSet<const Instruction *, 4> Visited;
9381 SmallPtrSet<const Instruction *, 4> EmittedRemark;
9382 while (!Worklist.empty()) {
9383 auto *I = Worklist.pop_back_val();
9384 if (!L->contains(I))
9385 continue;
9386 if (!Visited.insert(I).second)
9387 continue;
9388
9389 // Emit a remark if the floating point store required a floating
9390 // point conversion.
9391 // TODO: More work could be done to identify the root cause such as a
9392 // constant or a function return type and point the user to it.
9393 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9394 ORE->emit([&]() {
9395 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9396 I->getDebugLoc(), L->getHeader())
9397 << "floating point conversion changes vector width. "
9398 << "Mixed floating point precision requires an up/down "
9399 << "cast that will negatively impact performance.";
9400 });
9401
9402 for (Use &Op : I->operands())
9403 if (auto *OpI = dyn_cast<Instruction>(Op))
9404 Worklist.push_back(OpI);
9405 }
9406}
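// Illustrative example (not from the source) of code that triggers the
// remark above: with float arrays 'a' and 'b', the double literal forces an
// fpext of b[i] to double and an fptrunc back to float before the store,
// halving the effective vector width:
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] * 2.0;   // 2.0 is double; writing 2.0f stays in float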
9407
9408/// For loops with uncountable early exits, find the cost of doing work when
9409/// exiting the loop early, such as calculating the final exit values of
9410/// variables used outside the loop.
9411/// TODO: This is currently overly pessimistic because the loop may not take
9412/// the early exit, but better to keep this conservative for now. In future,
9413/// it might be possible to relax this by using branch probabilities.
9414 static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx,
9415 VPlan &Plan, ElementCount VF) {
9416 InstructionCost Cost = 0;
9417 for (auto *ExitVPBB : Plan.getExitBlocks()) {
9418 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
9419 // If the predecessor is not the middle.block, then it must be the
9420 // vector.early.exit block, which may contain work to calculate the exit
9421 // values of variables used outside the loop.
9422 if (PredVPBB != Plan.getMiddleBlock()) {
9423 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
9424 << PredVPBB->getName() << ":\n");
9425 Cost += PredVPBB->cost(VF, CostCtx);
9426 }
9427 }
9428 }
9429 return Cost;
9430}
9431
9432/// This function determines whether or not it's still profitable to vectorize
9433/// the loop given the extra work we have to do outside of the loop:
9434/// 1. Perform the runtime checks before entering the loop to ensure it's safe
9435/// to vectorize.
9436/// 2. In the case of loops with uncountable early exits, we may have to do
9437/// extra work when exiting the loop early, such as calculating the final
9438/// exit values of variables used outside the loop.
9439static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
9440 VectorizationFactor &VF, Loop *L,
9441 PredicatedScalarEvolution &PSE,
9442 VPCostContext &CostCtx, VPlan &Plan,
9443 ScalarEpilogueLowering SEL,
9444 std::optional<unsigned> VScale) {
9445 InstructionCost TotalCost = Checks.getCost();
9446 if (!TotalCost.isValid())
9447 return false;
9448
9449 // Add on the cost of any work required in the vector early exit block, if
9450 // one exists.
9451 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
9452
9453 // When interleaving only, scalar and vector cost will be equal, which in
9454 // turn would lead to a divide by 0. Fall back to hard threshold.
9455 if (VF.Width.isScalar()) {
9456 // TODO: Should we rename VectorizeMemoryCheckThreshold?
9457 if (TotalCost > VectorizeMemoryCheckThreshold) {
9458 LLVM_DEBUG(
9459 dbgs()
9460 << "LV: Interleaving only is not profitable due to runtime checks\n");
9461 return false;
9462 }
9463 return true;
9464 }
9465
9466 // The scalar cost should only be 0 when vectorizing with a user specified
9467 // VF/IC. In those cases, runtime checks should always be generated.
9468 uint64_t ScalarC = VF.ScalarCost.getValue();
9469 if (ScalarC == 0)
9470 return true;
9471
9472 // First, compute the minimum iteration count required so that the vector
9473 // loop outperforms the scalar loop.
9474 // The total cost of the scalar loop is
9475 // ScalarC * TC
9476 // where
9477 // * TC is the actual trip count of the loop.
9478 // * ScalarC is the cost of a single scalar iteration.
9479 //
9480 // The total cost of the vector loop is
9481 // RtC + VecC * (TC / VF) + EpiC
9482 // where
9483 // * RtC is the cost of the generated runtime checks plus the cost of
9484 // performing any additional work in the vector.early.exit block for loops
9485 // with uncountable early exits.
9486 // * VecC is the cost of a single vector iteration.
9487 // * TC is the actual trip count of the loop
9488 // * VF is the vectorization factor
9489 // * EpiC is the cost of the generated epilogue, including the cost
9490 // of the remaining scalar operations.
9491 //
9492 // Vectorization is profitable once the total vector cost is less than the
9493 // total scalar cost:
9494 // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
9495 //
9496 // Now we can compute the minimum required trip count TC as
9497 // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
9498 //
9499 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
9500 // the computations use integer ceiling division, so the result is rounded
9501 // up and we get an upper estimate of the TC.
9502 unsigned IntVF = estimateElementCount(VF.Width, VScale);
9503 uint64_t RtC = TotalCost.getValue();
9504 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
9505 uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
9506
9507 // Second, compute a minimum iteration count so that the cost of the
9508 // runtime checks is only a fraction of the total scalar loop cost. This
9509 // adds a loop-dependent bound on the overhead incurred if the runtime
9510 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
9511 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
9512 // cost, compute
9513 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
9514 uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC);
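// Worked example with illustrative numbers (not from the source): for
// ScalarC = 4, VecC = 12, IntVF = 8 and RtC = 40, the loop-cost bound is
// MinTC1 = ceil(40 * 8 / (4 * 8 - 12)) = ceil(320 / 20) = 16, while the
// check-overhead bound (X = 10) is MinTC2 = ceil(40 * 10 / 4) = 100, so
// MinTC2 dominates and at least 100 iterations are required.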
9515
9516 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
9517 // epilogue is allowed, choose the next closest multiple of VF. This should
9518 // partly compensate for ignoring the epilogue cost.
9519 uint64_t MinTC = std::max(MinTC1, MinTC2);
9520 if (SEL == CM_ScalarEpilogueAllowed)
9521 MinTC = alignTo(MinTC, IntVF);
9522 VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
9523
9524 LLVM_DEBUG(
9525 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9526 << VF.MinProfitableTripCount << "\n");
9527
9528 // Skip vectorization if the expected trip count is less than the minimum
9529 // required trip count.
9530 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9531 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9532 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9533 "trip count < minimum profitable VF ("
9534 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9535 << ")\n");
9536
9537 return false;
9538 }
9539 }
9540 return true;
9541}
9542
9543 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9544 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9545 !EnableLoopInterleaving),
9546 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9547 !EnableLoopVectorization) {}
9548
9549/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9550/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9551/// don't have a corresponding wide induction in \p EpiPlan.
9552static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9553 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9554 // will need their resume-values computed in the main vector loop. Others
9555 // can be removed from the main VPlan.
9556 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9557 for (VPRecipeBase &R :
9558 EpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9559 if (!isa<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>(&R))
9560 continue;
9561 EpiWidenedPhis.insert(
9562 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9563 }
9564 for (VPRecipeBase &R :
9565 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9566 auto *VPIRInst = cast<VPIRPhi>(&R);
9567 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9568 continue;
9569 // There is no corresponding wide induction in the epilogue plan that would
9570 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9571 // together with the corresponding ResumePhi. The resume values for the
9572 // scalar loop will be created during execution of EpiPlan.
9573 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9574 VPIRInst->eraseFromParent();
9575 ResumePhi->eraseFromParent();
9576 }
9576 }
9578
9579 using namespace VPlanPatternMatch;
9580 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9581 // introduce multiple uses of undef/poison. If the reduction start value may
9582 // be undef or poison it needs to be frozen and the frozen start has to be
9583 // used when computing the reduction result. We also need to use the frozen
9584 // value in the resume phi generated by the main vector loop, as this is also
9585 // used to compute the reduction result after the epilogue vector loop.
9586 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9587 bool UpdateResumePhis) {
9588 VPBuilder Builder(Plan.getEntry());
9589 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9590 auto *VPI = dyn_cast<VPInstruction>(&R);
9591 if (!VPI || VPI->getOpcode() != VPInstruction::ComputeFindIVResult)
9592 continue;
9593 VPValue *OrigStart = VPI->getOperand(1);
9594 if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
9595 continue;
9596 VPInstruction *Freeze =
9597 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9598 VPI->setOperand(1, Freeze);
9599 if (UpdateResumePhis)
9600 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9601 return Freeze != &U && isa<VPPhi>(&U);
9602 });
9603 }
9604 };
9605 AddFreezeForFindLastIVReductions(MainPlan, true);
9606 AddFreezeForFindLastIVReductions(EpiPlan, false);
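// A small illustration of the freeze above (assumed IR, not from the
// source): if the start value may be 'undef', each of its uses may observe
// a different concrete value, so the main and epilogue ComputeFindIVResult
// could disagree. Emitting
//   %fr = freeze i32 %start
// and using %fr both in the reduction result and in the main loop's resume
// phi pins a single consistent value.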
9607
9608 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9609 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9610 // If there is a suitable resume value for the canonical induction in the
9611 // scalar (which will become vector) epilogue loop, use it and move it to the
9612 // beginning of the scalar preheader. Otherwise create it below.
9613 auto ResumePhiIter =
9614 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9615 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9616 m_ZeroInt()));
9617 });
9618 VPPhi *ResumePhi = nullptr;
9619 if (ResumePhiIter == MainScalarPH->phis().end()) {
9620 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9621 ResumePhi = ScalarPHBuilder.createScalarPhi(
9622 {VectorTC, MainPlan.getCanonicalIV()->getStartValue()}, {},
9623 "vec.epilog.resume.val");
9624 } else {
9625 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9626 if (MainScalarPH->begin() == MainScalarPH->end())
9627 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9628 else if (&*MainScalarPH->begin() != ResumePhi)
9629 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9630 }
9631 // Add a user to make sure the resume phi won't get removed.
9632 VPBuilder(MainScalarPH)
9633 .createNaryOp(VPInstruction::ResumeForEpilogue, {ResumePhi});
9634 }
9635
9636/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9637/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes.
9638 static void
9639 preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L,
9640 const SCEV2ValueTy &ExpandedSCEVs,
9641 EpilogueLoopVectorizationInfo &EPI) {
9642 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9643 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9644 Header->setName("vec.epilog.vector.body");
9645
9646 DenseMap<Value *, Value *> ToFrozen;
9647 // Ensure that the start values for all header phi recipes are updated before
9648 // vectorizing the epilogue loop.
9649 for (VPRecipeBase &R : Header->phis()) {
9650 if (auto *IV = dyn_cast<VPCanonicalIVPHIRecipe>(&R)) {
9651 // When vectorizing the epilogue loop, the canonical induction start
9652 // value needs to be changed from zero to the value after the main
9653 // vector loop. Find the resume value created during execution of the main
9654 // VPlan. It must be the first phi in the loop preheader.
9655 // FIXME: Improve modeling for canonical IV start values in the epilogue
9656 // loop.
9657 using namespace llvm::PatternMatch;
9658 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9659 for (Value *Inc : EPResumeVal->incoming_values()) {
9660 if (match(Inc, m_SpecificInt(0)))
9661 continue;
9662 assert(!EPI.VectorTripCount &&
9663 "Must only have a single non-zero incoming value");
9664 EPI.VectorTripCount = Inc;
9665 }
9666 // If we didn't find a non-zero vector trip count, all incoming values
9667 // must be zero, which also means the vector trip count is zero. Pick the
9668 // first zero as vector trip count.
9669 // TODO: We should not choose VF * UF so the main vector loop is known to
9670 // be dead.
9671 if (!EPI.VectorTripCount) {
9672 assert(
9673 EPResumeVal->getNumIncomingValues() > 0 &&
9674 all_of(EPResumeVal->incoming_values(),
9675 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9676 "all incoming values must be 0");
9677 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9678 }
9679 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9680 assert(all_of(IV->users(),
9681 [](const VPUser *U) {
9682 return isa<VPScalarIVStepsRecipe>(U) ||
9683 isa<VPDerivedIVRecipe>(U) ||
9684 cast<VPRecipeBase>(U)->isScalarCast() ||
9685 cast<VPInstruction>(U)->getOpcode() ==
9686 Instruction::Add;
9687 }) &&
9688 "the canonical IV should only be used by its increment or "
9689 "ScalarIVSteps when resetting the start value");
9690 IV->setOperand(0, VPV);
9691 continue;
9692 }
9693
9694 Value *ResumeV = nullptr;
9695 // TODO: Move setting of resume values to prepareToExecute.
9696 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9697 auto *RdxResult =
9698 cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
9699 auto *VPI = dyn_cast<VPInstruction>(U);
9700 return VPI &&
9701 (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9702 VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
9703 VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
9704 }));
9705 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9706 ->getIncomingValueForBlock(L->getLoopPreheader());
9707 RecurKind RK = ReductionPhi->getRecurrenceKind();
9708 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
9709 Value *StartV = RdxResult->getOperand(1)->getLiveInIRValue();
9710 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9711 // start value; compare the final value from the main vector loop
9712 // to the start value.
9713 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9714 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9715 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9716 } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(RK)) {
9717 Value *StartV = getStartValueFromReductionResult(RdxResult);
9718 ToFrozen[StartV] = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9719 EPI.MainLoopIterationCountCheck);
9720
9721 // VPReductionPHIRecipe for FindFirstIV/FindLastIV reductions requires
9722 // an adjustment to the resume value. The resume value is adjusted to
9723 // the sentinel value when the final value from the main vector loop
9724 // equals the start value. This ensures correctness when the start value
9725 // might not be less than the minimum value of a monotonically
9726 // increasing induction variable.
9727 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9728 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9729 Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]);
9730 Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue();
9731 ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV);
9732 } else {
9733 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9734 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9735 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9736 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
9737 "unexpected start value");
9738 VPI->setOperand(0, StartVal);
9739 continue;
9740 }
9741 }
9742 } else {
9743 // Retrieve the induction resume values for wide inductions from
9744 // their original phi nodes in the scalar loop.
9745 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9746 // Hook up to the PHINode generated by a ResumePhi recipe of main
9747 // loop VPlan, which feeds the scalar loop.
9748 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9749 }
9750 assert(ResumeV && "Must have a resume value");
9751 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9752 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9753 }
9754
9755 // For some VPValues in the epilogue plan we must re-use the generated IR
9756 // values from the main plan. Replace them with live-in VPValues.
9757 // TODO: This is a workaround needed for epilogue vectorization and it
9758 // should be removed once induction resume value creation is done
9759 // directly in VPlan.
9760 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9761 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9762 // epilogue plan. This ensures all users use the same frozen value.
9763 auto *VPI = dyn_cast<VPInstruction>(&R);
9764 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9765 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9766 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9767 continue;
9768 }
9769
9770 // Re-use the trip count and steps expanded for the main loop, as
9771 // skeleton creation needs it as a value that dominates both the scalar
9772 // and vector epilogue loops
9773 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9774 if (!ExpandR)
9775 continue;
9776 VPValue *ExpandedVal =
9777 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9778 ExpandR->replaceAllUsesWith(ExpandedVal);
9779 if (Plan.getTripCount() == ExpandR)
9780 Plan.resetTripCount(ExpandedVal);
9781 ExpandR->eraseFromParent();
9782 }
9783}
9784
9785// Generate bypass values from the additional bypass block. Note that when the
9786// vectorized epilogue is skipped due to iteration count check, then the
9787// resume value for the induction variable comes from the trip count of the
9788// main vector loop, passed as the second argument.
9790 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9791 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9792 Instruction *OldInduction) {
9793 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9794 // For the primary induction the additional bypass end value is known.
9795 // Otherwise it is computed.
9796 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9797 if (OrigPhi != OldInduction) {
9798 auto *BinOp = II.getInductionBinOp();
9799 // Fast-math-flags propagate from the original induction instruction.
9800 if (isa_and_nonnull<FPMathOperator>(BinOp))
9801 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9802
9803 // Compute the end value for the additional bypass.
9804 EndValueFromAdditionalBypass =
9805 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9806 II.getStartValue(), Step, II.getKind(), BinOp);
9807 EndValueFromAdditionalBypass->setName("ind.end");
9808 }
9809 return EndValueFromAdditionalBypass;
9810}
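// Worked example with illustrative numbers (not from the source): for a
// non-primary induction j = 7 + 3 * i and a main vector trip count of 128,
// emitTransformedIndex above yields ind.end = 7 + 3 * 128 = 391, the value
// j holds after the main vector loop when the epilogue's vector loop is
// bypassed.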
9811
9813 VPlan &BestEpiPlan,
9814 LoopVectorizationLegality &LVL,
9815 const SCEV2ValueTy &ExpandedSCEVs,
9816 Value *MainVectorTripCount) {
9817 // Fix reduction resume values from the additional bypass block.
9818 BasicBlock *PH = L->getLoopPreheader();
9819 for (auto *Pred : predecessors(PH)) {
9820 for (PHINode &Phi : PH->phis()) {
9821 if (Phi.getBasicBlockIndex(Pred) != -1)
9822 continue;
9823 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9824 }
9825 }
9826 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9827 if (ScalarPH->hasPredecessors()) {
9828 // If ScalarPH has predecessors, we may need to update its reduction
9829 // resume values.
9830 for (const auto &[R, IRPhi] :
9832 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9833 fixReductionScalarResumeWhenVectorizingEpilog(cast<VPPhi>(&R), IRPhi, BypassBlock);
9834 }
9835 }
9836
9837 // Fix induction resume values from the additional bypass block.
9838 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9839 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9840 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9841 Value *V = createInductionAdditionalBypassValues(
9842 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9843 LVL.getPrimaryInduction());
9844 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9845 Inc->setIncomingValueForBlock(BypassBlock, V);
9846 }
9847}
9848
9849 bool LoopVectorizePass::processLoop(Loop *L) {
9850 assert((EnableVPlanNativePath || L->isInnermost()) &&
9851 "VPlan-native path is not enabled. Only process inner loops.");
9852
9853 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9854 << L->getHeader()->getParent()->getName() << "' from "
9855 << L->getLocStr() << "\n");
9856
9857 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9858
9859 LLVM_DEBUG(
9860 dbgs() << "LV: Loop hints:"
9861 << " force="
9863 ? "disabled"
9865 ? "enabled"
9866 : "?"))
9867 << " width=" << Hints.getWidth()
9868 << " interleave=" << Hints.getInterleave() << "\n");
9869
9870 // Function containing loop
9871 Function *F = L->getHeader()->getParent();
9872
9873 // Looking at the diagnostic output is the only way to determine if a loop
9874 // was vectorized (other than looking at the IR or machine code), so it
9875 // is important to generate an optimization remark for each loop. Most of
9876 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9877 // generated as OptimizationRemark and OptimizationRemarkMissed are
9878 // less verbose reporting vectorized loops and unvectorized loops that may
9879 // benefit from vectorization, respectively.
9880
9881 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9882 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9883 return false;
9884 }
9885
9886 PredicatedScalarEvolution PSE(*SE, *L);
9887
9888 // Check if it is legal to vectorize the loop.
9889 LoopVectorizationRequirements Requirements;
9890 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9891 &Requirements, &Hints, DB, AC, BFI, PSI, AA);
9892 if (!LVL.canVectorize(EnableVPlanNativePath)) {
9893 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9894 Hints.emitRemarkWithHints();
9895 return false;
9896 }
9897
9898 if (LVL.hasUncountableEarlyExit() && !EnableEarlyExitVectorization) {
9899 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9900 "early exit is not enabled",
9901 "UncountableEarlyExitLoopsDisabled", ORE, L);
9902 return false;
9903 }
9904
9905 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9906 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9907 "faulting load is not supported",
9908 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9909 return false;
9910 }
9911
9912 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9913 // here. They may require CFG and instruction level transformations before
9914 // even evaluating whether vectorization is profitable. Since we cannot modify
9915 // the incoming IR, we need to build VPlan upfront in the vectorization
9916 // pipeline.
9917 if (!L->isInnermost())
9918 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9919 ORE, BFI, PSI, Hints, Requirements);
9920
9921 assert(L->isInnermost() && "Inner loop expected.");
9922
9923 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9924 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9925
9926 // If an override option has been passed in for interleaved accesses, use it.
9927 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9928 UseInterleaved = EnableInterleavedMemAccesses;
9929
9930 // Analyze interleaved memory accesses.
9931 if (UseInterleaved)
9932 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9933
9934 if (LVL.hasUncountableEarlyExit()) {
9935 BasicBlock *LoopLatch = L->getLoopLatch();
9936 if (IAI.requiresScalarEpilogue() ||
9937 any_of(LVL.getCountableExitingBlocks(),
9938 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9939 reportVectorizationFailure("Auto-vectorization of early exit loops "
9940 "requiring a scalar epilogue is unsupported",
9941 "UncountableEarlyExitUnsupported", ORE, L);
9942 return false;
9943 }
9944 }
9945
9946 // Check the function attributes and profiles to find out if this function
9947 // should be optimized for size.
9948 ScalarEpilogueLowering SEL =
9949 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, LVL, &IAI);
9950
9951 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9952 // count by optimizing for size, to minimize overheads.
9953 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9954 if (ExpectedTC && ExpectedTC->isFixed() &&
9955 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9956 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9957 << "This loop is worth vectorizing only if no scalar "
9958 << "iteration overheads are incurred.");
9959 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9960 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9961 else {
9962 LLVM_DEBUG(dbgs() << "\n");
9963 // Predicate tail-folded loops are efficient even when the loop
9964 // iteration count is low. However, setting the epilogue policy to
9965 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9966 // with runtime checks. It's more effective to let
9967 // `isOutsideLoopWorkProfitable` determine if vectorization is
9968 // beneficial for the loop.
9969 if (SEL != CM_ScalarEpilogueNotNeededUsePredicate)
9970 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9971 }
9972 }
9973
9974 // Check the function attributes to see if implicit floats or vectors are
9975 // allowed.
9976 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9978 "Can't vectorize when the NoImplicitFloat attribute is used",
9979 "loop not vectorized due to NoImplicitFloat attribute",
9980 "NoImplicitFloat", ORE, L);
9981 Hints.emitRemarkWithHints();
9982 return false;
9983 }
9984
9985 // Check if the target supports potentially unsafe FP vectorization.
9986 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9987 // for the target we're vectorizing for, to make sure none of the
9988 // additional fp-math flags can help.
9989 if (Hints.isPotentiallyUnsafe() &&
9990 TTI->isFPVectorizationPotentiallyUnsafe()) {
9992 "Potentially unsafe FP op prevents vectorization",
9993 "loop not vectorized due to unsafe FP support.",
9994 "UnsafeFP", ORE, L);
9995 Hints.emitRemarkWithHints();
9996 return false;
9997 }
9998
9999 bool AllowOrderedReductions;
10000 // If the flag is set, use that instead and override the TTI behaviour.
10001 if (ForceOrderedReductions.getNumOccurrences() > 0)
10002 AllowOrderedReductions = ForceOrderedReductions;
10003 else
10004 AllowOrderedReductions = TTI->enableOrderedReductions();
10005 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10006 ORE->emit([&]() {
10007 auto *ExactFPMathInst = Requirements.getExactFPInst();
10008 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10009 ExactFPMathInst->getDebugLoc(),
10010 ExactFPMathInst->getParent())
10011 << "loop not vectorized: cannot prove it is safe to reorder "
10012 "floating-point operations";
10013 });
10014 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10015 "reorder floating-point operations\n");
10016 Hints.emitRemarkWithHints();
10017 return false;
10018 }
10019
10020 // Use the cost model.
10021 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10022 F, &Hints, IAI, PSI, BFI);
10023 // Use the planner for vectorization.
10024 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
10025 ORE);
10026
10027 // Get user vectorization factor and interleave count.
10028 ElementCount UserVF = Hints.getWidth();
10029 unsigned UserIC = Hints.getInterleave();
10030
10031 // Plan how to best vectorize.
10032 LVP.plan(UserVF, UserIC);
10033 VectorizationFactor VF = VectorizationFactor::Disabled();
10034 unsigned IC = 1;
10035
10036 if (ORE->allowExtraAnalysis(LV_NAME))
10037 LVP.emitInvalidCostRemarks(ORE);
10038
10039 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
10040 if (LVP.hasPlanWithVF(VF.Width)) {
10041 // Select the interleave count.
10042 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
10043
10044 unsigned SelectedIC = std::max(IC, UserIC);
10045 // Optimistically generate runtime checks if they are needed. Drop them if
10046 // they turn out to not be profitable.
10047 if (VF.Width.isVector() || SelectedIC > 1) {
10048 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
10049
10050 // Bail out early if either the SCEV or memory runtime checks are known to
10051 // fail. In that case, the vector loop would never execute.
10052 using namespace llvm::PatternMatch;
10053 if (Checks.getSCEVChecks().first &&
10054 match(Checks.getSCEVChecks().first, m_One()))
10055 return false;
10056 if (Checks.getMemRuntimeChecks().first &&
10057 match(Checks.getMemRuntimeChecks().first, m_One()))
10058 return false;
10059 }
10060
10061 // Check if it is profitable to vectorize with runtime checks.
10062 bool ForceVectorization =
10063 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
10064 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
10065 CM.CostKind);
10066 if (!ForceVectorization &&
10067 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
10068 LVP.getPlanFor(VF.Width), SEL,
10069 CM.getVScaleForTuning())) {
10070 ORE->emit([&]() {
10071 return OptimizationRemarkAnalysisAliasing(
10072 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10073 L->getHeader())
10074 << "loop not vectorized: cannot prove it is safe to reorder "
10075 "memory operations";
10076 });
10077 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
10078 Hints.emitRemarkWithHints();
10079 return false;
10080 }
10081 }
10082
10083 // Identify the diagnostic messages that should be produced.
10084 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10085 bool VectorizeLoop = true, InterleaveLoop = true;
10086 if (VF.Width.isScalar()) {
10087 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10088 VecDiagMsg = {
10089 "VectorizationNotBeneficial",
10090 "the cost-model indicates that vectorization is not beneficial"};
10091 VectorizeLoop = false;
10092 }
10093
10094 if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
10095 // Tell the user interleaving was avoided up-front, despite being explicitly
10096 // requested.
10097 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10098 "interleaving should be avoided up front\n");
10099 IntDiagMsg = {"InterleavingAvoided",
10100 "Ignoring UserIC, because interleaving was avoided up front"};
10101 InterleaveLoop = false;
10102 } else if (IC == 1 && UserIC <= 1) {
10103 // Tell the user interleaving is not beneficial.
10104 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10105 IntDiagMsg = {
10106 "InterleavingNotBeneficial",
10107 "the cost-model indicates that interleaving is not beneficial"};
10108 InterleaveLoop = false;
10109 if (UserIC == 1) {
10110 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10111 IntDiagMsg.second +=
10112 " and is explicitly disabled or interleave count is set to 1";
10113 }
10114 } else if (IC > 1 && UserIC == 1) {
10115 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10116 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
10117 "disabled.\n");
10118 IntDiagMsg = {"InterleavingBeneficialButDisabled",
10119 "the cost-model indicates that interleaving is beneficial "
10120 "but is explicitly disabled or interleave count is set to 1"};
10121 InterleaveLoop = false;
10122 }
10123
10124 // If there is a histogram in the loop, do not just interleave without
10125 // vectorizing. The order of operations will be incorrect without the
10126 // histogram intrinsics, which are only used for recipes with VF > 1.
10127 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
10128 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
10129 << "to histogram operations.\n");
10130 IntDiagMsg = {
10131 "HistogramPreventsScalarInterleaving",
10132 "Unable to interleave without vectorization due to constraints on "
10133 "the order of histogram operations"};
10134 InterleaveLoop = false;
10135 }
10136
10137 // Override IC if user provided an interleave count.
10138 IC = UserIC > 0 ? UserIC : IC;
10139
10140 // Emit diagnostic messages, if any.
10141 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10142 if (!VectorizeLoop && !InterleaveLoop) {
10143 // Do not vectorize or interleave the loop.
10144 ORE->emit([&]() {
10145 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10146 L->getStartLoc(), L->getHeader())
10147 << VecDiagMsg.second;
10148 });
10149 ORE->emit([&]() {
10150 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10151 L->getStartLoc(), L->getHeader())
10152 << IntDiagMsg.second;
10153 });
10154 return false;
10155 }
10156
10157 if (!VectorizeLoop && InterleaveLoop) {
10158 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10159 ORE->emit([&]() {
10160 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10161 L->getStartLoc(), L->getHeader())
10162 << VecDiagMsg.second;
10163 });
10164 } else if (VectorizeLoop && !InterleaveLoop) {
10165 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10166 << ") in " << L->getLocStr() << '\n');
10167 ORE->emit([&]() {
10168 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10169 L->getStartLoc(), L->getHeader())
10170 << IntDiagMsg.second;
10171 });
10172 } else if (VectorizeLoop && InterleaveLoop) {
10173 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10174 << ") in " << L->getLocStr() << '\n');
10175 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10176 }
10177
10178 // Report the vectorization decision.
10179 if (VF.Width.isScalar()) {
10180 using namespace ore;
10181 assert(IC > 1);
10182 ORE->emit([&]() {
10183 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10184 L->getHeader())
10185 << "interleaved loop (interleaved count: "
10186 << NV("InterleaveCount", IC) << ")";
10187 });
10188 } else {
10189 // Report the vectorization decision.
10190 reportVectorization(ORE, L, VF, IC);
10191 }
10192 if (ORE->allowExtraAnalysis(LV_NAME))
10193 checkMixedPrecision(L, ORE);
10194
10195 // If we decided that it is *legal* to interleave or vectorize the loop, then
10196 // do it.
10197
10198 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
10199 // Consider vectorizing the epilogue too if it's profitable.
10200 VectorizationFactor EpilogueVF =
10201 LVP.selectEpilogueVectorizationFactor(VF.Width, IC);
10202 if (EpilogueVF.Width.isVector()) {
10203 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10204
10205 // The first pass vectorizes the main loop and creates a scalar epilogue
10206 // to be vectorized by executing the plan (potentially with a different
10207 // factor) again shortly afterwards.
10208 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
10209 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
10210 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
10211 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
10212 BestEpiPlan);
10213 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM, BFI,
10214 PSI, Checks, *BestMainPlan);
10215 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
10216 *BestMainPlan, MainILV, DT, false);
10217 ++LoopsVectorized;
10218
10219 // Second pass vectorizes the epilogue and adjusts the control flow
10220 // edges from the first pass.
10221 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10222 BFI, PSI, Checks, BestEpiPlan);
10223 EpilogILV.setTripCount(MainILV.getTripCount());
10224 preparePlanForEpilogueVectorLoop(BestEpiPlan, L, ExpandedSCEVs, EPI);
10225
10226 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
10227 true);
10228
10230 BestEpiPlan, LVL, ExpandedSCEVs,
10231 EPI.VectorTripCount);
10232 ++LoopsEpilogueVectorized;
10233 } else {
10234 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, BFI, PSI,
10235 Checks, BestPlan);
10236 // TODO: Move to general VPlan pipeline once epilogue loops are also
10237 // supported.
10238 VPlanTransforms::runPass(VPlanTransforms::materializeVectorTripCount,
10239 BestPlan, VF.Width,
10240 IC, PSE);
10241 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
10242 VF.MinProfitableTripCount);
10243
10244 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
10245 ++LoopsVectorized;
10246 }
10247
10248 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
10249 "DT not preserved correctly");
10250 assert(!verifyFunction(*F, &dbgs()));
10251
10252 return true;
10253}
10254
10255 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
10256
10257 // Don't attempt if
10258 // 1. the target claims to have no vector registers, and
10259 // 2. interleaving won't help ILP.
10260 //
10261 // The second condition is necessary because, even if the target has no
10262 // vector registers, loop vectorization may still enable scalar
10263 // interleaving.
10264 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10265 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
10266 return LoopVectorizeResult(false, false);
10267
10268 bool Changed = false, CFGChanged = false;
10269
10270 // The vectorizer requires loops to be in simplified form.
10271 // Since simplification may add new inner loops, it has to run before the
10272 // legality and profitability checks. This means running the loop vectorizer
10273 // will simplify all loops, regardless of whether anything ends up being
10274 // vectorized.
10275 for (const auto &L : *LI)
10276 Changed |= CFGChanged |=
10277 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10278
10279 // Build up a worklist of inner-loops to vectorize. This is necessary as
10280 // the act of vectorizing or partially unrolling a loop creates new loops
10281 // and can invalidate iterators across the loops.
10282 SmallVector<Loop *, 8> Worklist;
10283
10284 for (Loop *L : *LI)
10285 collectSupportedLoops(*L, LI, ORE, Worklist);
10286
10287 LoopsAnalyzed += Worklist.size();
10288
10289 // Now walk the identified inner loops.
10290 while (!Worklist.empty()) {
10291 Loop *L = Worklist.pop_back_val();
10292
10293 // For the inner loops we actually process, form LCSSA to simplify the
10294 // transform.
10295 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10296
10297 Changed |= CFGChanged |= processLoop(L);
10298
10299 if (Changed) {
10300 LAIs->clear();
10301
10302#ifndef NDEBUG
10303 if (VerifySCEV)
10304 SE->verify();
10305#endif
10306 }
10307 }
10308
10309 // Process each loop nest in the function.
10310 return LoopVectorizeResult(Changed, CFGChanged);
10311}
10312
10313 PreservedAnalyses LoopVectorizePass::run(Function &F,
10314 FunctionAnalysisManager &AM) {
10315 LI = &AM.getResult<LoopAnalysis>(F);
10316 // There are no loops in the function. Return before computing other
10317 // expensive analyses.
10318 if (LI->empty())
10319 return PreservedAnalyses::all();
10320 SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
10321 TTI = &AM.getResult<TargetIRAnalysis>(F);
10322 DT = &AM.getResult<DominatorTreeAnalysis>(F);
10323 TLI = &AM.getResult<TargetLibraryAnalysis>(F);
10324 AC = &AM.getResult<AssumptionAnalysis>(F);
10325 DB = &AM.getResult<DemandedBitsAnalysis>(F);
10326 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10327 LAIs = &AM.getResult<LoopAccessAnalysis>(F);
10328 AA = &AM.getResult<AAManager>(F);
10329
10330 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10331 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10332 BFI = nullptr;
10333 if (PSI && PSI->hasProfileSummary())
10334 BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
10335 LoopVectorizeResult Result = runImpl(F);
10336 if (!Result.MadeAnyChange)
10337 return PreservedAnalyses::all();
10338 PreservedAnalyses PA;
10339
10340 if (isAssignmentTrackingEnabled(*F.getParent())) {
10341 for (auto &BB : F)
10342 RemoveRedundantDbgInstrs(&BB);
10343 }
10344
10345 PA.preserve<LoopAnalysis>();
10346 PA.preserve<DominatorTreeAnalysis>();
10347 PA.preserve<ScalarEvolutionAnalysis>();
10348 PA.preserve<LoopAccessAnalysis>();
10349
10350 if (Result.MadeCFGChange) {
10351 // Making CFG changes likely means a loop got vectorized. Indicate that
10352 // extra simplification passes should be run.
10353 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
10354 // be run if runtime checks have been added.
10355 PA.preserve<ShouldRunExtraVectorPasses>();
10357 } else {
10358 PA.preserveSet<CFGAnalyses>();
10359 }
10360 return PA;
10361}
10362
10363 void LoopVectorizePass::printPipeline(
10364 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10365 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10366 OS, MapClassName2PassName);
10367
10368 OS << '<';
10369 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10370 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10371 OS << '>';
10372}
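// For example (assuming default options), the printed pipeline form is
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>
// which matches the syntax accepted by opt's -passes= option.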
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
#define _
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:80
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan, DenseMap< VPValue *, VPValue * > &IVEndValues)
Create resume phis in the scalar preheader for first-order recurrences, reductions and inductions,...
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static VPInstruction * addResumePhiRecipeForInduction(VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder, VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC)
Create and return a ResumePhi for WideIV, unless it is truncated.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
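For the integer-induction case (IK_IntInduction) this reduces to Start + Index * Step; a minimal sketch under that assumption (the helper name is illustrative, and the real function also handles pointer and floating-point inductions):

#include "llvm/IR/IRBuilder.h"

static llvm::Value *transformedIntIndex(llvm::IRBuilderBase &B,
                                        llvm::Value *Index,
                                        llvm::Value *StartValue,
                                        llvm::Value *Step) {
  llvm::Value *Offset = B.CreateMul(Index, Step); // Index * Step
  return B.CreateAdd(StartValue, Offset);         // Start + Index * Step
}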
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static Value * getStartValueFromReductionResult(VPInstruction *RdxResult)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecipe for Phi.
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static void cse(BasicBlock *BB)
Perform CSE of induction variable instructions.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first order reductions in the original exit block.
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
This file contains the declarations for profiling metadata utility functions.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
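As an illustration of how these macros combine in practice, a minimal sketch (the statistic name, debug type, and messages are hypothetical, not the ones this file defines):

#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "example-pass" // hypothetical; enables -debug-only=example-pass

STATISTIC(NumWidened, "Number of instructions widened"); // hypothetical counter

static void noteWidened() {
  ++NumWidened;                                            // bump the statistic
  LLVM_DEBUG(llvm::dbgs() << "widened one instruction\n"); // active in asserts builds
  DEBUG_WITH_TYPE("example-pass-verbose",
                  llvm::dbgs() << "extra detail\n");       // separate debug type
}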
This pass exposes codegen information to IR-level passes.
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1512
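A minimal sketch of the APInt queries listed above (the function name is illustrative):

#include "llvm/ADT/APInt.h"

static void apintSketch() {
  llvm::APInt Mask = llvm::APInt::getAllOnes(8); // 8-bit value 0xFF
  uint64_t ZExt = Mask.getZExtValue();           // zero-extends to 255
  unsigned Active = Mask.getActiveBits();        // 8: bit 7 is the top set bit
  (void)ZExt; (void)Active;
}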
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
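A minimal sketch combining these BasicBlock queries (the function name is illustrative):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

static void inspectBlock(llvm::BasicBlock *BB) {
  for (llvm::PHINode &Phi : BB->phis())
    (void)Phi;                               // header PHIs, in order
  auto FirstNonPhi = BB->getFirstNonPHIIt(); // insertion point past the PHIs
  if (llvm::BasicBlock *Pred = BB->getSinglePredecessor())
    (void)Pred;                              // non-null only for a unique predecessor
  (void)FirstNonPhi;
  (void)BB->getTerminator();                 // null if the block is malformed
}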
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:984
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
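For example, inverting an unsigned predicate (a minimal sketch; the function name is illustrative):

#include "llvm/IR/InstrTypes.h"

static void predicateSketch() {
  llvm::CmpInst::Predicate P = llvm::CmpInst::ICMP_ULT; // unsigned <
  llvm::CmpInst::Predicate Inv = llvm::CmpInst::getInversePredicate(P);
  (void)Inv;                                            // ICMP_UGE
}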
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
static DebugLoc getTemporary()
Definition DebugLoc.h:161
static DebugLoc getUnknown()
Definition DebugLoc.h:162
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:229
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:156
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:267
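A minimal sketch of the DenseMap operations listed above (the function name is illustrative):

#include "llvm/ADT/DenseMap.h"

static void denseMapSketch() {
  llvm::DenseMap<int, int> M;
  M.try_emplace(1, 10);     // inserts {1, 10} only if key 1 is absent
  int V = M.lookup(2);      // 0: default-constructed value for a missing key
  bool Has = M.contains(1); // true
  (void)V; (void)Has;
}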
Implements a dense probed hash-table based set.
Definition DenseSet.h:269
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
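A minimal sketch of fixed versus scalable element counts, the two shapes of vectorization factor the cost model reasons about (the function name is illustrative):

#include "llvm/Support/TypeSize.h"

static void elementCountSketch() {
  llvm::ElementCount VF4 = llvm::ElementCount::getFixed(4);    // exactly 4 lanes
  llvm::ElementCount VFS = llvm::ElementCount::getScalable(2); // 2 * vscale lanes
  bool V = VF4.isVector();                                     // true: > 1 element
  bool S = llvm::ElementCount::getFixed(1).isScalar();         // true: exactly 1
  (void)VFS; (void)V; (void)S;
}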
BasicBlock * getAdditionalBypassBlock() const
Return the additional bypass block which targets the scalar loop by skipping the epilogue loop after ...
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecesso...
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check, VPlan &Plan)
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:762
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
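A minimal sketch of building an FP instruction with fast-math flags applied (the function name is illustrative):

#include "llvm/IR/IRBuilder.h"

static llvm::Value *emitFastFAdd(llvm::BasicBlock *BB, llvm::Value *L,
                                 llvm::Value *R) {
  llvm::IRBuilder<> B(BB); // positions the builder at the end of BB
  llvm::FastMathFlags FMF;
  FMF.setFast();           // enable all fast-math flags
  B.setFastMathFlags(FMF); // applied to subsequently created FP operations
  return B.CreateFAdd(L, R, "sum");
}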
A struct for saving information about induction variables.
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
ProfileSummaryInfo * PSI
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks, VPlan &Plan)
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
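A minimal sketch showing why validity must be checked before extracting a value (the function name is illustrative):

#include "llvm/Support/InstructionCost.h"

static void costSketch() {
  llvm::InstructionCost C = 4;
  C += llvm::InstructionCost::getInvalid(); // invalidity propagates through arithmetic
  if (C.isValid())                          // false here, so getValue() is skipped
    (void)C.getValue();
}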
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:343
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
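A minimal sketch of a reverse post-order walk over a loop's blocks using this wrapper (the function name is illustrative):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"

static void visitLoopRPO(llvm::Loop *L, llvm::LoopInfo *LI) {
  llvm::LoopBlocksRPO RPOT(L);
  RPOT.perform(LI);                  // run the DFS and cache the ordering
  for (llvm::BasicBlock *BB : RPOT)
    (void)BB;                        // blocks in reverse post-order
}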
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves the TailFoldingStyle for both cases: whether or not the IV update may overflow.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has exactly one uncountable early exit, i.e.
bool hasHistograms() const
Returns true if there are any known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1611
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1662
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1595
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1576
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1740
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
Metadata node.
Definition Metadata.h:1077
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:115
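A minimal sketch of the deterministic iteration order that distinguishes MapVector from DenseMap (the function name is illustrative):

#include "llvm/ADT/MapVector.h"

static void mapVectorSketch() {
  llvm::MapVector<int, int> MV;
  MV.insert({3, 30});
  MV.insert({1, 10});
  for (const auto &KV : MV)
    (void)KV; // visits {3,30} then {1,10}: insertion order, not hash order
}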
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:230
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
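A minimal sketch of wiring a PHI's incoming values per predecessor edge (the function name is illustrative):

#include "llvm/IR/Instructions.h"

static void phiSketch(llvm::PHINode *Phi, llvm::BasicBlock *Pred,
                      llvm::Value *V) {
  Phi->addIncoming(V, Pred);                // append one edge value
  llvm::Value *Cur = Phi->getIncomingValueForBlock(Pred);
  Phi->setIncomingValueForBlock(Pred, Cur); // overwrite per-block value(s)
  (void)Phi->getNumIncomingValues();        // number of incoming edges
}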
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
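A minimal sketch of the typical return values from a new-PM pass's run() method (the function name is illustrative):

#include "llvm/IR/Analysis.h"

static llvm::PreservedAnalyses reportPreserved(bool Changed) {
  if (!Changed)
    return llvm::PreservedAnalyses::all(); // nothing was invalidated
  llvm::PreservedAnalyses PA;
  PA.preserveSet<llvm::CFGAnalyses>();     // the CFG was left untouched
  return PA;
}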
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
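A minimal sketch of querying SCEV for a value and testing loop invariance (the function name is illustrative):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"

static void scevSketch(llvm::ScalarEvolution &SE, llvm::Value *V,
                       const llvm::Loop *L) {
  if (!SE.isSCEVable(V->getType()))
    return;                            // only integer/pointer-like types are analyzable
  const llvm::SCEV *S = SE.getSCEV(V); // canonical expression for V
  bool Inv = SE.isLoopInvariant(S, L); // unchanging within L?
  const llvm::SCEV *One = SE.getOne(S->getType());
  (void)Inv; (void)One;
}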
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:59
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:104
void insert_range(Range &&R)
Definition SetVector.h:193
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:279
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
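A minimal sketch of SetVector's set-with-order semantics (the function name is illustrative):

#include "llvm/ADT/SetVector.h"

static void setVectorSketch() {
  llvm::SetVector<int> SV;
  bool New = SV.insert(7); // true: newly inserted
  New = SV.insert(7);      // false: duplicate rejected, order unchanged
  (void)New;
  (void)SV.count(7);       // 1
}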
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:87
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:96
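A small illustrative dispatch using TypeSwitch (names are examples, not code from this file):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instructions.h"

llvm::StringRef classifyMemAccess(llvm::Instruction &I) {
  return llvm::TypeSwitch<llvm::Instruction *, llvm::StringRef>(&I)
      .Case<llvm::LoadInst>([](llvm::LoadInst *) { return "load"; })
      .Case<llvm::StoreInst>([](llvm::StoreInst *) { return "store"; })
      .Default([](llvm::Instruction *) { return "other"; });
}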
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
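A brief sketch exercising the Type queries listed above (illustrative only):

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

void typeQueries(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  bool IsInt = I32->isIntegerTy();            // true
  bool IsVec = I32->isVectorTy();             // false
  unsigned Bits = I32->getScalarSizeInBits(); // 32
  llvm::Type *Scalar = I32->getScalarType();  // i32: already scalar
  (void)IsInt; (void)IsVec; (void)Bits; (void)Scalar;
}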
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
iterator_range< op_iterator > op_range
Definition User.h:281
Value * getOperand(unsigned i) const
Definition User.h:232
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3760
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:3835
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:3787
iterator end()
Definition VPlan.h:3797
iterator begin()
Recipe iterator methods.
Definition VPlan.h:3795
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:3848
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:246
VPRegionBlock * getEnclosingLoopRegion()
Definition VPlan.cpp:619
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:3826
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
VPRegionBlock * getParent()
Definition VPlan.h:173
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:190
void setName(const Twine &newName)
Definition VPlan.h:166
size_t getNumSuccessors() const
Definition VPlan.h:219
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:322
size_t getNumPredecessors() const
Definition VPlan.h:220
VPlan * getPlan()
Definition VPlan.cpp:165
VPBlockBase * getSinglePredecessor() const
Definition VPlan.h:215
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:170
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:209
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:228
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:249
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:187
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:214
VPlan-based builder utility analogous to IRBuilder.
VPDerivedIVRecipe * createDerivedIV(InductionDescriptor::InductionKind Kind, FPMathOperator *FPBinOp, VPValue *Start, VPValue *Current, VPValue *Step, const Twine &Name="")
Convert the input value Current to the corresponding value of an induction with Start and Step values...
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPInstruction * createScalarCast(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, DebugLoc DL)
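A hedged sketch of VPBuilder use, relying only on the signatures shown above; the helper is hypothetical and the header location of VPBuilder is an assumption:

#include "VPlanHelpers.h" // in-tree header assumed to declare VPBuilder
#include "llvm/IR/Instruction.h"

// Hypothetical helper: create `add X, Y` as a VPInstruction via VPBuilder.
llvm::VPValue *emitAdd(llvm::VPBuilder &Builder, llvm::VPValue *X,
                       llvm::VPValue *Y) {
  return Builder.createNaryOp(llvm::Instruction::Add, {X, Y});
}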
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:424
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:397
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
Definition VPlan.h:3637
VPValue * getStartValue() const
Definition VPlan.h:3636
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:1973
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2021
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2010
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:1678
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:3913
Helper to manage IR metadata for recipes.
Definition VPlan.h:939
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:980
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1013
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1060
@ FirstOrderRecurrenceSplice
Definition VPlan.h:986
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1051
unsigned getOpcode() const
Definition VPlan.h:1116
VPInterleaveRecipe is a recipe for transforming an interleave group of loads or stores into one wide l...
Definition VPlan.h:2572
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
A recipe for forming partial reductions.
Definition VPlan.h:2749
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1287
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:394
VPBasicBlock * getParent()
Definition VPlan.h:415
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:482
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPRecipeBase * tryToCreateWidenRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for R if one can be created within the given VF Range.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
VPValue * getVPValueOrAddLiveIn(Value *V)
std::optional< unsigned > getScalingForReduction(const Instruction *ExitInst)
void collectScaledReductions(VFRange &Range)
Find all possible partial reductions in the loop and track all of those that are valid so recipes can...
VPReplicateRecipe * handleReplication(Instruction *I, ArrayRef< VPValue * > Operands, VFRange &Range)
Build a VPReplicateRecipe for I using Operands.
VPRecipeBase * tryToCreatePartialReduction(Instruction *Reduction, ArrayRef< VPValue * > Operands, unsigned ScaleFactor)
Create and return a partial reduction recipe for a reduction instruction along with binary operation ...
A recipe for handling reduction phis.
Definition VPlan.h:2327
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
Definition VPlan.h:2387
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2381
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:3948
const VPBlockBase * getEntry() const
Definition VPlan.h:3984
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2852
VPSingleDefRecipe is a base class for recipes modeling a sequence of one or more output IR instructions that define ...
Definition VPlan.h:521
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:586
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:199
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:243
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:238
void addOperand(VPValue *Operand)
Definition VPlanValue.h:232
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:135
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:176
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1412
user_iterator user_begin()
Definition VPlanValue.h:130
unsigned getNumUsers() const
Definition VPlanValue.h:113
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1416
user_range users()
Definition VPlanValue.h:134
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:1837
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1479
A recipe for handling GEP instructions.
Definition VPlan.h:1765
Base class for widened induction (VPWidenIntOrFpInductionRecipe and VPWidenPointerInductionRecipe),...
Definition VPlan.h:2038
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2066
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
Definition VPlan.h:2083
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2113
A common base class for widening memory operations.
Definition VPlan.h:3129
A recipe for widened phis.
Definition VPlan.h:2249
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1436
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4051
bool hasVF(ElementCount VF) const
Definition VPlan.h:4260
VPBasicBlock * getEntry()
Definition VPlan.h:4150
VPValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4240
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4243
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4212
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4267
bool hasUF(unsigned UF) const
Definition VPlan.h:4278
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4202
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1046
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4423
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1028
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4226
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4175
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4302
bool hasScalarVFOnly() const
Definition VPlan.h:4271
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4193
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:952
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition VPlan.h:4356
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4198
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4155
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1188
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
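For example, a minimal sketch constructing a fixed-width vector type (assuming an existing LLVMContext):

#include "llvm/IR/DerivedTypes.h"

llvm::VectorType *makeVecTy(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  // Fixed <4 x i32>; ElementCount::getScalable(4) would instead give
  // the scalable type <vscale x 4 x i32>.
  return llvm::VectorType::get(I32, llvm::ElementCount::getFixed(4));
}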
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:194
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:169
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
Definition TypeSize.h:277
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
constexpr bool isZero() const
Definition TypeSize.h:154
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
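A sketch of how the isKnown* predicates behave across fixed and scalable quantities (illustrative values only):

#include "llvm/Support/TypeSize.h"

void elementCountDemo() {
  auto Fixed8 = llvm::ElementCount::getFixed(8);
  auto Scal4 = llvm::ElementCount::getScalable(4); // vscale x 4
  // Mixed fixed/scalable quantities are only partially ordered, so each
  // predicate answers "provably true for every possible vscale".
  bool LE = llvm::ElementCount::isKnownLE(Scal4, Fixed8); // false: vscale may exceed 2
  auto Scal8 = Scal4.multiplyCoefficientBy(2);            // vscale x 8
  bool GE = llvm::ElementCount::isKnownGE(Scal8, Scal4);  // true: same scaling
  (void)LE; (void)GE;
}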
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:130
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
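A small sketch of the IR pattern-match idiom these m_* entries describe (the helper is hypothetical):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

bool isAddOfOne(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  // Binds X to the left operand when V is (add X, 1). m_Add is not
  // commutative; canonicalization keeps the constant on the right.
  return match(V, m_Add(m_Value(X), m_One()));
}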
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t > m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1)
bool match(const SCEV *S, const Pattern &P)
class_match< const SCEV > m_SCEV()
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastElement, Op0_t > m_ExtractLastElement(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
Definition VPlanUtils.h:44
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
const SCEV * getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
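An illustrative use of zip over two ranges (the function is hypothetical):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

int dotProduct(llvm::ArrayRef<int> A, llvm::ArrayRef<int> B) {
  int Sum = 0;
  // zip yields tuples of references and stops at the shorter range.
  for (auto [X, Y] : llvm::zip(A, B))
    Sum += X * Y;
  return Sum;
}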
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:689
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1657
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
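The usual erase-while-iterating idiom this enables, sketched on IR instructions (a minimal example, not code from this file):

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

void eraseTriviallyDead(llvm::BasicBlock &BB) {
  // The wrapped iterator is advanced before each element is yielded,
  // so erasing the current instruction cannot invalidate the loop.
  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (!I.isTerminator() && I.use_empty() && !I.mayHaveSideEffects())
      I.eraseFromParent();
}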
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:243
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:348
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1719
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1767
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:126
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:325
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:405
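For instance, the number of vector iterations needed to cover a scalar trip count follows directly (hypothetical helper name):

#include "llvm/Support/MathExtras.h"

// divideCeil(10, 4) == 3: three 4-wide iterations cover 10 scalar iterations.
unsigned numVectorIterations(unsigned TripCount, unsigned VF) {
  return llvm::divideCeil(TripCount, VF);
}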
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ AddChainWithSubs
A chain of adds and subs.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:155
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
cl::opt< bool > EnableVPlanNativePath
Definition VPlan.cpp:56
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
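A sketch combining the load/store helpers listed on this page; I must be a LoadInst or a StoreInst (the wrapper is hypothetical):

#include "llvm/IR/Instructions.h"

void describeAccess(const llvm::Value *I) {
  // Each helper accepts either a load or a store and asserts otherwise.
  llvm::Type *AccessTy = llvm::getLoadStoreType(I);
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I);
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  unsigned AS = llvm::getLoadStoreAddressSpace(I);
  (void)AccessTy; (void)Ptr; (void)Alignment; (void)AS;
}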
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind)
A helper function that returns how much we should divide the cost of a predicated block by.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
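A minimal sketch of combining fields into one hash_code (illustrative only):

#include "llvm/ADT/Hashing.h"

llvm::hash_code hashPair(unsigned Opcode, const void *Ty) {
  // Order-sensitive: hash_combine(A, B) != hash_combine(B, A) in general.
  return llvm::hash_combine(Opcode, Ty);
}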
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:299
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:836
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
BlockFrequencyInfo * BFI
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
TargetTransformInfo * TTI
Storage for information about made changes.
A chain of instructions that form a partial reduction.
Instruction * Reduction
The top-level binary operation that forms the reduction to a scalar after the loop body.
Instruction * ExtendA
The extension of each of the inner binary operation's operands.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
Definition VPlan.h:2292
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening select instructions.
Definition VPlan.h:1719
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, ScalarEvolution &SE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static bool runPass(bool(*Transform)(VPlan &, ArgsTy...), VPlan &Plan, typename std::remove_reference< ArgsTy >::type &...Args)
Helper to run a VPlan transform Transform on VPlan, forwarding extra arguments to the transform.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static void materializeBuildVectors(VPlan &Plan)
Add explicit Build[Struct]Vector recipes that combine multiple scalar values into single vectors.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE)
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replaces all uses except the canonical IV...
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlanPtr &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void narrowInterleaveGroups(VPlan &Plan, ElementCount VF, unsigned VectorRegWidth)
Try to convert a plan with interleave groups with VF elements to a plan with the interleave groups re...
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks