Diffstat (limited to 'lib/Transforms/Vectorize/LoopVectorize.cpp')
-rw-r--r--  lib/Transforms/Vectorize/LoopVectorize.cpp | 3791
1 file changed, 2358 insertions, 1433 deletions
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 17c25dfffc10..8b85e320d3b2 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -46,7 +46,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Vectorize.h"
+#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
@@ -56,23 +56,15 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/BasicAliasAnalysis.h"
-#include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
-#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
@@ -98,10 +90,10 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
-#include <functional>
#include <map>
#include <tuple>
@@ -115,37 +107,21 @@ STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
static cl::opt<bool>
-EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
- cl::desc("Enable if-conversion during vectorization."));
+ EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
+ cl::desc("Enable if-conversion during vectorization."));
/// We don't vectorize loops with a known constant trip count below this number.
-static cl::opt<unsigned>
-TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16),
- cl::Hidden,
- cl::desc("Don't vectorize loops with a constant "
- "trip count that is smaller than this "
- "value."));
+static cl::opt<unsigned> TinyTripCountVectorThreshold(
+ "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
+ cl::desc("Don't vectorize loops with a constant "
+ "trip count that is smaller than this "
+ "value."));
static cl::opt<bool> MaximizeBandwidth(
"vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
cl::desc("Maximize bandwidth when selecting vectorization factor which "
"will be determined by the smallest type in loop."));
-/// This enables versioning on the strides of symbolically striding memory
-/// accesses in code like the following.
-/// for (i = 0; i < N; ++i)
-/// A[i * Stride1] += B[i * Stride2] ...
-///
-/// Will be roughly translated to
-/// if (Stride1 == 1 && Stride2 == 1) {
-/// for (i = 0; i < N; i+=4)
-/// A[i:i+3] += ...
-/// } else
-/// ...
-static cl::opt<bool> EnableMemAccessVersioning(
- "enable-mem-access-versioning", cl::init(true), cl::Hidden,
- cl::desc("Enable symbolic stride memory access versioning"));
-
static cl::opt<bool> EnableInterleavedMemAccesses(
"enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
@@ -262,7 +238,7 @@ public:
/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
-static Type* ToVectorTy(Type *Scalar, unsigned VF) {
+static Type *ToVectorTy(Type *Scalar, unsigned VF) {
if (Scalar->isVoidTy() || VF == 1)
return Scalar;
return VectorType::get(Scalar, VF);
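A hedged usage sketch of this helper (not part of the patch; the context and variable names are invented for illustration):

    // Sketch only: ToVectorTy widens a scalar type by VF and is the
    // identity for void types or VF == 1.
    LLVMContext Ctx;
    Type *I32 = Type::getInt32Ty(Ctx);
    Type *V4I32 = ToVectorTy(I32, 4); // VectorType::get(i32, 4), i.e. <4 x i32>
    Type *Same = ToVectorTy(I32, 1);  // VF == 1 returns the scalar type: i32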
@@ -313,21 +289,25 @@ public:
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
LoopInfo *LI, DominatorTree *DT,
const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, unsigned VecWidth,
- unsigned UnrollFactor)
+ const TargetTransformInfo *TTI, AssumptionCache *AC,
+ unsigned VecWidth, unsigned UnrollFactor)
: OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
- VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
- Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
- TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
- AddedSafetyChecks(false) {}
+ AC(AC), VF(VecWidth), UF(UnrollFactor),
+ Builder(PSE.getSE()->getContext()), Induction(nullptr),
+ OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
+ VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}
// Perform the actual loop widening (vectorization).
// MinimumBitWidths maps scalar integer values to the smallest bitwidth they
// can be validly truncated to. The cost model has assumed this truncation
- // will happen when vectorizing.
+ // will happen when vectorizing. VecValuesToIgnore contains scalar values
+ // that the cost model has chosen to ignore because they will not be
+ // vectorized.
void vectorize(LoopVectorizationLegality *L,
- MapVector<Instruction*,uint64_t> MinimumBitWidths) {
- MinBWs = MinimumBitWidths;
+ const MapVector<Instruction *, uint64_t> &MinimumBitWidths,
+ SmallPtrSetImpl<const Value *> &VecValuesToIgnore) {
+ MinBWs = &MinimumBitWidths;
+ ValuesNotWidened = &VecValuesToIgnore;
Legal = L;
// Create a new empty loop. Unlink the old loop and connect the new one.
createEmptyLoop();
@@ -337,33 +317,41 @@ public:
}
// Return true if any runtime check is added.
- bool IsSafetyChecksAdded() {
- return AddedSafetyChecks;
- }
+ bool areSafetyChecksAdded() { return AddedSafetyChecks; }
virtual ~InnerLoopVectorizer() {}
protected:
/// A small list of PHINodes.
- typedef SmallVector<PHINode*, 4> PhiVector;
+ typedef SmallVector<PHINode *, 4> PhiVector;
/// When we unroll loops we have multiple vector values for each scalar.
/// This data structure holds the unrolled and vectorized values that
/// originated from one scalar instruction.
- typedef SmallVector<Value*, 2> VectorParts;
+ typedef SmallVector<Value *, 2> VectorParts;
// When we if-convert we need to create edge masks. We have to cache values
// so that we don't end up with exponential recursion/IR.
- typedef DenseMap<std::pair<BasicBlock*, BasicBlock*>,
- VectorParts> EdgeMaskCache;
+ typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
+ EdgeMaskCache;
/// Create an empty loop, based on the loop ranges of the old loop.
void createEmptyLoop();
+
+ /// Set up the values of the IVs correctly when exiting the vector loop.
+ void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
+ Value *CountRoundDown, Value *EndValue,
+ BasicBlock *MiddleBlock);
+
/// Create a new induction variable inside L.
PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
Value *Step, Instruction *DL);
/// Copy and widen the instructions from the old loop.
virtual void vectorizeLoop();
+ /// Fix a first-order recurrence. This is the second phase of vectorizing
+ /// this phi node.
+ void fixFirstOrderRecurrence(PHINode *Phi);
+
/// \brief The Loop exit block may have single value PHI nodes where the
/// incoming value is 'Undef'. While vectorizing we only handled real values
/// that were defined inside the loop. Here we fix the 'undef case'.
@@ -372,7 +360,7 @@ protected:
/// Shrinks vector element sizes based on information in "MinBWs".
void truncateToMinimalBitwidths();
-
+
/// A helper function that computes the predicate of the block BB, assuming
/// that the header block of the loop is set to True. It returns the *entry*
/// mask for the block BB.
@@ -383,12 +371,12 @@ protected:
/// A helper function to vectorize a single BB within the innermost loop.
void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);
-
+
/// Vectorize a single PHINode in a block. This method handles the induction
/// variable canonicalization. It supports both VF = 1 for unrolled loops and
/// arbitrary length vectors.
- void widenPHIInstruction(Instruction *PN, VectorParts &Entry,
- unsigned UF, unsigned VF, PhiVector *PV);
+ void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
+ unsigned VF, PhiVector *PV);
/// Insert the new loop to the loop hierarchy and pass manager
/// and update the analysis passes.
@@ -399,7 +387,7 @@ protected:
/// scalarized instruction behind an if block predicated on the control
/// dependence of the instruction.
virtual void scalarizeInstruction(Instruction *Instr,
- bool IfPredicateStore=false);
+ bool IfPredicateStore = false);
/// Vectorize Load and Store instructions,
virtual void vectorizeMemoryInstruction(Instruction *Instr);
@@ -415,6 +403,26 @@ protected:
/// to each vector element of Val. The sequence starts at StartIndex.
virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);
+ /// Compute scalar induction steps. \p ScalarIV is the scalar induction
+ /// variable on which to base the steps, \p Step is the size of the step, and
+ /// \p EntryVal is the value from the original loop that maps to the steps.
+ /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
+ /// can be a truncate instruction).
+ void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);
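A hedged scalar model of the step values this produces (plain C++, not the LLVM API; the function below is invented for illustration). Per the ScalarIVMap comment later in this patch, an induction that is not widened gets VF * UF scalar steps:

    #include <vector>

    // For a scalar induction value IV with step S, the scalarized steps are
    // IV + K * S for K = 0 .. VF * UF - 1, one value per unrolled vector lane.
    std::vector<long> scalarSteps(long IV, long S, unsigned VF, unsigned UF) {
      std::vector<long> Steps;
      for (unsigned K = 0; K < VF * UF; ++K)
        Steps.push_back(IV + long(K) * S);
      return Steps;
    }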
+
+ /// Create a vector induction phi node based on an existing scalar one. This
+ /// currently only works for integer induction variables with a constant
+ /// step. If \p TruncType is non-null, instead of widening the original IV,
+ /// we widen a version of the IV truncated to \p TruncType.
+ void createVectorIntInductionPHI(const InductionDescriptor &II,
+ VectorParts &Entry, IntegerType *TruncType);
+
+ /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
+ /// induction variable will first be truncated to the corresponding type. The
+ /// widened values are placed in \p Entry.
+ void widenIntInduction(PHINode *IV, VectorParts &Entry,
+ TruncInst *Trunc = nullptr);
+
/// When we go over instructions in the basic block we rely on previous
/// values within the current basic block or on loop invariant values.
/// When we widen (vectorize) values we place them in the map. If the values
@@ -445,6 +453,24 @@ protected:
/// Emit bypass checks to check any memory assumptions we may have made.
void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
+ /// Add additional metadata to \p To that was not present on \p Orig.
+ ///
+ /// Currently this is used to add the noalias annotations based on the
+ /// inserted memchecks. Use this for instructions that are *cloned* into the
+ /// vector loop.
+ void addNewMetadata(Instruction *To, const Instruction *Orig);
+
+ /// Add metadata from one instruction to another.
+ ///
+ /// This includes both the original MDs from \p From and additional ones (\see
+ /// addNewMetadata). Use this for *newly created* instructions in the vector
+ /// loop.
+ void addMetadata(Instruction *To, Instruction *From);
+
+ /// \brief Similar to the previous function but it adds the metadata to a
+ /// vector of instructions.
+ void addMetadata(ArrayRef<Value *> To, Instruction *From);
+
/// This is a helper class that holds the vectorizer state. It maps scalar
/// instructions to vector instructions. When the code is 'unrolled' then
/// a single scalar value is mapped to multiple vector parts. The parts
@@ -501,6 +527,15 @@ protected:
const TargetLibraryInfo *TLI;
/// Target Transform Info.
const TargetTransformInfo *TTI;
+ /// Assumption Cache.
+ AssumptionCache *AC;
+
+ /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
+ /// used.
+ ///
+ /// This is currently only used to add no-alias metadata based on the
+ /// memchecks. The actual versioning is performed manually.
+ std::unique_ptr<LoopVersioning> LVer;
/// The vectorization SIMD factor to use. Each vector will have this many
/// vector elements.
@@ -522,11 +557,11 @@ protected:
BasicBlock *LoopScalarPreHeader;
/// Middle Block between the vector and the scalar.
BasicBlock *LoopMiddleBlock;
- ///The ExitBlock of the scalar loop.
+ /// The ExitBlock of the scalar loop.
BasicBlock *LoopExitBlock;
- ///The vector loop body.
- SmallVector<BasicBlock *, 4> LoopVectorBody;
- ///The scalar loop body.
+ /// The vector loop body.
+ BasicBlock *LoopVectorBody;
+ /// The scalar loop body.
BasicBlock *LoopScalarBody;
/// A list of all bypass blocks. The first block is the entry of the loop.
SmallVector<BasicBlock *, 4> LoopBypassBlocks;
@@ -537,9 +572,20 @@ protected:
PHINode *OldInduction;
/// Maps scalars to widened vectors.
ValueMap WidenMap;
+
+ /// A map of induction variables from the original loop to their
+ /// corresponding VF * UF scalarized values in the vectorized loop. The
+ /// purpose of ScalarIVMap is similar to that of WidenMap. Whereas WidenMap
+ /// maps original loop values to their vector versions in the new loop,
+ /// ScalarIVMap maps induction variables from the original loop that are not
+ /// vectorized to their scalar equivalents in the vector loop. Maintaining a
+ /// separate map for scalarized induction variables allows us to avoid
+ /// unnecessary scalar-to-vector-to-scalar conversions.
+ DenseMap<Value *, SmallVector<Value *, 8>> ScalarIVMap;
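A hedged usage sketch of this map (OrigIV and useScalarStep are hypothetical; only the container shape comes from the patch):

    // Scalarized users of an induction read precomputed scalar steps directly
    // instead of extracting lanes from a widened vector value.
    SmallVector<Value *, 8> &Steps = ScalarIVMap[OrigIV]; // OrigIV: hypothetical
    for (Value *Step : Steps)
      useScalarStep(Step); // hypothetical consumer, one call per scalar lane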
+
/// Store instructions that should be predicated, as a pair
/// <StoreInst, Predicate>
- SmallVector<std::pair<StoreInst*,Value*>, 4> PredicatedStores;
+ SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
EdgeMaskCache MaskCache;
/// Trip count of the original loop.
Value *TripCount;
@@ -549,10 +595,15 @@ protected:
/// Map of scalar integer values to the smallest bitwidth they can be legally
/// represented as. The vector equivalents of these values should be truncated
/// to this type.
- MapVector<Instruction*,uint64_t> MinBWs;
+ const MapVector<Instruction *, uint64_t> *MinBWs;
+
+ /// A set of values that should not be widened. This is taken from
+ /// VecValuesToIgnore in the cost model.
+ SmallPtrSetImpl<const Value *> *ValuesNotWidened;
+
LoopVectorizationLegality *Legal;
- // Record whether runtime check is added.
+ // Record whether runtime checks are added.
bool AddedSafetyChecks;
};
@@ -561,8 +612,10 @@ public:
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
LoopInfo *LI, DominatorTree *DT,
const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, unsigned UnrollFactor)
- : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, 1, UnrollFactor) {}
+ const TargetTransformInfo *TTI, AssumptionCache *AC,
+ unsigned UnrollFactor)
+ : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1,
+ UnrollFactor) {}
private:
void scalarizeInstruction(Instruction *Instr,
@@ -618,36 +671,26 @@ static std::string getDebugLocString(const Loop *L) {
}
#endif
-/// \brief Propagate known metadata from one instruction to another.
-static void propagateMetadata(Instruction *To, const Instruction *From) {
- SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
- From->getAllMetadataOtherThanDebugLoc(Metadata);
-
- for (auto M : Metadata) {
- unsigned Kind = M.first;
-
- // These are safe to transfer (this is safe for TBAA, even when we
- // if-convert, because should that metadata have had a control dependency
- // on the condition, and thus actually aliased with some other
- // non-speculated memory access when the condition was false, this would be
- // caught by the runtime overlap checks).
- if (Kind != LLVMContext::MD_tbaa &&
- Kind != LLVMContext::MD_alias_scope &&
- Kind != LLVMContext::MD_noalias &&
- Kind != LLVMContext::MD_fpmath &&
- Kind != LLVMContext::MD_nontemporal)
- continue;
+void InnerLoopVectorizer::addNewMetadata(Instruction *To,
+ const Instruction *Orig) {
+ // If the loop was versioned with memchecks, add the corresponding no-alias
+ // metadata.
+ if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
+ LVer->annotateInstWithNoAlias(To, Orig);
+}
- To->setMetadata(Kind, M.second);
- }
+void InnerLoopVectorizer::addMetadata(Instruction *To,
+ Instruction *From) {
+ propagateMetadata(To, From);
+ addNewMetadata(To, From);
}
-/// \brief Propagate known metadata from one instruction to a vector of others.
-static void propagateMetadata(SmallVectorImpl<Value *> &To,
- const Instruction *From) {
- for (Value *V : To)
+void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
+ Instruction *From) {
+ for (Value *V : To) {
if (Instruction *I = dyn_cast<Instruction>(V))
- propagateMetadata(I, From);
+ addMetadata(I, From);
+ }
}
/// \brief The group of interleaved loads/stores sharing the same stride and
@@ -785,8 +828,9 @@ private:
class InterleavedAccessInfo {
public:
InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
- DominatorTree *DT)
- : PSE(PSE), TheLoop(L), DT(DT) {}
+ DominatorTree *DT, LoopInfo *LI)
+ : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
+ RequiresScalarEpilogue(false) {}
~InterleavedAccessInfo() {
SmallSet<InterleaveGroup *, 4> DelSet;
@@ -806,6 +850,14 @@ public:
return InterleaveGroupMap.count(Instr);
}
+ /// \brief Return the maximum interleave factor of all interleaved groups.
+ unsigned getMaxInterleaveFactor() const {
+ unsigned MaxFactor = 1;
+ for (auto &Entry : InterleaveGroupMap)
+ MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
+ return MaxFactor;
+ }
+
/// \brief Get the interleave group that \p Instr belongs to.
///
/// \returns nullptr if it doesn't have such a group.
@@ -815,6 +867,13 @@ public:
return nullptr;
}
+ /// \brief Returns true if an interleaved group that may access memory
+ /// out-of-bounds requires a scalar epilogue iteration for correctness.
+ bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
+
+ /// \brief Initialize the LoopAccessInfo used for dependence checking.
+ void setLAI(const LoopAccessInfo *Info) { LAI = Info; }
+
private:
/// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
/// Simplifies SCEV expressions in the context of existing SCEV assumptions.
@@ -823,24 +882,39 @@ private:
PredicatedScalarEvolution &PSE;
Loop *TheLoop;
DominatorTree *DT;
+ LoopInfo *LI;
+ const LoopAccessInfo *LAI;
+
+ /// True if the loop may contain non-reversed interleaved groups with
+ /// out-of-bounds accesses. We ensure we don't speculatively access memory
+ /// out-of-bounds by executing at least one scalar epilogue iteration.
+ bool RequiresScalarEpilogue;
/// Holds the relationships between the members and the interleave group.
DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
+ /// Holds dependences among the memory accesses in the loop. It maps a source
+ /// access to a set of dependent sink accesses.
+ DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
+
/// \brief The descriptor for a strided memory access.
struct StrideDescriptor {
- StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
+ StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
unsigned Align)
: Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
- StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}
+ StrideDescriptor() = default;
- int Stride; // The access's stride. It is negative for a reverse access.
- const SCEV *Scev; // The scalar expression of this access
- unsigned Size; // The size of the memory object.
- unsigned Align; // The alignment of this access.
+ // The access's stride. It is negative for a reverse access.
+ int64_t Stride = 0;
+ const SCEV *Scev = nullptr; // The scalar expression of this access
+ uint64_t Size = 0; // The size of the memory object.
+ unsigned Align = 0; // The alignment of this access.
};
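A minimal standalone sketch of the C++11 idiom adopted here, where in-class default member initializers supply the zero state so the handwritten default constructor can become `= default` (the type below is invented for illustration):

    #include <cstdint>

    struct Descriptor {
      Descriptor(int64_t S, uint64_t Sz) : Stride(S), Size(Sz) {}
      Descriptor() = default; // zero state comes from the initializers below
      int64_t Stride = 0;
      uint64_t Size = 0;
    };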
+ /// \brief A type for holding instructions and their stride descriptors.
+ typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;
+
/// \brief Create a new interleave group with the given instruction \p Instr,
/// stride \p Stride and alignment \p Align.
///
@@ -863,9 +937,86 @@ private:
}
/// \brief Collect all the accesses with a constant stride in program order.
- void collectConstStridedAccesses(
- MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
+ void collectConstStrideAccesses(
+ MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
const ValueToValueMap &Strides);
+
+ /// \brief Returns true if \p Stride is allowed in an interleaved group.
+ static bool isStrided(int Stride) {
+ unsigned Factor = std::abs(Stride);
+ return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
+ }
+
+ /// \brief Returns true if \p BB is a predicated block.
+ bool isPredicated(BasicBlock *BB) const {
+ return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
+ }
+
+ /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
+ bool areDependencesValid() const {
+ return LAI && LAI->getDepChecker().getDependences();
+ }
+
+ /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
+ /// necessary, when constructing interleaved groups.
+ ///
+ /// \p A must precede \p B in program order. We return true if reordering is
+ /// unnecessary or known to be safe, and false if a dependence from \p A to
+ /// \p B may prevent it.
+ bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
+ StrideEntry *B) const {
+
+ // Code motion for interleaved accesses can potentially hoist strided loads
+ // and sink strided stores. The code below checks the legality of the
+ // following two conditions:
+ //
+ // 1. Potentially moving a strided load (B) before any store (A) that
+ // precedes B, or
+ //
+ // 2. Potentially moving a strided store (A) after any load or store (B)
+ // that A precedes.
+ //
+ // It's legal to reorder A and B if we know there isn't a dependence from A
+ // to B. Note that this determination is conservative since some
+ // dependences could potentially be reordered safely.
+
+ // A is potentially the source of a dependence.
+ auto *Src = A->first;
+ auto SrcDes = A->second;
+
+ // B is potentially the sink of a dependence.
+ auto *Sink = B->first;
+ auto SinkDes = B->second;
+
+ // Code motion for interleaved accesses can't violate WAR dependences.
+ // Thus, reordering is legal if the source isn't a write.
+ if (!Src->mayWriteToMemory())
+ return true;
+
+ // At least one of the accesses must be strided.
+ if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
+ return true;
+
+ // If dependence information is not available from LoopAccessInfo,
+ // conservatively assume the instructions can't be reordered.
+ if (!areDependencesValid())
+ return false;
+
+ // If we know there is a dependence from source to sink, assume the
+ // instructions can't be reordered. Otherwise, reordering is legal.
+ return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
+ }
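A hedged source-level picture of condition 2 above (the loop and array names are invented; the exact insertion point of a formed group is an assumption of this sketch):

    // If the two strided stores to A are emitted together as one interleaved
    // group at the position of the last member, the store A[2*i] is in effect
    // sunk past the load of B[i + 1]. That is legal only if no dependence runs
    // from the store (source) to the load (sink), e.g. if A and B cannot alias.
    void example(int *A, int *B, int N) {
      for (int i = 0; i < N; ++i) {
        A[2 * i] = B[i];  // strided store: potential dependence source
        int T = B[i + 1]; // intervening load: potential dependence sink
        A[2 * i + 1] = T; // strided store: completes the group
      }
    }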
+
+ /// \brief Collect the dependences from LoopAccessInfo.
+ ///
+ /// We process the dependences once during the interleaved access analysis to
+ /// enable constant-time dependence queries.
+ void collectDependences() {
+ if (!areDependencesValid())
+ return;
+ auto *Deps = LAI->getDepChecker().getDependences();
+ for (auto Dep : *Deps)
+ Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
+ }
};
/// Utility class for getting and setting loop vectorizer hints in the form
@@ -878,20 +1029,16 @@ private:
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked so.
class LoopVectorizeHints {
- enum HintKind {
- HK_WIDTH,
- HK_UNROLL,
- HK_FORCE
- };
+ enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };
/// Hint - associates name and validation with the hint value.
struct Hint {
- const char * Name;
+ const char *Name;
unsigned Value; // This may have to change for non-numeric values.
HintKind Kind;
- Hint(const char * Name, unsigned Value, HintKind Kind)
- : Name(Name), Value(Value), Kind(Kind) { }
+ Hint(const char *Name, unsigned Value, HintKind Kind)
+ : Name(Name), Value(Value), Kind(Kind) {}
bool validate(unsigned Val) {
switch (Kind) {
@@ -916,6 +1063,9 @@ class LoopVectorizeHints {
/// Return the loop metadata prefix.
static StringRef Prefix() { return "llvm.loop."; }
+ /// True if there is any unsafe math in the loop.
+ bool PotentiallyUnsafe;
+
public:
enum ForceKind {
FK_Undefined = -1, ///< Not selected.
@@ -928,7 +1078,7 @@ public:
HK_WIDTH),
Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
Force("vectorize.enable", FK_Undefined, HK_FORCE),
- TheLoop(L) {
+ PotentiallyUnsafe(false), TheLoop(L) {
// Populate values with existing loop metadata.
getHintsFromMetadata();
@@ -1005,16 +1155,17 @@ public:
unsigned getWidth() const { return Width.Value; }
unsigned getInterleave() const { return Interleave.Value; }
enum ForceKind getForce() const { return (ForceKind)Force.Value; }
+
+ /// \brief If hints are provided that force vectorization, use the AlwaysPrint
+ /// pass name to force the frontend to print the diagnostic.
const char *vectorizeAnalysisPassName() const {
- // If hints are provided that don't disable vectorization use the
- // AlwaysPrint pass name to force the frontend to print the diagnostic.
if (getWidth() == 1)
return LV_NAME;
if (getForce() == LoopVectorizeHints::FK_Disabled)
return LV_NAME;
if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
return LV_NAME;
- return DiagnosticInfo::AlwaysPrint;
+ return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint;
}
bool allowReordering() const {
@@ -1026,6 +1177,17 @@ public:
return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
}
+ bool isPotentiallyUnsafe() const {
+ // Avoid FP vectorization if the target is unsure about proper support.
+ // This may be related to the SIMD unit in the target not handling
+ // IEEE 754 FP ops properly, or bad single-to-double promotions.
+ // Otherwise, a sequence of vectorized loops, even without reduction,
+ // could lead to different end results on the destination vectors.
+ return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
+ }
+
+ void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
+
private:
/// Find hints specified in the loop metadata and update local values.
void getHintsFromMetadata() {
@@ -1071,7 +1233,8 @@ private:
Name = Name.substr(Prefix().size(), StringRef::npos);
const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
- if (!C) return;
+ if (!C)
+ return;
unsigned Val = C->getZExtValue();
Hint *Hints[] = {&Width, &Interleave, &Force};
@@ -1097,7 +1260,7 @@ private:
/// Matches metadata with hint name.
bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
- MDString* Name = dyn_cast<MDString>(Node->getOperand(0));
+ MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
if (!Name)
return false;
@@ -1181,17 +1344,17 @@ static void emitMissedWarning(Function *F, Loop *L,
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
- LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
- DominatorTree *DT, TargetLibraryInfo *TLI,
- AliasAnalysis *AA, Function *F,
- const TargetTransformInfo *TTI,
- LoopAccessAnalysis *LAA,
- LoopVectorizationRequirements *R,
- const LoopVectorizeHints *H)
+ LoopVectorizationLegality(
+ Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
+ TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
+ const TargetTransformInfo *TTI,
+ std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
+ LoopVectorizationRequirements *R, LoopVectorizeHints *H)
: NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
- TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
- Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
- Requirements(R), Hints(H) {}
+ TTI(TTI), DT(DT), GetLAA(GetLAA), LAI(nullptr),
+ InterleaveInfo(PSE, L, DT, LI), Induction(nullptr),
+ WidestIndTy(nullptr), HasFunNoNaNAttr(false), Requirements(R),
+ Hints(H) {}
/// ReductionList contains the reduction descriptors for all
/// of the reductions that were found in the loop.
@@ -1199,7 +1362,11 @@ public:
/// InductionList saves induction variables and maps them to the
/// induction descriptor.
- typedef MapVector<PHINode*, InductionDescriptor> InductionList;
+ typedef MapVector<PHINode *, InductionDescriptor> InductionList;
+
+ /// RecurrenceSet contains the phi nodes that are recurrences other than
+ /// inductions and reductions.
+ typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
/// Returns true if it is legal to vectorize this loop.
/// This does not mean that it is profitable to vectorize this
@@ -1215,6 +1382,9 @@ public:
/// Returns the induction variables found in the loop.
InductionList *getInductionVars() { return &Inductions; }
+ /// Return the first-order recurrences found in the loop.
+ RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
+
/// Returns the widest induction type.
Type *getWidestInductionType() { return WidestIndTy; }
@@ -1224,11 +1394,14 @@ public:
/// Returns True if PN is a reduction variable in this loop.
bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
+ /// Returns True if Phi is a first-order recurrence in this loop.
+ bool isFirstOrderRecurrence(const PHINode *Phi);
+
/// Return true if the block BB needs to be predicated in order for the loop
/// to be vectorized.
bool blockNeedsPredication(BasicBlock *BB);
- /// Check if this pointer is consecutive when vectorizing. This happens
+ /// Check if this pointer is consecutive when vectorizing. This happens
/// when the last index of the GEP is the induction variable, or that the
/// pointer itself is an induction variable.
/// This check allows us to vectorize A[idx] into a wide load/store.
@@ -1242,35 +1415,39 @@ public:
bool isUniform(Value *V);
/// Returns true if this instruction will remain scalar after vectorization.
- bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); }
+ bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }
/// Returns the information that we collected about runtime memory check.
const RuntimePointerChecking *getRuntimePointerChecking() const {
return LAI->getRuntimePointerChecking();
}
- const LoopAccessInfo *getLAI() const {
- return LAI;
- }
+ const LoopAccessInfo *getLAI() const { return LAI; }
/// \brief Check if \p Instr belongs to any interleaved access group.
bool isAccessInterleaved(Instruction *Instr) {
return InterleaveInfo.isInterleaved(Instr);
}
+ /// \brief Return the maximum interleave factor of all interleaved groups.
+ unsigned getMaxInterleaveFactor() const {
+ return InterleaveInfo.getMaxInterleaveFactor();
+ }
+
/// \brief Get the interleaved access group that \p Instr belongs to.
const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
return InterleaveInfo.getInterleaveGroup(Instr);
}
+ /// \brief Returns true if an interleaved group requires a scalar iteration
+ /// to handle accesses with gaps.
+ bool requiresScalarEpilogue() const {
+ return InterleaveInfo.requiresScalarEpilogue();
+ }
+
unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
- bool hasStride(Value *V) { return StrideSet.count(V); }
- bool mustCheckStrides() { return !StrideSet.empty(); }
- SmallPtrSet<Value *, 8>::iterator strides_begin() {
- return StrideSet.begin();
- }
- SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }
+ bool hasStride(Value *V) { return LAI->hasStride(V); }
/// Returns true if the target machine supports masked store operation
/// for the given \p DataType and kind of access to \p Ptr.
@@ -1282,20 +1459,24 @@ public:
bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
}
- /// Returns true if vector representation of the instruction \p I
- /// requires mask.
- bool isMaskRequired(const Instruction* I) {
- return (MaskedOp.count(I) != 0);
- }
- unsigned getNumStores() const {
- return LAI->getNumStores();
+ /// Returns true if the target machine supports masked scatter operation
+ /// for the given \p DataType.
+ bool isLegalMaskedScatter(Type *DataType) {
+ return TTI->isLegalMaskedScatter(DataType);
}
- unsigned getNumLoads() const {
- return LAI->getNumLoads();
- }
- unsigned getNumPredStores() const {
- return NumPredStores;
+ /// Returns true if the target machine supports masked gather operation
+ /// for the given \p DataType.
+ bool isLegalMaskedGather(Type *DataType) {
+ return TTI->isLegalMaskedGather(DataType);
}
+
+ /// Returns true if the vector representation of the instruction \p I
+ /// requires a mask.
+ bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
+ unsigned getNumStores() const { return LAI->getNumStores(); }
+ unsigned getNumLoads() const { return LAI->getNumLoads(); }
+ unsigned getNumPredStores() const { return NumPredStores; }
+
private:
/// Check if a single basic block loop is vectorizable.
/// At this point we know that this is a loop with a constant trip count
@@ -1320,11 +1501,11 @@ private:
/// and we know that we can read from them without segfault.
bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
- /// \brief Collect memory access with loop invariant strides.
- ///
- /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
- /// invariant.
- void collectStridedAccess(Value *LoadOrStoreInst);
+ /// Updates the vectorization state by adding \p Phi to the inductions list.
+ /// This can set \p Phi as the main induction of the loop if \p Phi is a
+ /// better choice for the main induction than the existing one.
+ void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
+ SmallPtrSetImpl<Value *> &AllowedExit);
/// Report an analysis message to assist the user in diagnosing loops that are
/// not vectorized. These are handled as LoopAccessReport rather than
@@ -1334,6 +1515,16 @@ private:
emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
}
+ /// \brief If an access has a symbolic stride, this maps the pointer value to
+ /// the stride symbol.
+ const ValueToValueMap *getSymbolicStrides() {
+ // FIXME: Currently, the set of symbolic strides is sometimes queried before
+ // it's collected. This happens from canVectorizeWithIfConvert, when the
+ // pointer is checked to reference consecutive elements suitable for a
+ // masked access.
+ return LAI ? &LAI->getSymbolicStrides() : nullptr;
+ }
+
unsigned NumPredStores;
/// The loop that we evaluate.
@@ -1353,7 +1544,7 @@ private:
/// Dominator Tree.
DominatorTree *DT;
// LoopAccess analysis.
- LoopAccessAnalysis *LAA;
+ std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
// And the loop-accesses info corresponding to this loop. This pointer is
// null until canVectorizeMemory sets it up.
const LoopAccessInfo *LAI;
@@ -1373,15 +1564,17 @@ private:
/// Notice that inductions don't need to start at zero and that induction
/// variables can be pointers.
InductionList Inductions;
+ /// Holds the phi nodes that are first-order recurrences.
+ RecurrenceSet FirstOrderRecurrences;
/// Holds the widest induction type encountered.
Type *WidestIndTy;
- /// Allowed outside users. This holds the reduction
+ /// Allowed outside users. This holds the induction and reduction
/// vars which can be accessed from outside the loop.
- SmallPtrSet<Value*, 4> AllowedExit;
+ SmallPtrSet<Value *, 4> AllowedExit;
/// This set holds the variables which are known to be uniform after
/// vectorization.
- SmallPtrSet<Instruction*, 4> Uniforms;
+ SmallPtrSet<Instruction *, 4> Uniforms;
/// Can we assume the absence of NaNs.
bool HasFunNoNaNAttr;
@@ -1390,10 +1583,7 @@ private:
LoopVectorizationRequirements *Requirements;
/// Used to emit an analysis of any legality issues.
- const LoopVectorizeHints *Hints;
-
- ValueToValueMap Strides;
- SmallPtrSet<Value *, 8> StrideSet;
+ LoopVectorizeHints *Hints;
/// While vectorizing these instructions we have to generate a
/// call to the appropriate masked intrinsic
@@ -1409,20 +1599,19 @@ private:
/// different operations.
class LoopVectorizationCostModel {
public:
- LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
- LoopVectorizationLegality *Legal,
+ LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
+ LoopInfo *LI, LoopVectorizationLegality *Legal,
const TargetTransformInfo &TTI,
const TargetLibraryInfo *TLI, DemandedBits *DB,
AssumptionCache *AC, const Function *F,
- const LoopVectorizeHints *Hints,
- SmallPtrSetImpl<const Value *> &ValuesToIgnore)
- : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
- TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}
+ const LoopVectorizeHints *Hints)
+ : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
+ AC(AC), TheFunction(F), Hints(Hints) {}
/// Information about vectorization costs
struct VectorizationFactor {
unsigned Width; // Vector width with best cost
- unsigned Cost; // Cost of the loop with that width
+ unsigned Cost; // Cost of the loop with that width
};
/// \return The most profitable vectorization factor and the cost of that VF.
/// This method checks every power of two up to VF. If UserVF is not ZERO
@@ -1462,19 +1651,34 @@ public:
/// \return Returns information about the register usages of the loop for the
/// given vectorization factors.
- SmallVector<RegisterUsage, 8>
- calculateRegisterUsage(const SmallVector<unsigned, 8> &VFs);
+ SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
+
+ /// Collect values we want to ignore in the cost model.
+ void collectValuesToIgnore();
private:
+ /// The vectorization cost is a combination of the cost itself and a boolean
+ /// indicating whether any of the contributing operations will actually
+ /// operate on vector values after type legalization in the backend. If this
+ /// latter value is false, then all operations will be scalarized (i.e., no
+ /// vectorization has actually taken place).
+ typedef std::pair<unsigned, bool> VectorizationCostTy;
+
/// Returns the expected execution cost. The unit of the cost does
/// not matter because we use the 'cost' units to compare different
/// vector widths. The cost that is returned is *not* normalized by
/// the factor width.
- unsigned expectedCost(unsigned VF);
+ VectorizationCostTy expectedCost(unsigned VF);
/// Returns the execution time cost of an instruction for a given vector
/// width. Vector width of one means scalar.
- unsigned getInstructionCost(Instruction *I, unsigned VF);
+ VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
+
+ /// The cost-computation logic from getInstructionCost which provides
+ /// the vector type as an output parameter.
+ unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
/// Returns whether the instruction is a load or store and will be emitted
/// as a vector operation.
@@ -1492,12 +1696,12 @@ public:
/// Map of scalar integer values to the smallest bitwidth they can be legally
/// represented as. The vector equivalents of these values should be truncated
/// to this type.
- MapVector<Instruction*,uint64_t> MinBWs;
+ MapVector<Instruction *, uint64_t> MinBWs;
/// The loop that we evaluate.
Loop *TheLoop;
- /// Scev analysis.
- ScalarEvolution *SE;
+ /// Predicated scalar evolution analysis.
+ PredicatedScalarEvolution &PSE;
/// Loop Info analysis.
LoopInfo *LI;
/// Vectorization legality.
@@ -1506,13 +1710,17 @@ public:
const TargetTransformInfo &TTI;
/// Target Library Info.
const TargetLibraryInfo *TLI;
- /// Demanded bits analysis
+ /// Demanded bits analysis.
DemandedBits *DB;
+ /// Assumption cache.
+ AssumptionCache *AC;
const Function *TheFunction;
- // Loop Vectorize Hint.
+ /// Loop Vectorize Hint.
const LoopVectorizeHints *Hints;
- // Values to ignore in the cost model.
- const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
+ /// Values to ignore in the cost model.
+ SmallPtrSet<const Value *, 16> ValuesToIgnore;
+ /// Values to ignore in the cost model when VF > 1.
+ SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};
/// \brief This holds vectorization requirements that must be verified late in
@@ -1588,328 +1796,35 @@ struct LoopVectorize : public FunctionPass {
static char ID;
explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
- : FunctionPass(ID),
- DisableUnrolling(NoUnrolling),
- AlwaysVectorize(AlwaysVectorize) {
+ : FunctionPass(ID) {
+ Impl.DisableUnrolling = NoUnrolling;
+ Impl.AlwaysVectorize = AlwaysVectorize;
initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
}
- ScalarEvolution *SE;
- LoopInfo *LI;
- TargetTransformInfo *TTI;
- DominatorTree *DT;
- BlockFrequencyInfo *BFI;
- TargetLibraryInfo *TLI;
- DemandedBits *DB;
- AliasAnalysis *AA;
- AssumptionCache *AC;
- LoopAccessAnalysis *LAA;
- bool DisableUnrolling;
- bool AlwaysVectorize;
-
- BlockFrequency ColdEntryFreq;
+ LoopVectorizePass Impl;
bool runOnFunction(Function &F) override {
- SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
- LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
- auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
- TLI = TLIP ? &TLIP->getTLI() : nullptr;
- AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- LAA = &getAnalysis<LoopAccessAnalysis>();
- DB = &getAnalysis<DemandedBits>();
-
- // Compute some weights outside of the loop over the loops. Compute this
- // using a BranchProbability to re-use its scaling math.
- const BranchProbability ColdProb(1, 5); // 20%
- ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
-
- // Don't attempt if
- // 1. the target claims to have no vector registers, and
- // 2. interleaving won't help ILP.
- //
- // The second condition is necessary because, even if the target has no
- // vector registers, loop vectorization may still enable scalar
- // interleaving.
- if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
+ if (skipFunction(F))
return false;
- // Build up a worklist of inner-loops to vectorize. This is necessary as
- // the act of vectorizing or partially unrolling a loop creates new loops
- // and can invalidate iterators across the loops.
- SmallVector<Loop *, 8> Worklist;
-
- for (Loop *L : *LI)
- addInnerLoop(*L, Worklist);
-
- LoopsAnalyzed += Worklist.size();
-
- // Now walk the identified inner loops.
- bool Changed = false;
- while (!Worklist.empty())
- Changed |= processLoop(Worklist.pop_back_val());
-
- // Process each loop nest in the function.
- return Changed;
- }
-
- static void AddRuntimeUnrollDisableMetaData(Loop *L) {
- SmallVector<Metadata *, 4> MDs;
- // Reserve first location for self reference to the LoopID metadata node.
- MDs.push_back(nullptr);
- bool IsUnrollMetadata = false;
- MDNode *LoopID = L->getLoopID();
- if (LoopID) {
- // First find existing loop unrolling disable metadata.
- for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
- MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
- if (MD) {
- const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
- IsUnrollMetadata =
- S && S->getString().startswith("llvm.loop.unroll.disable");
- }
- MDs.push_back(LoopID->getOperand(i));
- }
- }
-
- if (!IsUnrollMetadata) {
- // Add runtime unroll disable metadata.
- LLVMContext &Context = L->getHeader()->getContext();
- SmallVector<Metadata *, 1> DisableOperands;
- DisableOperands.push_back(
- MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
- MDNode *DisableNode = MDNode::get(Context, DisableOperands);
- MDs.push_back(DisableNode);
- MDNode *NewLoopID = MDNode::get(Context, MDs);
- // Set operand 0 to refer to the loop id itself.
- NewLoopID->replaceOperandWith(0, NewLoopID);
- L->setLoopID(NewLoopID);
- }
- }
-
- bool processLoop(Loop *L) {
- assert(L->empty() && "Only process inner loops.");
-
-#ifndef NDEBUG
- const std::string DebugLocStr = getDebugLocString(L);
-#endif /* NDEBUG */
-
- DEBUG(dbgs() << "\nLV: Checking a loop in \""
- << L->getHeader()->getParent()->getName() << "\" from "
- << DebugLocStr << "\n");
-
- LoopVectorizeHints Hints(L, DisableUnrolling);
-
- DEBUG(dbgs() << "LV: Loop hints:"
- << " force="
- << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
- ? "disabled"
- : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
- ? "enabled"
- : "?")) << " width=" << Hints.getWidth()
- << " unroll=" << Hints.getInterleave() << "\n");
-
- // Function containing loop
- Function *F = L->getHeader()->getParent();
-
- // Looking at the diagnostic output is the only way to determine if a loop
- // was vectorized (other than looking at the IR or machine code), so it
- // is important to generate an optimization remark for each loop. Most of
- // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
- // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
- // less verbose reporting vectorized loops and unvectorized loops that may
- // benefit from vectorization, respectively.
-
- if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
- DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
- return false;
- }
-
- // Check the loop for a trip count threshold:
- // do not vectorize loops with a tiny trip count.
- const unsigned TC = SE->getSmallConstantTripCount(L);
- if (TC > 0u && TC < TinyTripCountVectorThreshold) {
- DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
- << "This loop is not worth vectorizing.");
- if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
- DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
- else {
- DEBUG(dbgs() << "\n");
- emitAnalysisDiag(F, L, Hints, VectorizationReport()
- << "vectorization is not beneficial "
- "and is not explicitly forced");
- return false;
- }
- }
-
- PredicatedScalarEvolution PSE(*SE);
-
- // Check if it is legal to vectorize the loop.
- LoopVectorizationRequirements Requirements;
- LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA,
- &Requirements, &Hints);
- if (!LVL.canVectorize()) {
- DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
- emitMissedWarning(F, L, Hints);
- return false;
- }
-
- // Collect values we want to ignore in the cost model. This includes
- // type-promoting instructions we identified during reduction detection.
- SmallPtrSet<const Value *, 32> ValuesToIgnore;
- CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore);
- for (auto &Reduction : *LVL.getReductionVars()) {
- RecurrenceDescriptor &RedDes = Reduction.second;
- SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
- ValuesToIgnore.insert(Casts.begin(), Casts.end());
- }
-
- // Use the cost model.
- LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC,
- F, &Hints, ValuesToIgnore);
-
- // Check the function attributes to find out if this function should be
- // optimized for size.
- bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
- F->optForSize();
-
- // Compute the weighted frequency of this loop being executed and see if it
- // is less than 20% of the function entry baseline frequency. Note that we
- // always have a canonical loop here because we think we *can* vectorize.
- // FIXME: This is hidden behind a flag due to pervasive problems with
- // exactly what block frequency models.
- if (LoopVectorizeWithBlockFrequency) {
- BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
- if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
- LoopEntryFreq < ColdEntryFreq)
- OptForSize = true;
- }
-
- // Check the function attributes to see if implicit floats are allowed.
- // FIXME: This check doesn't seem possibly correct -- what if the loop is
- // an integer loop and the vector instructions selected are purely integer
- // vector instructions?
- if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
- DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
- "attribute is used.\n");
- emitAnalysisDiag(
- F, L, Hints,
- VectorizationReport()
- << "loop not vectorized due to NoImplicitFloat attribute");
- emitMissedWarning(F, L, Hints);
- return false;
- }
-
- // Select the optimal vectorization factor.
- const LoopVectorizationCostModel::VectorizationFactor VF =
- CM.selectVectorizationFactor(OptForSize);
-
- // Select the interleave count.
- unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
-
- // Get user interleave count.
- unsigned UserIC = Hints.getInterleave();
-
- // Identify the diagnostic messages that should be produced.
- std::string VecDiagMsg, IntDiagMsg;
- bool VectorizeLoop = true, InterleaveLoop = true;
-
- if (Requirements.doesNotMeet(F, L, Hints)) {
- DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
- "requirements.\n");
- emitMissedWarning(F, L, Hints);
- return false;
- }
-
- if (VF.Width == 1) {
- DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
- VecDiagMsg =
- "the cost-model indicates that vectorization is not beneficial";
- VectorizeLoop = false;
- }
-
- if (IC == 1 && UserIC <= 1) {
- // Tell the user interleaving is not beneficial.
- DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
- IntDiagMsg =
- "the cost-model indicates that interleaving is not beneficial";
- InterleaveLoop = false;
- if (UserIC == 1)
- IntDiagMsg +=
- " and is explicitly disabled or interleave count is set to 1";
- } else if (IC > 1 && UserIC == 1) {
- // Tell the user interleaving is beneficial, but it explicitly disabled.
- DEBUG(dbgs()
- << "LV: Interleaving is beneficial but is explicitly disabled.");
- IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
- "but is explicitly disabled or interleave count is set to 1";
- InterleaveLoop = false;
- }
-
- // Override IC if user provided an interleave count.
- IC = UserIC > 0 ? UserIC : IC;
-
- // Emit diagnostic messages, if any.
- const char *VAPassName = Hints.vectorizeAnalysisPassName();
- if (!VectorizeLoop && !InterleaveLoop) {
- // Do not vectorize or interleaving the loop.
- emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
- L->getStartLoc(), VecDiagMsg);
- emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
- L->getStartLoc(), IntDiagMsg);
- return false;
- } else if (!VectorizeLoop && InterleaveLoop) {
- DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
- emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
- L->getStartLoc(), VecDiagMsg);
- } else if (VectorizeLoop && !InterleaveLoop) {
- DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
- << DebugLocStr << '\n');
- emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
- L->getStartLoc(), IntDiagMsg);
- } else if (VectorizeLoop && InterleaveLoop) {
- DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
- << DebugLocStr << '\n');
- DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
- }
-
- if (!VectorizeLoop) {
- assert(IC > 1 && "interleave count should not be 1 or 0");
- // If we decided that it is not legal to vectorize the loop then
- // interleave it.
- InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, IC);
- Unroller.vectorize(&LVL, CM.MinBWs);
-
- emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
- Twine("interleaved loop (interleaved count: ") +
- Twine(IC) + ")");
- } else {
- // If we decided that it is *legal* to vectorize the loop then do it.
- InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, VF.Width, IC);
- LB.vectorize(&LVL, CM.MinBWs);
- ++LoopsVectorized;
-
- // Add metadata to disable runtime unrolling scalar loop when there's no
- // runtime check about strides and memory. Because at this situation,
- // scalar loop is rarely used not worthy to be unrolled.
- if (!LB.IsSafetyChecksAdded())
- AddRuntimeUnrollDisableMetaData(L);
-
- // Report the vectorization decision.
- emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
- Twine("vectorized loop (vectorization width: ") +
- Twine(VF.Width) + ", interleaved count: " +
- Twine(IC) + ")");
- }
+ auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+ auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
+ auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
+ auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+ auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
+ auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
- // Mark the loop as already vectorized to avoid vectorizing again.
- Hints.setAlreadyVectorized();
+ std::function<const LoopAccessInfo &(Loop &)> GetLAA =
+ [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
- DEBUG(verifyFunction(*L->getHeader()->getParent()));
- return true;
+ return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
+ GetLAA);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -1922,15 +1837,13 @@ struct LoopVectorize : public FunctionPass {
AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<AAResultsWrapperPass>();
- AU.addRequired<LoopAccessAnalysis>();
- AU.addRequired<DemandedBits>();
+ AU.addRequired<LoopAccessLegacyAnalysis>();
+ AU.addRequired<DemandedBitsWrapperPass>();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<BasicAAWrapperPass>();
- AU.addPreserved<AAResultsWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
}
-
};
} // end anonymous namespace
@@ -1943,9 +1856,7 @@ struct LoopVectorize : public FunctionPass {
Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
// We need to place the broadcast of invariant variables outside the loop.
Instruction *Instr = dyn_cast<Instruction>(V);
- bool NewInstr =
- (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
- Instr->getParent()) != LoopVectorBody.end());
+ bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
// Place the code for broadcasting invariant variables in the new preheader.
@@ -1959,6 +1870,111 @@ Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
return Shuf;
}
+void InnerLoopVectorizer::createVectorIntInductionPHI(
+ const InductionDescriptor &II, VectorParts &Entry, IntegerType *TruncType) {
+ Value *Start = II.getStartValue();
+ ConstantInt *Step = II.getConstIntStepValue();
+ assert(Step && "Can not widen an IV with a non-constant step");
+
+ // Construct the initial value of the vector IV in the vector loop preheader
+ auto CurrIP = Builder.saveIP();
+ Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
+ if (TruncType) {
+ Step = ConstantInt::getSigned(TruncType, Step->getSExtValue());
+ Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
+ }
+ Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
+ Value *SteppedStart = getStepVector(SplatStart, 0, Step);
+ Builder.restoreIP(CurrIP);
+
+ Value *SplatVF =
+ ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(),
+ VF * Step->getSExtValue()));
+ // We may need to add the step a number of times, depending on the unroll
+ // factor. The last of those goes into the PHI.
+ PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
+ &*LoopVectorBody->getFirstInsertionPt());
+ Value *LastInduction = VecInd;
+ for (unsigned Part = 0; Part < UF; ++Part) {
+ Entry[Part] = LastInduction;
+ LastInduction = Builder.CreateAdd(LastInduction, SplatVF, "step.add");
+ }
+
+ VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
+ VecInd->addIncoming(LastInduction, LoopVectorBody);
+}
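A hedged simulation of the lane values the code above produces, in plain C++ rather than IRBuilder calls; VF = 4, UF = 2, Start = 0, and Step = 1 are invented for the example:

    #include <cstdio>

    int main() {
      const unsigned VF = 4, UF = 2;
      const int Start = 0, Step = 1;
      int Lane[VF];
      for (unsigned L = 0; L < VF; ++L)
        Lane[L] = Start + int(L) * Step; // SteppedStart: <0,1,2,3>
      for (unsigned Part = 0; Part < UF; ++Part) {
        std::printf("part %u: <%d,%d,%d,%d>\n", Part, Lane[0], Lane[1],
                    Lane[2], Lane[3]);   // Entry[Part]
        for (unsigned L = 0; L < VF; ++L)
          Lane[L] += int(VF) * Step;     // step.add: add SplatVF = VF * Step
      }
      return 0; // Lane now holds the PHI's back-edge value: <8,9,10,11>
    }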
+
+void InnerLoopVectorizer::widenIntInduction(PHINode *IV, VectorParts &Entry,
+ TruncInst *Trunc) {
+
+ auto II = Legal->getInductionVars()->find(IV);
+ assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
+
+ auto ID = II->second;
+ assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
+
+ // If a truncate instruction was provided, get the smaller type.
+ auto *TruncType = Trunc ? cast<IntegerType>(Trunc->getType()) : nullptr;
+
+ // The step of the induction.
+ Value *Step = nullptr;
+
+ // If the induction variable has a constant integer step value, go ahead and
+ // get it now.
+ if (ID.getConstIntStepValue())
+ Step = ID.getConstIntStepValue();
+
+ // Try to create a new independent vector induction variable. If we can't
+ // create the phi node, we will splat the scalar induction variable in each
+ // loop iteration.
+ if (VF > 1 && IV->getType() == Induction->getType() && Step &&
+ !ValuesNotWidened->count(IV))
+ return createVectorIntInductionPHI(ID, Entry, TruncType);
+
+ // The scalar value to broadcast. This will be derived from the canonical
+ // induction variable.
+ Value *ScalarIV = nullptr;
+
+ // Define the scalar induction variable and step values. If we were given a
+ // truncation type, truncate the canonical induction variable and constant
+ // step. Otherwise, derive these values from the induction descriptor.
+ if (TruncType) {
+ assert(Step && "Truncation requires constant integer step");
+ auto StepInt = cast<ConstantInt>(Step)->getSExtValue();
+ ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType);
+ Step = ConstantInt::getSigned(TruncType, StepInt);
+ } else {
+ ScalarIV = Induction;
+ auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
+ if (IV != OldInduction) {
+ ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType());
+ ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
+ ScalarIV->setName("offset.idx");
+ }
+ if (!Step) {
+ SCEVExpander Exp(*PSE.getSE(), DL, "induction");
+ Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
+ &*Builder.GetInsertPoint());
+ }
+ }
+
+ // Splat the scalar induction variable, and build the necessary step vectors.
+ Value *Broadcasted = getBroadcastInstrs(ScalarIV);
+ for (unsigned Part = 0; Part < UF; ++Part)
+ Entry[Part] = getStepVector(Broadcasted, VF * Part, Step);
+
+ // If an induction variable is only used for counting loop iterations or
+ // calculating addresses, it doesn't need to be widened. Create scalar steps
+ // that can be used by instructions we will later scalarize. Note that the
+ // addition of the scalar steps will not increase the number of instructions
+ // in the loop in the common case prior to InstCombine. We will be trading
+ // one vector extract for each scalar step.
+ if (VF > 1 && ValuesNotWidened->count(IV)) {
+ auto *EntryVal = Trunc ? cast<Value>(Trunc) : IV;
+ buildScalarSteps(ScalarIV, Step, EntryVal);
+ }
+}
+
Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
Value *Step) {
assert(Val->getType()->isVectorTy() && "Must be a vector");
@@ -1970,7 +1986,7 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
Type *ITy = Val->getType()->getScalarType();
VectorType *Ty = cast<VectorType>(Val->getType());
int VLen = Ty->getNumElements();
- SmallVector<Constant*, 8> Indices;
+ SmallVector<Constant *, 8> Indices;
// Create a vector of consecutive numbers from zero to VF.
for (int i = 0; i < VLen; ++i)
@@ -1987,6 +2003,27 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
return Builder.CreateAdd(Val, Step, "induction");
}
+void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
+ Value *EntryVal) {
+
+ // We shouldn't have to build scalar steps if we aren't vectorizing.
+ assert(VF > 1 && "VF should be greater than one");
+
+ // Get the value type and ensure it and the step have the same integer type.
+ Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
+ assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
+ "Val and Step should have the same integer type");
+
+ // Compute the scalar steps and save the results in ScalarIVMap.
+ for (unsigned Part = 0; Part < UF; ++Part)
+ for (unsigned I = 0; I < VF; ++I) {
+ auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + I);
+ auto *Mul = Builder.CreateMul(StartIdx, Step);
+ auto *Add = Builder.CreateAdd(ScalarIV, Mul);
+ ScalarIVMap[EntryVal].push_back(Add);
+ }
+}
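
Since scalarized users later index this map as ScalarIVMap[EntryVal][VF * Part + Width], the flat layout matters. A standalone sketch of the values stored and how they are retrieved (plain C++; VF, UF, ScalarIV, and Step are illustrative assumptions, and the vector is a stand-in for the real map entry):

    #include <cstdio>
    #include <vector>

    int main() {
      const int VF = 4, UF = 2, ScalarIV = 100, Step = 2;
      // Flat per-lane steps: entry VF * Part + I holds
      // ScalarIV + (VF * Part + I) * Step.
      std::vector<int> Steps;
      for (int Part = 0; Part < UF; ++Part)
        for (int I = 0; I < VF; ++I)
          Steps.push_back(ScalarIV + (VF * Part + I) * Step);
      // A scalarized user of lane 2 in part 1 reads the precomputed step
      // instead of extracting from a widened vector value.
      std::printf("%d\n", Steps[VF * 1 + 2]); // 100 + 6 * 2 = 112
      return 0;
    }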
+
int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
auto *SE = PSE.getSE();
@@ -1994,7 +2031,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
if (Ptr->getType()->getPointerElementType()->isAggregateType())
return 0;
- // If this value is a pointer induction variable we know it is consecutive.
+ // If this value is a pointer induction variable, we know it is consecutive.
PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
if (Phi && Inductions.count(Phi)) {
InductionDescriptor II = Inductions[Phi];
@@ -2008,7 +2045,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
unsigned NumOperands = Gep->getNumOperands();
Value *GpPtr = Gep->getPointerOperand();
// If this GEP value is a consecutive pointer induction variable and all of
- // the indices are constant then we know it is consecutive. We can
+ // the indices are constant, then we know it is consecutive.
Phi = dyn_cast<PHINode>(GpPtr);
if (Phi && Inductions.count(Phi)) {
@@ -2038,7 +2075,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// We can emit wide load/stores only if the last non-zero index is the
// induction variable.
const SCEV *Last = nullptr;
- if (!Strides.count(Gep))
+ if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep))
Last = PSE.getSCEV(Gep->getOperand(InductionOperand));
else {
// Because of the multiplication by a stride we can have a s/zext cast.
@@ -2050,7 +2087,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// %idxprom = zext i32 %mul to i64 << Safe cast.
// %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
//
- Last = replaceSymbolicStrideSCEV(PSE, Strides,
+ Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(),
Gep->getOperand(InductionOperand), Gep);
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
Last =
@@ -2076,7 +2113,7 @@ bool LoopVectorizationLegality::isUniform(Value *V) {
return LAI->isUniform(V);
}
-InnerLoopVectorizer::VectorParts&
+InnerLoopVectorizer::VectorParts &
InnerLoopVectorizer::getVectorValue(Value *V) {
assert(V != Induction && "The new induction variable should not be used.");
assert(!V->getType()->isVectorTy() && "Can't widen a vector");
@@ -2097,7 +2134,7 @@ InnerLoopVectorizer::getVectorValue(Value *V) {
Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
assert(Vec->getType()->isVectorTy() && "Invalid type");
- SmallVector<Constant*, 8> ShuffleMask;
+ SmallVector<Constant *, 8> ShuffleMask;
for (unsigned i = 0; i < VF; ++i)
ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
@@ -2308,7 +2345,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
}
- propagateMetadata(NewLoadInstr, Instr);
+ addMetadata(NewLoadInstr, Instr);
}
return;
}
@@ -2326,7 +2363,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
assert(Member && "Fail to get a member from an interleaved store group");
Value *StoredVec =
- getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
+ getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
if (Group->isReverse())
StoredVec = reverseVector(StoredVec);
@@ -2347,7 +2384,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
Instruction *NewStoreInstr =
Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
- propagateMetadata(NewStoreInstr, Instr);
+ addMetadata(NewStoreInstr, Instr);
}
}
@@ -2372,8 +2409,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
if (!Alignment)
Alignment = DL.getABITypeAlignment(ScalarDataTy);
unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
- unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
- unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;
+ uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
+ uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;
if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
!Legal->isMaskRequired(SI))
@@ -2382,69 +2419,115 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
if (ScalarAllocatedSize != VectorElementSize)
return scalarizeInstruction(Instr);
- // If the pointer is loop invariant or if it is non-consecutive,
- // scalarize the load.
+ // If the pointer is loop invariant, scalarize the load.
+ if (LI && Legal->isUniform(Ptr))
+ return scalarizeInstruction(Instr);
+
+ // If the pointer is non-consecutive and gather/scatter is not supported,
+ // scalarize the instruction.
int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
bool Reverse = ConsecutiveStride < 0;
- bool UniformLoad = LI && Legal->isUniform(Ptr);
- if (!ConsecutiveStride || UniformLoad)
+ bool CreateGatherScatter =
+ !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) ||
+ (SI && Legal->isLegalMaskedScatter(ScalarDataTy)));
+
+ if (!ConsecutiveStride && !CreateGatherScatter)
return scalarizeInstruction(Instr);
Constant *Zero = Builder.getInt32(0);
VectorParts &Entry = WidenMap.get(Instr);
+ VectorParts VectorGep;
// Handle consecutive loads/stores.
GetElementPtrInst *Gep = getGEPInstruction(Ptr);
- if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
- setDebugLocFromInst(Builder, Gep);
- Value *PtrOperand = Gep->getPointerOperand();
- Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
- FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);
-
- // Create the new GEP with the new induction variable.
- GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
- Gep2->setOperand(0, FirstBasePtr);
- Gep2->setName("gep.indvar.base");
- Ptr = Builder.Insert(Gep2);
- } else if (Gep) {
- setDebugLocFromInst(Builder, Gep);
- assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
- OrigLoop) &&
- "Base ptr must be invariant");
-
- // The last index does not have to be the induction. It can be
- // consecutive and be a function of the index. For example A[I+1];
- unsigned NumOperands = Gep->getNumOperands();
- unsigned InductionOperand = getGEPInductionOperand(Gep);
- // Create the new GEP with the new induction variable.
- GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
-
- for (unsigned i = 0; i < NumOperands; ++i) {
- Value *GepOperand = Gep->getOperand(i);
- Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);
-
- // Update last index or loop invariant instruction anchored in loop.
- if (i == InductionOperand ||
- (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
- assert((i == InductionOperand ||
- PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
- OrigLoop)) &&
- "Must be last index or loop invariant");
-
- VectorParts &GEPParts = getVectorValue(GepOperand);
- Value *Index = GEPParts[0];
- Index = Builder.CreateExtractElement(Index, Zero);
- Gep2->setOperand(i, Index);
- Gep2->setName("gep.indvar.idx");
+ if (ConsecutiveStride) {
+ if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
+ setDebugLocFromInst(Builder, Gep);
+ Value *PtrOperand = Gep->getPointerOperand();
+ Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
+ FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);
+
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+ Gep2->setOperand(0, FirstBasePtr);
+ Gep2->setName("gep.indvar.base");
+ Ptr = Builder.Insert(Gep2);
+ } else if (Gep) {
+ setDebugLocFromInst(Builder, Gep);
+ assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
+ OrigLoop) &&
+ "Base ptr must be invariant");
+ // The last index does not have to be the induction. It can be
+ // consecutive and be a function of the index. For example, A[I+1].
+ unsigned NumOperands = Gep->getNumOperands();
+ unsigned InductionOperand = getGEPInductionOperand(Gep);
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ Value *GepOperand = Gep->getOperand(i);
+ Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);
+
+ // Update last index or loop invariant instruction anchored in loop.
+ if (i == InductionOperand ||
+ (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
+ assert((i == InductionOperand ||
+ PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
+ OrigLoop)) &&
+ "Must be last index or loop invariant");
+
+ VectorParts &GEPParts = getVectorValue(GepOperand);
+
+ // If GepOperand is an induction variable, and there's a scalarized
+ // version of it available, use it. Otherwise, we will need to create
+ // an extractelement instruction.
+ Value *Index = ScalarIVMap.count(GepOperand)
+ ? ScalarIVMap[GepOperand][0]
+ : Builder.CreateExtractElement(GEPParts[0], Zero);
+
+ Gep2->setOperand(i, Index);
+ Gep2->setName("gep.indvar.idx");
+ }
}
+ Ptr = Builder.Insert(Gep2);
+ } else { // No GEP
+ // Use the induction element ptr.
+ assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
+ setDebugLocFromInst(Builder, Ptr);
+ VectorParts &PtrVal = getVectorValue(Ptr);
+ Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
}
- Ptr = Builder.Insert(Gep2);
} else {
- // Use the induction element ptr.
- assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
- setDebugLocFromInst(Builder, Ptr);
- VectorParts &PtrVal = getVectorValue(Ptr);
- Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
+ // At this point we need the vector version of the GEP for the gather or
+ // scatter.
+ assert(CreateGatherScatter && "The instruction should be scalarized");
+ if (Gep) {
+ // We are vectorizing the GEP across all UF parts. We want a vector value
+ // for the base and for each index that is defined inside the loop, even if
+ // it is loop-invariant but wasn't hoisted out. Otherwise, we keep them
+ // scalar.
+ SmallVector<VectorParts, 4> OpsV;
+ for (Value *Op : Gep->operands()) {
+ Instruction *SrcInst = dyn_cast<Instruction>(Op);
+ if (SrcInst && OrigLoop->contains(SrcInst))
+ OpsV.push_back(getVectorValue(Op));
+ else
+ OpsV.push_back(VectorParts(UF, Op));
+ }
+ for (unsigned Part = 0; Part < UF; ++Part) {
+ SmallVector<Value *, 4> Ops;
+ Value *GEPBasePtr = OpsV[0][Part];
+ for (unsigned i = 1; i < Gep->getNumOperands(); i++)
+ Ops.push_back(OpsV[i][Part]);
+ Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
+ cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
+ assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
+
+ NewGep =
+ Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
+ VectorGep.push_back(NewGep);
+ }
+ } else
+ VectorGep = getVectorValue(Ptr);
}
VectorParts Mask = createBlockInMask(Instr->getParent());
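
To summarize the restructured control flow above: uniform loads are scalarized, consecutive accesses (stride +1 or -1) take the wide load/store path, non-consecutive accesses become gathers/scatters when the target supports them, and everything else is scalarized. A hedged sketch of that dispatch (the predicates are placeholders for Legal->isUniform, Legal->isConsecutivePtr, and the masked gather/scatter legality queries, not real APIs):

    #include <cstdio>

    enum class WidenKind { Scalarize, WideLoadStore, GatherScatter };

    WidenKind classify(bool IsLoad, bool UniformPtr, int ConsecutiveStride,
                       bool LegalGatherScatter) {
      if (IsLoad && UniformPtr)
        return WidenKind::Scalarize;     // Loop-invariant load.
      if (ConsecutiveStride != 0)
        return WidenKind::WideLoadStore; // Stride +1, or -1 (reversed).
      if (LegalGatherScatter)
        return WidenKind::GatherScatter; // Non-consecutive but supported.
      return WidenKind::Scalarize;       // Fall back to scalar code.
    }

    int main() {
      // A reversed consecutive load takes the wide load/store path.
      std::printf("%d\n", (int)classify(true, false, -1, false));
      return 0;
    }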
@@ -2458,62 +2541,78 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
VectorParts StoredVal = getVectorValue(SI->getValueOperand());
for (unsigned Part = 0; Part < UF; ++Part) {
+ Instruction *NewSI = nullptr;
+ if (CreateGatherScatter) {
+ Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
+ NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
+ Alignment, MaskPart);
+ } else {
+ // Calculate the pointer for the specific unroll-part.
+ Value *PartPtr =
+ Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
+
+ if (Reverse) {
+ // If we store to reverse consecutive memory locations, then we need
+ // to reverse the order of elements in the stored value.
+ StoredVal[Part] = reverseVector(StoredVal[Part]);
+ // If the address is consecutive but reversed, then the
+ // wide store needs to start at the last vector element.
+ PartPtr =
+ Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
+ PartPtr =
+ Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
+ Mask[Part] = reverseVector(Mask[Part]);
+ }
+
+ Value *VecPtr =
+ Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
+
+ if (Legal->isMaskRequired(SI))
+ NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
+ Mask[Part]);
+ else
+ NewSI =
+ Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
+ }
+ addMetadata(NewSI, SI);
+ }
+ return;
+ }
+
+ // Handle loads.
+ assert(LI && "Must have a load instruction");
+ setDebugLocFromInst(Builder, LI);
+ for (unsigned Part = 0; Part < UF; ++Part) {
+ Instruction *NewLI;
+ if (CreateGatherScatter) {
+ Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
+ NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
+ 0, "wide.masked.gather");
+ Entry[Part] = NewLI;
+ } else {
// Calculate the pointer for the specific unroll-part.
Value *PartPtr =
Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
if (Reverse) {
- // If we store to reverse consecutive memory locations, then we need
- // to reverse the order of elements in the stored value.
- StoredVal[Part] = reverseVector(StoredVal[Part]);
// If the address is consecutive but reversed, then the
- // wide store needs to start at the last vector element.
+ // wide load needs to start at the last vector element.
PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
Mask[Part] = reverseVector(Mask[Part]);
}
- Value *VecPtr = Builder.CreateBitCast(PartPtr,
- DataTy->getPointerTo(AddressSpace));
-
- Instruction *NewSI;
- if (Legal->isMaskRequired(SI))
- NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
- Mask[Part]);
- else
- NewSI = Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
- propagateMetadata(NewSI, SI);
- }
- return;
- }
-
- // Handle loads.
- assert(LI && "Must have a load instruction");
- setDebugLocFromInst(Builder, LI);
- for (unsigned Part = 0; Part < UF; ++Part) {
- // Calculate the pointer for the specific unroll-part.
- Value *PartPtr =
- Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
-
- if (Reverse) {
- // If the address is consecutive but reversed, then the
- // wide load needs to start at the last vector element.
- PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
- PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
- Mask[Part] = reverseVector(Mask[Part]);
+ Value *VecPtr =
+ Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
+ if (Legal->isMaskRequired(LI))
+ NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
+ UndefValue::get(DataTy),
+ "wide.masked.load");
+ else
+ NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
+ Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
}
-
- Instruction* NewLI;
- Value *VecPtr = Builder.CreateBitCast(PartPtr,
- DataTy->getPointerTo(AddressSpace));
- if (Legal->isMaskRequired(LI))
- NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
- UndefValue::get(DataTy),
- "wide.masked.load");
- else
- NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
- propagateMetadata(NewLI, LI);
- Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
+ addMetadata(NewLI, LI);
}
}
@@ -2526,9 +2625,7 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
setDebugLocFromInst(Builder, Instr);
// Find all of the vectorized parameters.
- for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
- Value *SrcOp = Instr->getOperand(op);
-
+ for (Value *SrcOp : Instr->operands()) {
// If we are accessing the old induction variable, use the new one.
if (SrcOp == OldInduction) {
Params.push_back(getVectorValue(SrcOp));
@@ -2536,7 +2633,7 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
}
// Try using previously calculated values.
- Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
+ auto *SrcInst = dyn_cast<Instruction>(SrcOp);
// If the src is an instruction that appeared earlier in the basic block,
// then it should already be vectorized.
@@ -2558,8 +2655,9 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? nullptr :
- UndefValue::get(VectorType::get(Instr->getType(), VF));
+ Value *UndefVec =
+ IsVoidRetTy ? nullptr
+ : UndefValue::get(VectorType::get(Instr->getType(), VF));
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
@@ -2589,16 +2687,28 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
Cloned->setName(Instr->getName() + ".cloned");
// Replace the operands of the cloned instructions with extracted scalars.
for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
- Value *Op = Params[op][Part];
- // Param is a vector. Need to extract the right lane.
- if (Op->getType()->isVectorTy())
- Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
- Cloned->setOperand(op, Op);
+
+ // If the operand is an induction variable, and there's a scalarized
+ // version of it available, use it. Otherwise, we will need to create
+ // an extractelement instruction if vectorizing.
+ auto *NewOp = Params[op][Part];
+ auto *ScalarOp = Instr->getOperand(op);
+ if (ScalarIVMap.count(ScalarOp))
+ NewOp = ScalarIVMap[ScalarOp][VF * Part + Width];
+ else if (NewOp->getType()->isVectorTy())
+ NewOp = Builder.CreateExtractElement(NewOp, Builder.getInt32(Width));
+ Cloned->setOperand(op, NewOp);
}
+ addNewMetadata(Cloned, Instr);
// Place the cloned scalar in the new loop.
Builder.Insert(Cloned);
+ // If we just cloned a new assumption, add it to the assumption cache.
+ if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
+ if (II->getIntrinsicID() == Intrinsic::assume)
+ AC->registerAssumption(II);
+
// If the original scalar returns a value we need to place it in a vector
// so that future users will be able to use it.
if (!IsVoidRetTy)
@@ -2606,8 +2716,8 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
Builder.getInt32(Width));
// End if-block.
if (IfPredicateStore)
- PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
- Cmp));
+ PredicatedStores.push_back(
+ std::make_pair(cast<StoreInst>(Cloned), Cmp));
}
}
}
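
The per-lane cloning loop above picks each operand the same way: prefer a precomputed scalar step, otherwise extract the lane from the widened value. A small stand-in sketch of that selection (the containers model ScalarIVMap and one part of WidenMap; they are not LLVM structures, and the names are illustrative):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      const int VF = 4;
      std::map<std::string, std::vector<int>> ScalarIVMap;
      ScalarIVMap["i"] = {0, 1, 2, 3, 4, 5, 6, 7};    // UF = 2, flattened.
      std::vector<int> WidenedLanes = {40, 41, 42, 43}; // One part of "x".

      auto operandFor = [&](const std::string &Name, int Part, int Width) {
        if (ScalarIVMap.count(Name))
          return ScalarIVMap[Name][VF * Part + Width]; // Scalar step.
        return WidenedLanes[Width]; // Models an extractelement.
      };

      std::printf("i -> %d, x -> %d\n", operandFor("i", 1, 2),
                  operandFor("x", 0, 2));
      return 0;
    }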
@@ -2627,7 +2737,7 @@ PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
Builder.SetInsertPoint(Latch->getTerminator());
-
+
// Create i+1 and fill the PHINode.
Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
Induction->addIncoming(Start, L->getLoopPreheader());
@@ -2635,7 +2745,7 @@ PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
// Create the compare.
Value *ICmp = Builder.CreateICmpEQ(Next, End);
Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
-
+
// Now we have two terminators. Remove the old one from the block.
Latch->getTerminator()->eraseFromParent();
@@ -2649,12 +2759,12 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
// Find the loop boundaries.
ScalarEvolution *SE = PSE.getSE();
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(OrigLoop);
+ const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
"Invalid loop count");
Type *IdxTy = Legal->getWidestInductionType();
-
+
// The exit count might have the type of i64 while the phi is i32. This can
// happen if we have an induction variable that is sign extended before the
// compare. The only way that we get a backedge taken count is that the
@@ -2664,7 +2774,7 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
IdxTy->getPrimitiveSizeInBits())
BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
-
+
// Get the total trip count from the count by adding 1.
const SCEV *ExitCount = SE->getAddExpr(
BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
@@ -2681,9 +2791,8 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
if (TripCount->getType()->isPointerTy())
TripCount =
- CastInst::CreatePointerCast(TripCount, IdxTy,
- "exitcount.ptrcnt.to.int",
- L->getLoopPreheader()->getTerminator());
+ CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
+ L->getLoopPreheader()->getTerminator());
return TripCount;
}
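
The computation above reduces to: normalize the backedge-taken count to the widest induction type (truncation is safe only in the sign-extension case the comment describes), then add one. A tiny numeric sketch under assumed widths:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t BackedgeTakenCount = 99; // e.g. an i64 count for an i32 IV.
      uint32_t Normalized = (uint32_t)BackedgeTakenCount; // trunc to IdxTy
      uint32_t TripCount = Normalized + 1;                // BTC + 1 = 100
      std::printf("trip count = %u\n", TripCount);
      return 0;
    }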
@@ -2691,16 +2800,30 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
if (VectorTripCount)
return VectorTripCount;
-
+
Value *TC = getOrCreateTripCount(L);
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
-
- // Now we need to generate the expression for N - (N % VF), which is
- // the part that the vectorized body will execute.
- // The loop step is equal to the vectorization factor (num of SIMD elements)
- // times the unroll factor (num of SIMD instructions).
+
+ // Now we need to generate the expression for the part of the loop that the
+ // vectorized body will execute. This is equal to N - (N % Step) if scalar
+ // iterations are not required for correctness, or N - Step, otherwise. Step
+ // is equal to the vectorization factor (number of SIMD elements) times the
+ // unroll factor (number of SIMD instructions).
Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
+
+ // If there is a non-reversed interleaved group that may speculatively access
+ // memory out-of-bounds, we need to ensure that there will be at least one
+ // iteration of the scalar epilogue loop. Thus, if the step evenly divides
+ // the trip count, we set the remainder to be equal to the step. If the step
+ // does not evenly divide the trip count, no adjustment is necessary since
+ // there will already be scalar iterations. Note that the minimum iterations
+ // check ensures that N >= Step.
+ if (VF > 1 && Legal->requiresScalarEpilogue()) {
+ auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
+ R = Builder.CreateSelect(IsZero, Step, R);
+ }
+
VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
return VectorTripCount;
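
Concretely, with Step = VF * UF the vector trip count is N - (N % Step), except that a required scalar epilogue forces the remainder up to Step when it would otherwise be zero. A worked sketch (N, VF, and UF are illustrative assumptions):

    #include <cstdio>

    unsigned vectorTripCount(unsigned N, unsigned VF, unsigned UF,
                             bool RequiresScalarEpilogue) {
      unsigned Step = VF * UF;
      unsigned R = N % Step;                      // n.mod.vf
      if (VF > 1 && RequiresScalarEpilogue && R == 0)
        R = Step;                                 // Keep scalar iterations.
      return N - R;                               // n.vec
    }

    int main() {
      std::printf("%u\n", vectorTripCount(24, 4, 2, false)); // 24
      std::printf("%u\n", vectorTripCount(24, 4, 2, true));  // 16
      std::printf("%u\n", vectorTripCount(20, 4, 2, true));  // 16 (R was 4)
      return 0;
    }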
@@ -2714,13 +2837,15 @@ void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
// Generate code to check that the loop's trip count that we computed by
// adding one to the backedge-taken count will not overflow.
- Value *CheckMinIters =
- Builder.CreateICmpULT(Count,
- ConstantInt::get(Count->getType(), VF * UF),
- "min.iters.check");
-
- BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
- "min.iters.checked");
+ Value *CheckMinIters = Builder.CreateICmpULT(
+ Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
+
+ BasicBlock *NewBB =
+ BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
+ // Update dominator tree immediately if the generated block is a
+ // LoopBypassBlock because SCEV expansions to generate loop bypass
+ // checks may query it before the current function is finished.
+ DT->addNewBlock(NewBB, BB);
if (L->getParentLoop())
L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
ReplaceInstWithInst(BB->getTerminator(),
@@ -2733,7 +2858,7 @@ void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
Value *TC = getOrCreateVectorTripCount(L);
BasicBlock *BB = L->getLoopPreheader();
IRBuilder<> Builder(BB->getTerminator());
-
+
// Now, compare the new count to zero. If it is zero skip the vector loop and
// jump to the scalar loop.
Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
@@ -2741,8 +2866,11 @@ void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
// Generate code to check that the loop's trip count that we computed by
// adding one to the backedge-taken count will not overflow.
- BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
- "vector.ph");
+ BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+ // Update dominator tree immediately if the generated block is a
+ // LoopBypassBlock because SCEV expansions to generate loop bypass
+ // checks may query it before the current function is finished.
+ DT->addNewBlock(NewBB, BB);
if (L->getParentLoop())
L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
ReplaceInstWithInst(BB->getTerminator(),
@@ -2768,6 +2896,10 @@ void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
// Create a new block containing the stride check.
BB->setName("vector.scevcheck");
auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+ // Update dominator tree immediately if the generated block is a
+ // LoopBypassBlock because SCEV expansions to generate loop bypass
+ // checks may query it before the current function is finished.
+ DT->addNewBlock(NewBB, BB);
if (L->getParentLoop())
L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
ReplaceInstWithInst(BB->getTerminator(),
@@ -2776,8 +2908,7 @@ void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
AddedSafetyChecks = true;
}
-void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
- BasicBlock *Bypass) {
+void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
BasicBlock *BB = L->getLoopPreheader();
// Generate the code that checks in runtime if arrays overlap. We put the
@@ -2793,14 +2924,23 @@ void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
// Create a new block containing the memory check.
BB->setName("vector.memcheck");
auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+ // Update dominator tree immediately if the generated block is a
+ // LoopBypassBlock because SCEV expansions to generate loop bypass
+ // checks may query it before the current function is finished.
+ DT->addNewBlock(NewBB, BB);
if (L->getParentLoop())
L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
ReplaceInstWithInst(BB->getTerminator(),
BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
LoopBypassBlocks.push_back(BB);
AddedSafetyChecks = true;
-}
+ // We currently don't use LoopVersioning for the actual loop cloning but we
+ // still use it to add the noalias metadata.
+ LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
+ PSE.getSE());
+ LVer->prepareNoAliasMetadata();
+}
void InnerLoopVectorizer::createEmptyLoop() {
/*
@@ -2859,12 +2999,12 @@ void InnerLoopVectorizer::createEmptyLoop() {
BasicBlock *VecBody =
VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
BasicBlock *MiddleBlock =
- VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
+ VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
BasicBlock *ScalarPH =
- MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
+ MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
// Create and register the new vector loop.
- Loop* Lp = new Loop();
+ Loop *Lp = new Loop();
Loop *ParentLoop = OrigLoop->getParentLoop();
// Insert the new loop into the loop nest and register the new basic blocks
@@ -2899,15 +3039,15 @@ void InnerLoopVectorizer::createEmptyLoop() {
// checks into a separate block to make the more common case of few elements
// faster.
emitMemRuntimeChecks(Lp, ScalarPH);
-
+
// Generate the induction variable.
// The loop step is equal to the vectorization factor (num of SIMD elements)
// times the unroll factor (num of SIMD instructions).
Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
Constant *Step = ConstantInt::get(IdxTy, VF * UF);
Induction =
- createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
- getDebugLocFromInstOrOperands(OldInduction));
+ createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
+ getDebugLocFromInstOrOperands(OldInduction));
// We are going to resume the execution of the scalar loop.
// Go over all of the induction variables that we found and fix the
@@ -2920,16 +3060,14 @@ void InnerLoopVectorizer::createEmptyLoop() {
// This variable saves the new starting index for the scalar loop. It is used
// to test if there are any tail iterations left once the vector loop has
// completed.
- LoopVectorizationLegality::InductionList::iterator I, E;
LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
- for (I = List->begin(), E = List->end(); I != E; ++I) {
- PHINode *OrigPhi = I->first;
- InductionDescriptor II = I->second;
+ for (auto &InductionEntry : *List) {
+ PHINode *OrigPhi = InductionEntry.first;
+ InductionDescriptor II = InductionEntry.second;
// Create phi nodes to merge from the backedge-taken check block.
- PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3,
- "bc.resume.val",
- ScalarPH->getTerminator());
+ PHINode *BCResumeVal = PHINode::Create(
+ OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
Value *EndValue;
if (OrigPhi == OldInduction) {
// We know what the end value is.
@@ -2937,9 +3075,9 @@ void InnerLoopVectorizer::createEmptyLoop() {
} else {
IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
Value *CRD = B.CreateSExtOrTrunc(CountRoundDown,
- II.getStepValue()->getType(),
- "cast.crd");
- EndValue = II.transform(B, CRD);
+ II.getStep()->getType(), "cast.crd");
+ const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
+ EndValue = II.transform(B, CRD, PSE.getSE(), DL);
EndValue->setName("ind.end");
}
@@ -2947,22 +3085,25 @@ void InnerLoopVectorizer::createEmptyLoop() {
// or the value at the end of the vectorized loop.
BCResumeVal->addIncoming(EndValue, MiddleBlock);
+ // Fix up external users of the induction variable.
+ fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);
+
// Fix the scalar body counter (PHI node).
unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
// The old induction's phi node in the scalar body needs the truncated
// value.
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
- BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
+ for (BasicBlock *BB : LoopBypassBlocks)
+ BCResumeVal->addIncoming(II.getStartValue(), BB);
OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
}
// Add a check in the middle block to see if we have completed
// all of the iterations in the first vector loop.
// If (N - N%VF) == N, then we *don't* need to run the remainder.
- Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
- CountRoundDown, "cmp.n",
- MiddleBlock->getTerminator());
+ Value *CmpN =
+ CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
+ CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
ReplaceInstWithInst(MiddleBlock->getTerminator(),
BranchInst::Create(ExitBlock, ScalarPH, CmpN));
@@ -2974,13 +3115,79 @@ void InnerLoopVectorizer::createEmptyLoop() {
LoopScalarPreHeader = ScalarPH;
LoopMiddleBlock = MiddleBlock;
LoopExitBlock = ExitBlock;
- LoopVectorBody.push_back(VecBody);
+ LoopVectorBody = VecBody;
LoopScalarBody = OldBasicBlock;
+ // Keep all loop hints from the original loop on the vector loop (we'll
+ // replace the vectorizer-specific hints below).
+ if (MDNode *LID = OrigLoop->getLoopID())
+ Lp->setLoopID(LID);
+
LoopVectorizeHints Hints(Lp, true);
Hints.setAlreadyVectorized();
}
+// Fix up external users of the induction variable. At this point, we are
+// in LCSSA form, with all external PHIs that use the IV having one input value,
+// coming from the remainder loop. We need those PHIs to also have a correct
+// value for the IV when arriving directly from the middle block.
+void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
+ const InductionDescriptor &II,
+ Value *CountRoundDown, Value *EndValue,
+ BasicBlock *MiddleBlock) {
+ // There are two kinds of external IV usages - those that use the value
+ // computed in the last iteration (the PHI) and those that use the penultimate
+ // value (the value that feeds into the phi from the loop latch).
+ // We allow both, but they, obviously, have different values.
+
+ assert(OrigLoop->getExitBlock() && "Expected a single exit block");
+
+ DenseMap<Value *, Value *> MissingVals;
+
+ // An external user of the last iteration's value should see the value that
+ // the remainder loop uses to initialize its own IV.
+ Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
+ for (User *U : PostInc->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (!OrigLoop->contains(UI)) {
+ assert(isa<PHINode>(UI) && "Expected LCSSA form");
+ MissingVals[UI] = EndValue;
+ }
+ }
+
+ // An external user of the penultimate value needs to see EndValue - Step.
+ // The simplest way to get this is to recompute it from the constituent SCEVs,
+ // that is Start + (Step * (CRD - 1)).
+ for (User *U : OrigPhi->users()) {
+ auto *UI = cast<Instruction>(U);
+ if (!OrigLoop->contains(UI)) {
+ const DataLayout &DL =
+ OrigLoop->getHeader()->getModule()->getDataLayout();
+ assert(isa<PHINode>(UI) && "Expected LCSSA form");
+
+ IRBuilder<> B(MiddleBlock->getTerminator());
+ Value *CountMinusOne = B.CreateSub(
+ CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
+ Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
+ "cast.cmo");
+ Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
+ Escape->setName("ind.escape");
+ MissingVals[UI] = Escape;
+ }
+ }
+
+ for (auto &I : MissingVals) {
+ PHINode *PHI = cast<PHINode>(I.first);
+ // One corner case we have to handle is two IVs "chasing" each other,
+ // that is %IV2 = phi [...], [ %IV1, %latch ]
+ // In this case, if IV1 has an external use, we need to avoid adding both
+ // "last value of IV1" and "penultimate value of IV2". So, verify that we
+ // don't already have an incoming value for the middle block.
+ if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
+ PHI->addIncoming(I.second, MiddleBlock);
+ }
+}
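
For a simple affine induction start + step * i, the two fixups above produce EndValue = start + step * CRD for external users of the latch value, and start + step * (CRD - 1) for external users of the phi. A numeric sketch with assumed constants:

    #include <cstdio>

    int main() {
      const int Start = 5, Step = 3, CRD = 16; // CRD = CountRoundDown
      int EndValue = Start + Step * CRD;       // Users of the latch value.
      int Escape = Start + Step * (CRD - 1);   // Users of the phi itself.
      std::printf("ind.end = %d, ind.escape = %d\n", EndValue, Escape);
      return 0;
    }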
+
namespace {
struct CSEDenseMapInfo {
static bool canHandle(Instruction *I) {
@@ -3007,48 +3214,31 @@ struct CSEDenseMapInfo {
};
}
-/// \brief Check whether this block is a predicated block.
-/// Due to if predication of stores we might create a sequence of "if(pred) a[i]
-/// = ...; " blocks. We start with one vectorized basic block. For every
-/// conditional block we split this vectorized block. Therefore, every second
-/// block will be a predicated one.
-static bool isPredicatedBlock(unsigned BlockNum) {
- return BlockNum % 2;
-}
-
///\brief Perform cse of induction variable instructions.
-static void cse(SmallVector<BasicBlock *, 4> &BBs) {
+static void cse(BasicBlock *BB) {
// Perform simple cse.
SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
- for (unsigned i = 0, e = BBs.size(); i != e; ++i) {
- BasicBlock *BB = BBs[i];
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
- Instruction *In = &*I++;
-
- if (!CSEDenseMapInfo::canHandle(In))
- continue;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
+ Instruction *In = &*I++;
- // Check if we can replace this instruction with any of the
- // visited instructions.
- if (Instruction *V = CSEMap.lookup(In)) {
- In->replaceAllUsesWith(V);
- In->eraseFromParent();
- continue;
- }
- // Ignore instructions in conditional blocks. We create "if (pred) a[i] =
- // ...;" blocks for predicated stores. Every second block is a predicated
- // block.
- if (isPredicatedBlock(i))
- continue;
+ if (!CSEDenseMapInfo::canHandle(In))
+ continue;
- CSEMap[In] = In;
+ // Check if we can replace this instruction with any of the
+ // visited instructions.
+ if (Instruction *V = CSEMap.lookup(In)) {
+ In->replaceAllUsesWith(V);
+ In->eraseFromParent();
+ continue;
}
+
+ CSEMap[In] = In;
}
}
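
With the loop body now a single block, the CSE is a one-pass map lookup: each instruction either replaces itself with an identical earlier one or registers itself. A minimal analogue (pairs of opcode and operand stand in for CSEDenseMapInfo's hashing of real instructions):

    #include <cstdio>
    #include <map>
    #include <utility>
    #include <vector>

    int main() {
      using Inst = std::pair<char, int>; // (opcode, operand) stand-in.
      std::vector<Inst> Block = {{'+', 1}, {'*', 2}, {'+', 1}, {'*', 2}};
      std::map<Inst, int> Seen; // Maps an instruction to its first index.
      for (int I = 0; I < (int)Block.size(); ++I) {
        auto It = Seen.find(Block[I]);
        if (It != Seen.end())
          std::printf("inst %d replaced by inst %d\n", I, It->second);
        else
          Seen[Block[I]] = I;
      }
      return 0;
    }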
/// \brief Adds a 'fast' flag to floating point operations.
static Value *addFastMathFlag(Value *V) {
- if (isa<FPMathOperator>(V)){
+ if (isa<FPMathOperator>(V)) {
FastMathFlags Flags;
Flags.setUnsafeAlgebra();
cast<Instruction>(V)->setFastMathFlags(Flags);
@@ -3066,11 +3256,11 @@ static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
assert(Ty->isVectorTy() && "Can only scalarize vectors");
unsigned Cost = 0;
- for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
if (Insert)
- Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
if (Extract)
- Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
}
return Cost;
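
The overhead model is a straight per-lane sum of insert and/or extract costs. A worked sketch with assumed per-lane costs (real values come from TargetTransformInfo):

    #include <cstdio>

    unsigned scalarizationOverhead(unsigned NumElts, bool Insert, bool Extract,
                                   unsigned InsertCost, unsigned ExtractCost) {
      unsigned Cost = 0;
      for (unsigned I = 0; I < NumElts; ++I) {
        if (Insert)
          Cost += InsertCost;
        if (Extract)
          Cost += ExtractCost;
      }
      return Cost;
    }

    int main() {
      // E.g. packing a 4-lane return value (inserts only), cost 1 per lane.
      std::printf("%u\n", scalarizationOverhead(4, true, false, 1, 1)); // 4
      return 0;
    }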
@@ -3101,15 +3291,15 @@ static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
// Compute corresponding vector type for return value and arguments.
Type *RetTy = ToVectorTy(ScalarRetTy, VF);
- for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i)
- Tys.push_back(ToVectorTy(ScalarTys[i], VF));
+ for (Type *ScalarTy : ScalarTys)
+ Tys.push_back(ToVectorTy(ScalarTy, VF));
// Compute costs of unpacking argument values for the scalar calls and
// packing the return values to a vector.
unsigned ScalarizationCost =
getScalarizationOverhead(RetTy, true, false, TTI);
- for (unsigned i = 0, ie = Tys.size(); i != ie; ++i)
- ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI);
+ for (Type *Ty : Tys)
+ ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI);
unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
@@ -3134,25 +3324,29 @@ static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
const TargetTransformInfo &TTI,
const TargetLibraryInfo *TLI) {
- Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+ Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
assert(ID && "Expected intrinsic call!");
Type *RetTy = ToVectorTy(CI->getType(), VF);
SmallVector<Type *, 4> Tys;
- for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
- Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
+ for (Value *ArgOperand : CI->arg_operands())
+ Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
- return TTI.getIntrinsicInstrCost(ID, RetTy, Tys);
+ FastMathFlags FMF;
+ if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
+ FMF = FPMO->getFastMathFlags();
+
+ return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
}
static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
- IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
- IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
+ auto *I1 = cast<IntegerType>(T1->getVectorElementType());
+ auto *I2 = cast<IntegerType>(T2->getVectorElementType());
return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}
static Type *largestIntegerVectorType(Type *T1, Type *T2) {
- IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
- IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
+ auto *I1 = cast<IntegerType>(T1->getVectorElementType());
+ auto *I2 = cast<IntegerType>(T2->getVectorElementType());
return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}
@@ -3161,21 +3355,22 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
// truncated version of `I` and reextend its result. InstCombine runs
// later and will remove any ext/trunc pairs.
//
- for (auto &KV : MinBWs) {
+ SmallPtrSet<Value *, 4> Erased;
+ for (const auto &KV : *MinBWs) {
VectorParts &Parts = WidenMap.get(KV.first);
for (Value *&I : Parts) {
- if (I->use_empty())
+ if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
continue;
Type *OriginalTy = I->getType();
- Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(),
- KV.second);
+ Type *ScalarTruncatedTy =
+ IntegerType::get(OriginalTy->getContext(), KV.second);
Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
OriginalTy->getVectorNumElements());
if (TruncatedTy == OriginalTy)
continue;
IRBuilder<> B(cast<Instruction>(I));
- auto ShrinkOperand = [&](Value *V) -> Value* {
+ auto ShrinkOperand = [&](Value *V) -> Value * {
if (auto *ZI = dyn_cast<ZExtInst>(V))
if (ZI->getSrcTy() == TruncatedTy)
return ZI->getOperand(0);
@@ -3185,50 +3380,59 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
// The actual instruction modification depends on the instruction type,
// unfortunately.
Value *NewI = nullptr;
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
- NewI = B.CreateBinOp(BO->getOpcode(),
- ShrinkOperand(BO->getOperand(0)),
+ if (auto *BO = dyn_cast<BinaryOperator>(I)) {
+ NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
ShrinkOperand(BO->getOperand(1)));
cast<BinaryOperator>(NewI)->copyIRFlags(I);
- } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
- NewI = B.CreateICmp(CI->getPredicate(),
- ShrinkOperand(CI->getOperand(0)),
- ShrinkOperand(CI->getOperand(1)));
- } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+ } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
+ NewI =
+ B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
+ ShrinkOperand(CI->getOperand(1)));
+ } else if (auto *SI = dyn_cast<SelectInst>(I)) {
NewI = B.CreateSelect(SI->getCondition(),
ShrinkOperand(SI->getTrueValue()),
ShrinkOperand(SI->getFalseValue()));
- } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
+ } else if (auto *CI = dyn_cast<CastInst>(I)) {
switch (CI->getOpcode()) {
- default: llvm_unreachable("Unhandled cast!");
+ default:
+ llvm_unreachable("Unhandled cast!");
case Instruction::Trunc:
NewI = ShrinkOperand(CI->getOperand(0));
break;
case Instruction::SExt:
- NewI = B.CreateSExtOrTrunc(CI->getOperand(0),
- smallestIntegerVectorType(OriginalTy,
- TruncatedTy));
+ NewI = B.CreateSExtOrTrunc(
+ CI->getOperand(0),
+ smallestIntegerVectorType(OriginalTy, TruncatedTy));
break;
case Instruction::ZExt:
- NewI = B.CreateZExtOrTrunc(CI->getOperand(0),
- smallestIntegerVectorType(OriginalTy,
- TruncatedTy));
+ NewI = B.CreateZExtOrTrunc(
+ CI->getOperand(0),
+ smallestIntegerVectorType(OriginalTy, TruncatedTy));
break;
}
- } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
+ } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
- auto *O0 =
- B.CreateZExtOrTrunc(SI->getOperand(0),
- VectorType::get(ScalarTruncatedTy, Elements0));
+ auto *O0 = B.CreateZExtOrTrunc(
+ SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
- auto *O1 =
- B.CreateZExtOrTrunc(SI->getOperand(1),
- VectorType::get(ScalarTruncatedTy, Elements1));
+ auto *O1 = B.CreateZExtOrTrunc(
+ SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
} else if (isa<LoadInst>(I)) {
// Don't do anything with the operands, just extend the result.
continue;
+ } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
+ auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
+ auto *O0 = B.CreateZExtOrTrunc(
+ IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
+ auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
+ NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
+ } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
+ auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
+ auto *O0 = B.CreateZExtOrTrunc(
+ EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
+ NewI = B.CreateExtractElement(O0, EE->getOperand(2));
} else {
llvm_unreachable("Unhandled instruction type!");
}
@@ -3238,12 +3442,13 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
I->replaceAllUsesWith(Res);
cast<Instruction>(I)->eraseFromParent();
+ Erased.insert(I);
I = Res;
}
}
// We'll have created a bunch of ZExts that are now parentless. Clean up.
- for (auto &KV : MinBWs) {
+ for (const auto &KV : *MinBWs) {
VectorParts &Parts = WidenMap.get(KV.first);
for (Value *&I : Parts) {
ZExtInst *Inst = dyn_cast<ZExtInst>(I);
@@ -3266,15 +3471,14 @@ void InnerLoopVectorizer::vectorizeLoop() {
//===------------------------------------------------===//
Constant *Zero = Builder.getInt32(0);
- // In order to support reduction variables we need to be able to vectorize
- // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
- // stages. First, we create a new vector PHI node with no incoming edges.
- // We use this value when we vectorize all of the instructions that use the
- // PHI. Next, after all of the instructions in the block are complete we
- // add the new incoming edges to the PHI. At this point all of the
- // instructions in the basic block are vectorized, so we can use them to
- // construct the PHI.
- PhiVector RdxPHIsToFix;
+ // In order to support recurrences we need to be able to vectorize Phi nodes.
+ // Phi nodes have cycles, so we need to vectorize them in two stages. First,
+ // we create a new vector PHI node with no incoming edges. We use this value
+ // when we vectorize all of the instructions that use the PHI. Next, after
+ // all of the instructions in the block are complete we add the new incoming
+ // edges to the PHI. At this point all of the instructions in the basic block
+ // are vectorized, so we can use them to construct the PHI.
+ PhiVector PHIsToFix;
// Scan the loop in a topological order to ensure that defs are vectorized
// before users.
@@ -3282,33 +3486,32 @@ void InnerLoopVectorizer::vectorizeLoop() {
DFS.perform(LI);
// Vectorize all of the blocks in the original loop.
- for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
- be = DFS.endRPO(); bb != be; ++bb)
- vectorizeBlockInLoop(*bb, &RdxPHIsToFix);
+ for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
+ vectorizeBlockInLoop(BB, &PHIsToFix);
// Insert truncates and extends for any truncated instructions as hints to
// InstCombine.
if (VF > 1)
truncateToMinimalBitwidths();
-
- // At this point every instruction in the original loop is widened to
- // a vector form. We are almost done. Now, we need to fix the PHI nodes
- // that we vectorized. The PHI nodes are currently empty because we did
- // not want to introduce cycles. Notice that the remaining PHI nodes
- // that we need to fix are reduction variables.
-
- // Create the 'reduced' values for each of the induction vars.
- // The reduced values are the vector values that we scalarize and combine
- // after the loop is finished.
- for (PhiVector::iterator it = RdxPHIsToFix.begin(), e = RdxPHIsToFix.end();
- it != e; ++it) {
- PHINode *RdxPhi = *it;
- assert(RdxPhi && "Unable to recover vectorized PHI");
-
- // Find the reduction variable descriptor.
- assert(Legal->isReductionVariable(RdxPhi) &&
+
+ // At this point every instruction in the original loop is widened to a
+ // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
+ // nodes are currently empty because we did not want to introduce cycles.
+ // This is the second stage of vectorizing recurrences.
+ for (PHINode *Phi : PHIsToFix) {
+ assert(Phi && "Unable to recover vectorized PHI");
+
+ // Handle first-order recurrences that need to be fixed.
+ if (Legal->isFirstOrderRecurrence(Phi)) {
+ fixFirstOrderRecurrence(Phi);
+ continue;
+ }
+
+ // If the phi node is not a first-order recurrence, it must be a reduction.
+ // Get its reduction variable descriptor.
+ assert(Legal->isReductionVariable(Phi) &&
"Unable to find the reduction variable");
- RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];
+ RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
@@ -3363,18 +3566,18 @@ void InnerLoopVectorizer::vectorizeLoop() {
// Reductions do not have to start at zero. They can start with
// any loop invariant values.
- VectorParts &VecRdxPhi = WidenMap.get(RdxPhi);
+ VectorParts &VecRdxPhi = WidenMap.get(Phi);
BasicBlock *Latch = OrigLoop->getLoopLatch();
- Value *LoopVal = RdxPhi->getIncomingValueForBlock(Latch);
+ Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
VectorParts &Val = getVectorValue(LoopVal);
for (unsigned part = 0; part < UF; ++part) {
// Make sure to add the reduction start value only to the
// first unroll part.
Value *StartVal = (part == 0) ? VectorStart : Identity;
- cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal,
- LoopVectorPreHeader);
- cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
- LoopVectorBody.back());
+ cast<PHINode>(VecRdxPhi[part])
+ ->addIncoming(StartVal, LoopVectorPreHeader);
+ cast<PHINode>(VecRdxPhi[part])
+ ->addIncoming(Val[part], LoopVectorBody);
}
// Before each round, move the insertion point right between
@@ -3389,9 +3592,9 @@ void InnerLoopVectorizer::vectorizeLoop() {
// If the vector reduction can be performed in a smaller type, we truncate
// then extend the loop exit value to enable InstCombine to evaluate the
// entire expression in the smaller type.
- if (VF > 1 && RdxPhi->getType() != RdxDesc.getRecurrenceType()) {
+ if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
- Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
+ Builder.SetInsertPoint(LoopVectorBody->getTerminator());
for (unsigned part = 0; part < UF; ++part) {
Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
@@ -3432,21 +3635,19 @@ void InnerLoopVectorizer::vectorizeLoop() {
assert(isPowerOf2_32(VF) &&
"Reduction emission only supported for pow2 vectors!");
Value *TmpVec = ReducedPartRdx;
- SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
+ SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
for (unsigned i = VF; i != 1; i >>= 1) {
// Move the upper half of the vector to the lower half.
- for (unsigned j = 0; j != i/2; ++j)
- ShuffleMask[j] = Builder.getInt32(i/2 + j);
+ for (unsigned j = 0; j != i / 2; ++j)
+ ShuffleMask[j] = Builder.getInt32(i / 2 + j);
// Fill the rest of the mask with undef.
- std::fill(&ShuffleMask[i/2], ShuffleMask.end(),
+ std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
UndefValue::get(Builder.getInt32Ty()));
- Value *Shuf =
- Builder.CreateShuffleVector(TmpVec,
- UndefValue::get(TmpVec->getType()),
- ConstantVector::get(ShuffleMask),
- "rdx.shuf");
+ Value *Shuf = Builder.CreateShuffleVector(
+ TmpVec, UndefValue::get(TmpVec->getType()),
+ ConstantVector::get(ShuffleMask), "rdx.shuf");
if (Op != Instruction::ICmp && Op != Instruction::FCmp)
// Floating point operations had to be 'fast' to enable the reduction.
@@ -3458,21 +3659,21 @@ void InnerLoopVectorizer::vectorizeLoop() {
}
// The result is in the first element of the vector.
- ReducedPartRdx = Builder.CreateExtractElement(TmpVec,
- Builder.getInt32(0));
+ ReducedPartRdx =
+ Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
// If the reduction can be performed in a smaller type, we need to extend
// the reduction to the wider type before we branch to the original loop.
- if (RdxPhi->getType() != RdxDesc.getRecurrenceType())
+ if (Phi->getType() != RdxDesc.getRecurrenceType())
ReducedPartRdx =
RdxDesc.isSigned()
- ? Builder.CreateSExt(ReducedPartRdx, RdxPhi->getType())
- : Builder.CreateZExt(ReducedPartRdx, RdxPhi->getType());
+ ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
+ : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
}
// Create a phi node that merges control-flow from the backedge-taken check
// block and the middle block.
- PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx",
+ PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
LoopScalarPreHeader->getTerminator());
for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
@@ -3483,9 +3684,11 @@ void InnerLoopVectorizer::vectorizeLoop() {
// We know that the loop is in LCSSA form. We need to update the
// PHI nodes in the exit blocks.
for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
- LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
+ LEE = LoopExitBlock->end();
+ LEI != LEE; ++LEI) {
PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
- if (!LCSSAPhi) break;
+ if (!LCSSAPhi)
+ break;
// All PHINodes need to have a single entry edge, or two if
// we already fixed them.
@@ -3498,30 +3701,30 @@ void InnerLoopVectorizer::vectorizeLoop() {
LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
break;
}
- }// end of the LCSSA phi scan.
+ } // end of the LCSSA phi scan.
// Fix the scalar loop reduction variable with the incoming reduction sum
// from the vector body and from the backedge value.
int IncomingEdgeBlockIdx =
- (RdxPhi)->getBasicBlockIndex(OrigLoop->getLoopLatch());
+ Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
// Pick the other block.
int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
- (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
- (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
- }// end of for each redux variable.
+ Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
+ Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
+ } // end of for each Phi in PHIsToFix.
fixLCSSAPHIs();
// Make sure DomTree is updated.
updateAnalysis();
-
+
// Predicate any stores.
for (auto KV : PredicatedStores) {
BasicBlock::iterator I(KV.first);
auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DT);
+ /*BranchWeights=*/nullptr, DT, LI);
I->moveBefore(T);
I->getParent()->setName("pred.store.if");
BB->setName("pred.store.continue");
@@ -3531,11 +3734,162 @@ void InnerLoopVectorizer::vectorizeLoop() {
cse(LoopVectorBody);
}
+void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
+
+ // This is the second phase of vectorizing first-order recurrences. An
+ // overview of the transformation is described below. Suppose we have the
+ // following loop.
+ //
+ // for (int i = 0; i < n; ++i)
+ // b[i] = a[i] - a[i - 1];
+ //
+ // There is a first-order recurrence on "a". For this loop, the shorthand
+ // scalar IR looks like:
+ //
+ // scalar.ph:
+ // s_init = a[-1]
+ // br scalar.body
+ //
+ // scalar.body:
+ // i = phi [0, scalar.ph], [i+1, scalar.body]
+ // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
+ // s2 = a[i]
+ // b[i] = s2 - s1
+ // br cond, scalar.body, ...
+ //
+ // In this example, s1 is a recurrence because its value depends on the
+ // previous iteration. In the first phase of vectorization, we created a
+ // temporary value for s1. We now complete the vectorization and produce the
+ // shorthand vector IR shown below (for VF = 4, UF = 1).
+ //
+ // vector.ph:
+ // v_init = vector(..., ..., ..., a[-1])
+ // br vector.body
+ //
+ // vector.body:
+ // i = phi [0, vector.ph], [i+4, vector.body]
+ // v1 = phi [v_init, vector.ph], [v2, vector.body]
+ // v2 = a[i, i+1, i+2, i+3];
+ // v3 = vector(v1(3), v2(0, 1, 2))
+ // b[i, i+1, i+2, i+3] = v2 - v3
+ // br cond, vector.body, middle.block
+ //
+ // middle.block:
+ // x = v2(3)
+ // br scalar.ph
+ //
+ // scalar.ph:
+ // s_init = phi [x, middle.block], [a[-1], otherwise]
+ // br scalar.body
+ //
+ // After the vector loop completes execution, we extract the next value of
+ // the recurrence (x) to use as the initial value in the scalar loop.
+
+ // Get the original loop preheader and single loop latch.
+ auto *Preheader = OrigLoop->getLoopPreheader();
+ auto *Latch = OrigLoop->getLoopLatch();
+
+ // Get the initial and previous values of the scalar recurrence.
+ auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
+ auto *Previous = Phi->getIncomingValueForBlock(Latch);
+
+ // Create a vector from the initial value.
+ auto *VectorInit = ScalarInit;
+ if (VF > 1) {
+ Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
+ VectorInit = Builder.CreateInsertElement(
+ UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
+ Builder.getInt32(VF - 1), "vector.recur.init");
+ }
+
+ // We constructed a temporary phi node in the first phase of vectorization.
+ // This phi node will eventually be deleted.
+ auto &PhiParts = getVectorValue(Phi);
+ Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
+
+ // Create a phi node for the new recurrence. The current value will either be
+ // the initial value inserted into a vector or a loop-varying vector value.
+ auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
+ VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
+
+ // Get the vectorized previous value. We ensured the previous value was an
+ // instruction when detecting the recurrence.
+ auto &PreviousParts = getVectorValue(Previous);
+
+ // Set the insertion point to be after this instruction. We ensured the
+ // previous value dominated all uses of the phi when detecting the
+ // recurrence.
+ Builder.SetInsertPoint(
+ &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
+
+ // We will construct a vector for the recurrence by combining the values for
+ // the current and previous iterations. This is the required shuffle mask.
+ SmallVector<Constant *, 8> ShuffleMask(VF);
+ ShuffleMask[0] = Builder.getInt32(VF - 1);
+ for (unsigned I = 1; I < VF; ++I)
+ ShuffleMask[I] = Builder.getInt32(I + VF - 1);
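+  // For example, with VF = 4 the mask is <3, 4, 5, 6>, selecting the last
+  // element of the previous iteration's vector followed by the first three
+  // elements of the current one. This is the "v3 = vector(v1(3), v2(0, 1, 2))"
+  // step shown in the overview above.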
+
+ // The vector from which to take the initial value for the current iteration
+ // (actual or unrolled). Initially, this is the vector phi node.
+ Value *Incoming = VecPhi;
+
+ // Shuffle the current and previous vector and update the vector parts.
+ for (unsigned Part = 0; Part < UF; ++Part) {
+ auto *Shuffle =
+ VF > 1
+ ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
+ ConstantVector::get(ShuffleMask))
+ : Incoming;
+ PhiParts[Part]->replaceAllUsesWith(Shuffle);
+ cast<Instruction>(PhiParts[Part])->eraseFromParent();
+ PhiParts[Part] = Shuffle;
+ Incoming = PreviousParts[Part];
+ }
+
+ // Fix the latch value of the new recurrence in the vector loop.
+ VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
+
+ // Extract the last vector element in the middle block. This will be the
+ // initial value for the recurrence when jumping to the scalar loop.
+ auto *Extract = Incoming;
+ if (VF > 1) {
+ Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
+ Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
+ "vector.recur.extract");
+ }
+
+ // Fix the initial value of the original recurrence in the scalar loop.
+ Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
+ auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
+ for (auto *BB : predecessors(LoopScalarPreHeader)) {
+ auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
+ Start->addIncoming(Incoming, BB);
+ }
+
+ Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
+ Phi->setName("scalar.recur");
+
+ // Finally, fix users of the recurrence outside the loop. The users will need
+ // either the last value of the scalar recurrence or the last value of the
+ // vector recurrence we extracted in the middle block. Since the loop is in
+ // LCSSA form, we just need to find the phi node for the original scalar
+ // recurrence in the exit block, and then add an edge for the middle block.
+ for (auto &I : *LoopExitBlock) {
+ auto *LCSSAPhi = dyn_cast<PHINode>(&I);
+ if (!LCSSAPhi)
+ break;
+ if (LCSSAPhi->getIncomingValue(0) == Phi) {
+ LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
+ break;
+ }
+ }
+}
+
void InnerLoopVectorizer::fixLCSSAPHIs() {
- for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
- LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
- PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
- if (!LCSSAPhi) break;
+ for (Instruction &LEI : *LoopExitBlock) {
+ auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
+ if (!LCSSAPhi)
+ break;
if (LCSSAPhi->getNumIncomingValues() == 1)
LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
LoopMiddleBlock);
@@ -3548,7 +3902,7 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
"Invalid edge");
// Look for cached value.
- std::pair<BasicBlock*, BasicBlock*> Edge(Src, Dst);
+ std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
if (ECEntryIt != MaskCache.end())
return ECEntryIt->second;
@@ -3604,15 +3958,15 @@ InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
void InnerLoopVectorizer::widenPHIInstruction(
Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
unsigned VF, PhiVector *PV) {
- PHINode* P = cast<PHINode>(PN);
- // Handle reduction variables:
- if (Legal->isReductionVariable(P)) {
+ PHINode *P = cast<PHINode>(PN);
+ // Handle recurrences.
+ if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
for (unsigned part = 0; part < UF; ++part) {
// This is phase one of vectorizing PHIs.
- Type *VecTy = (VF == 1) ? PN->getType() :
- VectorType::get(PN->getType(), VF);
+ Type *VecTy =
+ (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
Entry[part] = PHINode::Create(
- VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt());
+ VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
}
PV->push_back(P);
return;
@@ -3635,21 +3989,20 @@ void InnerLoopVectorizer::widenPHIInstruction(
// SELECT(Mask2, In2,
// ( ...)))
for (unsigned In = 0; In < NumIncoming; In++) {
- VectorParts Cond = createEdgeMask(P->getIncomingBlock(In),
- P->getParent());
+ VectorParts Cond =
+ createEdgeMask(P->getIncomingBlock(In), P->getParent());
VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
for (unsigned part = 0; part < UF; ++part) {
// We might have single edge PHIs (blocks) - use an identity
// 'select' for the first PHI operand.
if (In == 0)
- Entry[part] = Builder.CreateSelect(Cond[part], In0[part],
- In0[part]);
+ Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
else
// Select between the current value and the previous incoming edge
// based on the incoming mask.
- Entry[part] = Builder.CreateSelect(Cond[part], In0[part],
- Entry[part], "predphi");
+ Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
+ "predphi");
}
}
return;
@@ -3657,85 +4010,68 @@ void InnerLoopVectorizer::widenPHIInstruction(
// This PHINode must be an induction variable.
// Make sure that we know about it.
- assert(Legal->getInductionVars()->count(P) &&
- "Not an induction variable");
+ assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
InductionDescriptor II = Legal->getInductionVars()->lookup(P);
+ const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
// FIXME: The newly created binary instructions should contain nsw/nuw flags,
// which can be found from the original scalar operations.
switch (II.getKind()) {
- case InductionDescriptor::IK_NoInduction:
- llvm_unreachable("Unknown induction");
- case InductionDescriptor::IK_IntInduction: {
- assert(P->getType() == II.getStartValue()->getType() &&
- "Types must match");
- // Handle other induction variables that are now based on the
- // canonical one.
- Value *V = Induction;
- if (P != OldInduction) {
- V = Builder.CreateSExtOrTrunc(Induction, P->getType());
- V = II.transform(Builder, V);
- V->setName("offset.idx");
+ case InductionDescriptor::IK_NoInduction:
+ llvm_unreachable("Unknown induction");
+ case InductionDescriptor::IK_IntInduction:
+ return widenIntInduction(P, Entry);
+ case InductionDescriptor::IK_PtrInduction:
+ // Handle the pointer induction variable case.
+ assert(P->getType()->isPointerTy() && "Unexpected type.");
+ // This is the normalized GEP that starts counting at zero.
+ Value *PtrInd = Induction;
+ PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
+ // This is the vector of results. Notice that we don't generate
+ // vector geps because scalar geps result in better code.
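+    // For example, with VF = 4 and UF = 1 this emits four scalar GEPs at
+    // offsets 0..3 from the normalized induction value and inserts them
+    // lane by lane into a single vector.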
+ for (unsigned part = 0; part < UF; ++part) {
+ if (VF == 1) {
+ int EltIndex = part;
+ Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
+ Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
+ Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
+ SclrGep->setName("next.gep");
+ Entry[part] = SclrGep;
+ continue;
}
- Value *Broadcasted = getBroadcastInstrs(V);
- // After broadcasting the induction variable we need to make the vector
- // consecutive by adding 0, 1, 2, etc.
- for (unsigned part = 0; part < UF; ++part)
- Entry[part] = getStepVector(Broadcasted, VF * part, II.getStepValue());
- return;
- }
- case InductionDescriptor::IK_PtrInduction:
- // Handle the pointer induction variable case.
- assert(P->getType()->isPointerTy() && "Unexpected type.");
- // This is the normalized GEP that starts counting at zero.
- Value *PtrInd = Induction;
- PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStepValue()->getType());
- // This is the vector of results. Notice that we don't generate
- // vector geps because scalar geps result in better code.
- for (unsigned part = 0; part < UF; ++part) {
- if (VF == 1) {
- int EltIndex = part;
- Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
- Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
- Value *SclrGep = II.transform(Builder, GlobalIdx);
- SclrGep->setName("next.gep");
- Entry[part] = SclrGep;
- continue;
- }
- Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
- for (unsigned int i = 0; i < VF; ++i) {
- int EltIndex = i + part * VF;
- Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
- Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
- Value *SclrGep = II.transform(Builder, GlobalIdx);
- SclrGep->setName("next.gep");
- VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
- Builder.getInt32(i),
- "insert.gep");
- }
- Entry[part] = VecVal;
+ Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
+ for (unsigned int i = 0; i < VF; ++i) {
+ int EltIndex = i + part * VF;
+ Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
+ Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
+ Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
+ SclrGep->setName("next.gep");
+ VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
+ Builder.getInt32(i), "insert.gep");
}
- return;
+ Entry[part] = VecVal;
+ }
+ return;
}
}
void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
// For each instruction in the old loop.
- for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
- VectorParts &Entry = WidenMap.get(&*it);
+ for (Instruction &I : *BB) {
+ VectorParts &Entry = WidenMap.get(&I);
- switch (it->getOpcode()) {
+ switch (I.getOpcode()) {
case Instruction::Br:
// Nothing to do for PHIs and BR, since we already took care of the
// loop control flow instructions.
continue;
case Instruction::PHI: {
// Vectorize PHINodes.
- widenPHIInstruction(&*it, Entry, UF, VF, PV);
+ widenPHIInstruction(&I, Entry, UF, VF, PV);
continue;
- }// End of PHI.
+ } // End of PHI.
case Instruction::Add:
case Instruction::FAdd:
@@ -3756,10 +4092,10 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
case Instruction::Or:
case Instruction::Xor: {
// Just widen binops.
- BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it);
+ auto *BinOp = cast<BinaryOperator>(&I);
setDebugLocFromInst(Builder, BinOp);
- VectorParts &A = getVectorValue(it->getOperand(0));
- VectorParts &B = getVectorValue(it->getOperand(1));
+ VectorParts &A = getVectorValue(BinOp->getOperand(0));
+ VectorParts &B = getVectorValue(BinOp->getOperand(1));
// Use this vector value for all users of the original instruction.
for (unsigned Part = 0; Part < UF; ++Part) {
@@ -3771,7 +4107,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
Entry[Part] = V;
}
- propagateMetadata(Entry, &*it);
+ addMetadata(Entry, BinOp);
break;
}
case Instruction::Select: {
@@ -3780,58 +4116,58 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
// instruction with a scalar condition. Otherwise, use vector-select.
auto *SE = PSE.getSE();
bool InvariantCond =
- SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop);
- setDebugLocFromInst(Builder, &*it);
+ SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
+ setDebugLocFromInst(Builder, &I);
// The condition can be loop invariant but still defined inside the
// loop. This means that we can't just use the original 'cond' value.
// We have to take the 'vectorized' value and pick the first lane.
// Instcombine will make this a no-op.
- VectorParts &Cond = getVectorValue(it->getOperand(0));
- VectorParts &Op0 = getVectorValue(it->getOperand(1));
- VectorParts &Op1 = getVectorValue(it->getOperand(2));
-
- Value *ScalarCond = (VF == 1) ? Cond[0] :
- Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));
+ VectorParts &Cond = getVectorValue(I.getOperand(0));
+ VectorParts &Op0 = getVectorValue(I.getOperand(1));
+ VectorParts &Op1 = getVectorValue(I.getOperand(2));
+
+ Value *ScalarCond =
+ (VF == 1)
+ ? Cond[0]
+ : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));
for (unsigned Part = 0; Part < UF; ++Part) {
Entry[Part] = Builder.CreateSelect(
- InvariantCond ? ScalarCond : Cond[Part],
- Op0[Part],
- Op1[Part]);
+ InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
}
- propagateMetadata(Entry, &*it);
+ addMetadata(Entry, &I);
break;
}
case Instruction::ICmp:
case Instruction::FCmp: {
// Widen compares. Generate vector compares.
- bool FCmp = (it->getOpcode() == Instruction::FCmp);
- CmpInst *Cmp = dyn_cast<CmpInst>(it);
- setDebugLocFromInst(Builder, &*it);
- VectorParts &A = getVectorValue(it->getOperand(0));
- VectorParts &B = getVectorValue(it->getOperand(1));
+ bool FCmp = (I.getOpcode() == Instruction::FCmp);
+ auto *Cmp = dyn_cast<CmpInst>(&I);
+ setDebugLocFromInst(Builder, Cmp);
+ VectorParts &A = getVectorValue(Cmp->getOperand(0));
+ VectorParts &B = getVectorValue(Cmp->getOperand(1));
for (unsigned Part = 0; Part < UF; ++Part) {
Value *C = nullptr;
if (FCmp) {
C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
- cast<FCmpInst>(C)->copyFastMathFlags(&*it);
+ cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
} else {
C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
}
Entry[Part] = C;
}
- propagateMetadata(Entry, &*it);
+ addMetadata(Entry, &I);
break;
}
case Instruction::Store:
case Instruction::Load:
- vectorizeMemoryInstruction(&*it);
- break;
+ vectorizeMemoryInstruction(&I);
+ break;
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
@@ -3844,58 +4180,52 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast: {
- CastInst *CI = dyn_cast<CastInst>(it);
- setDebugLocFromInst(Builder, &*it);
- /// Optimize the special case where the source is the induction
- /// variable. Notice that we can only optimize the 'trunc' case
- /// because: a. FP conversions lose precision, b. sext/zext may wrap,
- /// c. other casts depend on pointer size.
- if (CI->getOperand(0) == OldInduction &&
- it->getOpcode() == Instruction::Trunc) {
- Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction,
- CI->getType());
- Value *Broadcasted = getBroadcastInstrs(ScalarCast);
- InductionDescriptor II =
- Legal->getInductionVars()->lookup(OldInduction);
- Constant *Step = ConstantInt::getSigned(
- CI->getType(), II.getStepValue()->getSExtValue());
- for (unsigned Part = 0; Part < UF; ++Part)
- Entry[Part] = getStepVector(Broadcasted, VF * Part, Step);
- propagateMetadata(Entry, &*it);
+ auto *CI = dyn_cast<CastInst>(&I);
+ setDebugLocFromInst(Builder, CI);
+
+ // Optimize the special case where the source is a constant integer
+ // induction variable. Notice that we can only optimize the 'trunc' case
+ // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
+ // (c) other casts depend on pointer size.
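+      // For example, '%t = trunc i64 %iv to i32', where %iv is the primary
+      // induction with a constant step, is widened directly as an i32
+      // induction instead of truncating a wide i64 vector.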
+ auto ID = Legal->getInductionVars()->lookup(OldInduction);
+ if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
+ ID.getConstIntStepValue()) {
+ widenIntInduction(OldInduction, Entry, cast<TruncInst>(CI));
+ addMetadata(Entry, &I);
break;
}
+
/// Vectorize casts.
- Type *DestTy = (VF == 1) ? CI->getType() :
- VectorType::get(CI->getType(), VF);
+ Type *DestTy =
+ (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
- VectorParts &A = getVectorValue(it->getOperand(0));
+ VectorParts &A = getVectorValue(CI->getOperand(0));
for (unsigned Part = 0; Part < UF; ++Part)
Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
- propagateMetadata(Entry, &*it);
+ addMetadata(Entry, &I);
break;
}
case Instruction::Call: {
// Ignore dbg intrinsics.
- if (isa<DbgInfoIntrinsic>(it))
+ if (isa<DbgInfoIntrinsic>(I))
break;
- setDebugLocFromInst(Builder, &*it);
+ setDebugLocFromInst(Builder, &I);
Module *M = BB->getParent()->getParent();
- CallInst *CI = cast<CallInst>(it);
+ auto *CI = cast<CallInst>(&I);
StringRef FnName = CI->getCalledFunction()->getName();
Function *F = CI->getCalledFunction();
Type *RetTy = ToVectorTy(CI->getType(), VF);
SmallVector<Type *, 4> Tys;
- for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
- Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
-
- Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
- if (ID &&
- (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
- ID == Intrinsic::lifetime_start)) {
- scalarizeInstruction(&*it);
+ for (Value *ArgOperand : CI->arg_operands())
+ Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
+
+ Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
+ if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
+ ID == Intrinsic::lifetime_start)) {
+ scalarizeInstruction(&I);
break;
}
// The flag shows whether we use Intrinsic or a usual Call for vectorized
@@ -3906,7 +4236,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
bool UseVectorIntrinsic =
ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
if (!UseVectorIntrinsic && NeedToScalarize) {
- scalarizeInstruction(&*it);
+ scalarizeInstruction(&I);
break;
}
@@ -3944,19 +4274,27 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
}
}
assert(VectorF && "Can't create vector function.");
- Entry[Part] = Builder.CreateCall(VectorF, Args);
+
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ CI->getOperandBundlesAsDefs(OpBundles);
+ CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
+
+ if (isa<FPMathOperator>(V))
+ V->copyFastMathFlags(CI);
+
+ Entry[Part] = V;
}
- propagateMetadata(Entry, &*it);
+ addMetadata(Entry, &I);
break;
}
default:
// All other instructions are unsupported. Scalarize them.
- scalarizeInstruction(&*it);
+ scalarizeInstruction(&I);
break;
- }// end of switch.
- }// end of for_each instr.
+ } // end of switch.
+ } // end of for_each instr.
}
void InnerLoopVectorizer::updateAnalysis() {
@@ -3967,16 +4305,11 @@ void InnerLoopVectorizer::updateAnalysis() {
assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
"Entry does not dominate exit.");
- for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
- DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]);
- DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back());
-
// We don't predicate stores by this point, so the vector body should be a
// single loop.
- assert(LoopVectorBody.size() == 1 && "Expected single block loop!");
- DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader);
+ DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
- DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back());
+ DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
@@ -3989,12 +4322,12 @@ void InnerLoopVectorizer::updateAnalysis() {
/// Phi nodes with constant expressions that can trap are not safe to if
/// convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- PHINode *Phi = dyn_cast<PHINode>(I);
+ for (Instruction &I : *BB) {
+ auto *Phi = dyn_cast<PHINode>(&I);
if (!Phi)
return true;
- for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
- if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
+ for (Value *V : Phi->incoming_values())
+ if (auto *C = dyn_cast<Constant>(V))
if (C->canTrap())
return false;
}
@@ -4013,27 +4346,21 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
SmallPtrSet<Value *, 8> SafePointes;
// Collect safe addresses.
- for (Loop::block_iterator BI = TheLoop->block_begin(),
- BE = TheLoop->block_end(); BI != BE; ++BI) {
- BasicBlock *BB = *BI;
-
+ for (BasicBlock *BB : TheLoop->blocks()) {
if (blockNeedsPredication(BB))
continue;
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ for (Instruction &I : *BB) {
+ if (auto *LI = dyn_cast<LoadInst>(&I))
SafePointes.insert(LI->getPointerOperand());
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ else if (auto *SI = dyn_cast<StoreInst>(&I))
SafePointes.insert(SI->getPointerOperand());
}
}
// Collect the blocks that need predication.
BasicBlock *Header = TheLoop->getHeader();
- for (Loop::block_iterator BI = TheLoop->block_begin(),
- BE = TheLoop->block_end(); BI != BE; ++BI) {
- BasicBlock *BB = *BI;
-
+ for (BasicBlock *BB : TheLoop->blocks()) {
// We don't support switch statements inside loops.
if (!isa<BranchInst>(BB->getTerminator())) {
emitAnalysis(VectorizationReport(BB->getTerminator())
@@ -4063,9 +4390,8 @@ bool LoopVectorizationLegality::canVectorize() {
// We must have a loop in canonical form. Loops with indirectbr in them cannot
// be canonicalized.
if (!TheLoop->getLoopPreheader()) {
- emitAnalysis(
- VectorizationReport() <<
- "loop control flow is not understood by vectorizer");
+ emitAnalysis(VectorizationReport()
+ << "loop control flow is not understood by vectorizer");
return false;
}
@@ -4077,17 +4403,15 @@ bool LoopVectorizationLegality::canVectorize() {
// We must have a single backedge.
if (TheLoop->getNumBackEdges() != 1) {
- emitAnalysis(
- VectorizationReport() <<
- "loop control flow is not understood by vectorizer");
+ emitAnalysis(VectorizationReport()
+ << "loop control flow is not understood by vectorizer");
return false;
}
// We must have a single exiting block.
if (!TheLoop->getExitingBlock()) {
- emitAnalysis(
- VectorizationReport() <<
- "loop control flow is not understood by vectorizer");
+ emitAnalysis(VectorizationReport()
+ << "loop control flow is not understood by vectorizer");
return false;
}
@@ -4095,15 +4419,14 @@ bool LoopVectorizationLegality::canVectorize() {
// checked at the end of each iteration. With that we can assume that all
// instructions in the loop are executed the same number of times.
if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
- emitAnalysis(
- VectorizationReport() <<
- "loop control flow is not understood by vectorizer");
+ emitAnalysis(VectorizationReport()
+ << "loop control flow is not understood by vectorizer");
return false;
}
// We need to have a loop header.
- DEBUG(dbgs() << "LV: Found a loop: " <<
- TheLoop->getHeader()->getName() << '\n');
+ DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
+ << '\n');
// Check if we can if-convert non-single-bb loops.
unsigned NumBlocks = TheLoop->getNumBlocks();
@@ -4113,7 +4436,7 @@ bool LoopVectorizationLegality::canVectorize() {
}
// ScalarEvolution needs to be able to find the exit count.
- const SCEV *ExitCount = PSE.getSE()->getBackedgeTakenCount(TheLoop);
+ const SCEV *ExitCount = PSE.getBackedgeTakenCount();
if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
emitAnalysis(VectorizationReport()
<< "could not determine number of loop iterations");
@@ -4150,7 +4473,7 @@ bool LoopVectorizationLegality::canVectorize() {
// Analyze interleaved memory accesses.
if (UseInterleaved)
- InterleaveInfo.analyzeInterleaving(Strides);
+ InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
@@ -4182,7 +4505,7 @@ static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
return Ty;
}
-static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
+static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
Ty0 = convertPointerToIntegerType(DL, Ty0);
Ty1 = convertPointerToIntegerType(DL, Ty1);
if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
@@ -4193,11 +4516,11 @@ static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
- SmallPtrSetImpl<Value *> &Reductions) {
- // Reduction instructions are allowed to have exit users. All other
- // instructions must not have external users.
- if (!Reductions.count(Inst))
- //Check that all of the users of the loop are inside the BB.
+ SmallPtrSetImpl<Value *> &AllowedExit) {
+ // Reduction and Induction instructions are allowed to have exit users. All
+ // other instructions must not have external users.
+ if (!AllowedExit.count(Inst))
+ // Check that all of the users of the loop are inside the BB.
for (User *U : Inst->users()) {
Instruction *UI = cast<Instruction>(U);
// This user may be a reduction exit value.
@@ -4209,31 +4532,61 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
return false;
}
+void LoopVectorizationLegality::addInductionPhi(
+ PHINode *Phi, const InductionDescriptor &ID,
+ SmallPtrSetImpl<Value *> &AllowedExit) {
+ Inductions[Phi] = ID;
+ Type *PhiTy = Phi->getType();
+ const DataLayout &DL = Phi->getModule()->getDataLayout();
+
+ // Get the widest type.
+ if (!WidestIndTy)
+ WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
+ else
+ WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
+
+ // Int inductions are special because we only allow one IV.
+ if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
+ ID.getConstIntStepValue() &&
+ ID.getConstIntStepValue()->isOne() &&
+ isa<Constant>(ID.getStartValue()) &&
+ cast<Constant>(ID.getStartValue())->isNullValue()) {
+
+ // Use the phi node with the widest type as induction. Use the last
+ // one if there are multiple (no good reason for doing this other
+ // than it is expedient). We've checked that it begins at zero and
+ // steps by one, so this is a canonical induction variable.
+ if (!Induction || PhiTy == WidestIndTy)
+ Induction = Phi;
+ }
+
+ // Both the PHI node itself, and the "post-increment" value feeding
+ // back into the PHI node may have external users.
+ AllowedExit.insert(Phi);
+ AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
+
+ DEBUG(dbgs() << "LV: Found an induction variable.\n");
+ return;
+}
+
bool LoopVectorizationLegality::canVectorizeInstrs() {
BasicBlock *Header = TheLoop->getHeader();
// Look for the attribute signaling the absence of NaNs.
Function &F = *Header->getParent();
- const DataLayout &DL = F.getParent()->getDataLayout();
- if (F.hasFnAttribute("no-nans-fp-math"))
- HasFunNoNaNAttr =
- F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
+ HasFunNoNaNAttr =
+ F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
// For each block in the loop.
- for (Loop::block_iterator bb = TheLoop->block_begin(),
- be = TheLoop->block_end(); bb != be; ++bb) {
-
+ for (BasicBlock *BB : TheLoop->blocks()) {
// Scan the instructions in the block and look for hazards.
- for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
- ++it) {
-
- if (PHINode *Phi = dyn_cast<PHINode>(it)) {
+ for (Instruction &I : *BB) {
+ if (auto *Phi = dyn_cast<PHINode>(&I)) {
Type *PhiTy = Phi->getType();
// Check that this PHI type is allowed.
- if (!PhiTy->isIntegerTy() &&
- !PhiTy->isFloatingPointTy() &&
+ if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
!PhiTy->isPointerTy()) {
- emitAnalysis(VectorizationReport(&*it)
+ emitAnalysis(VectorizationReport(Phi)
<< "loop control flow is not understood by vectorizer");
DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
return false;
@@ -4242,61 +4595,25 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// If this PHINode is not in the header block, then we know that we
// can convert it to select during if-conversion. No need to check if
// the PHIs in this block are induction or reduction variables.
- if (*bb != Header) {
+ if (BB != Header) {
// Check that this instruction has no outside users or is an
// identified reduction value with an outside user.
- if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
+ if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
continue;
- emitAnalysis(VectorizationReport(&*it) <<
- "value could not be identified as "
- "an induction or reduction variable");
+ emitAnalysis(VectorizationReport(Phi)
+ << "value could not be identified as "
+ "an induction or reduction variable");
return false;
}
// We only allow if-converted PHIs with exactly two incoming values.
if (Phi->getNumIncomingValues() != 2) {
- emitAnalysis(VectorizationReport(&*it)
+ emitAnalysis(VectorizationReport(Phi)
<< "control flow not understood by vectorizer");
DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
return false;
}
- InductionDescriptor ID;
- if (InductionDescriptor::isInductionPHI(Phi, PSE.getSE(), ID)) {
- Inductions[Phi] = ID;
- // Get the widest type.
- if (!WidestIndTy)
- WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
- else
- WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
-
- // Int inductions are special because we only allow one IV.
- if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
- ID.getStepValue()->isOne() &&
- isa<Constant>(ID.getStartValue()) &&
- cast<Constant>(ID.getStartValue())->isNullValue()) {
- // Use the phi node with the widest type as induction. Use the last
- // one if there are multiple (no good reason for doing this other
- // than it is expedient). We've checked that it begins at zero and
- // steps by one, so this is a canonical induction variable.
- if (!Induction || PhiTy == WidestIndTy)
- Induction = Phi;
- }
-
- DEBUG(dbgs() << "LV: Found an induction variable.\n");
-
- // Until we explicitly handle the case of an induction variable with
- // an outside loop user we have to give up vectorizing this loop.
- if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
- emitAnalysis(VectorizationReport(&*it) <<
- "use of induction value outside of the "
- "loop is not handled by vectorizer");
- return false;
- }
-
- continue;
- }
-
RecurrenceDescriptor RedDes;
if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
if (RedDes.hasUnsafeAlgebra())
@@ -4306,22 +4623,41 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
continue;
}
- emitAnalysis(VectorizationReport(&*it) <<
- "value that could not be identified as "
- "reduction is used outside the loop");
- DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
+ InductionDescriptor ID;
+ if (InductionDescriptor::isInductionPHI(Phi, PSE, ID)) {
+ addInductionPhi(Phi, ID, AllowedExit);
+ continue;
+ }
+
+ if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
+ FirstOrderRecurrences.insert(Phi);
+ continue;
+ }
+
+      // As a last resort, coerce the PHI to an AddRec expression
+      // and retry classifying it as an induction PHI.
+ if (InductionDescriptor::isInductionPHI(Phi, PSE, ID, true)) {
+ addInductionPhi(Phi, ID, AllowedExit);
+ continue;
+ }
+
+ emitAnalysis(VectorizationReport(Phi)
+ << "value that could not be identified as "
+ "reduction is used outside the loop");
+ DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
return false;
- }// end of PHI handling
+ } // end of PHI handling
// We handle calls that:
// * Are debug info intrinsics.
// * Have a mapping to an IR intrinsic.
// * Have a vector version available.
- CallInst *CI = dyn_cast<CallInst>(it);
- if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI) &&
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
+ !isa<DbgInfoIntrinsic>(CI) &&
!(CI->getCalledFunction() && TLI &&
TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
- emitAnalysis(VectorizationReport(&*it)
+ emitAnalysis(VectorizationReport(CI)
<< "call instruction cannot be vectorized");
DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
return false;
@@ -4329,11 +4665,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
// second argument is the same (i.e. loop invariant)
- if (CI &&
- hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) {
+ if (CI && hasVectorInstrinsicScalarOpd(
+ getVectorIntrinsicIDForCall(CI, TLI), 1)) {
auto *SE = PSE.getSE();
if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
- emitAnalysis(VectorizationReport(&*it)
+ emitAnalysis(VectorizationReport(CI)
<< "intrinsic instruction cannot be vectorized");
DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
return false;
@@ -4342,40 +4678,44 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Check that the instruction return type is vectorizable.
// Also, we can't vectorize extractelement instructions.
- if ((!VectorType::isValidElementType(it->getType()) &&
- !it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) {
- emitAnalysis(VectorizationReport(&*it)
+ if ((!VectorType::isValidElementType(I.getType()) &&
+ !I.getType()->isVoidTy()) ||
+ isa<ExtractElementInst>(I)) {
+ emitAnalysis(VectorizationReport(&I)
<< "instruction return type cannot be vectorized");
DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
return false;
}
// Check that the stored type is vectorizable.
- if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
+ if (auto *ST = dyn_cast<StoreInst>(&I)) {
Type *T = ST->getValueOperand()->getType();
if (!VectorType::isValidElementType(T)) {
- emitAnalysis(VectorizationReport(ST) <<
- "store instruction cannot be vectorized");
+ emitAnalysis(VectorizationReport(ST)
+ << "store instruction cannot be vectorized");
return false;
}
- if (EnableMemAccessVersioning)
- collectStridedAccess(ST);
- }
- if (EnableMemAccessVersioning)
- if (LoadInst *LI = dyn_cast<LoadInst>(it))
- collectStridedAccess(LI);
+      // FP instructions can allow unsafe algebra, making them vectorizable
+      // by non-IEEE-754 compliant SIMD units.
+ // This applies to floating-point math operations and calls, not memory
+ // operations, shuffles, or casts, as they don't change precision or
+ // semantics.
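+      // For example, a plain 'fadd float %a, %b' carries no fast-math flags
+      // and would set the potentially-unsafe hint below, while
+      // 'fadd fast float %a, %b' would not.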
+ } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
+ !I.hasUnsafeAlgebra()) {
+ DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
+ Hints->setPotentiallyUnsafe();
+ }
// Reduction instructions are allowed to have exit users.
// All other instructions must not have external users.
- if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
- emitAnalysis(VectorizationReport(&*it) <<
- "value cannot be used outside the loop");
+ if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
+ emitAnalysis(VectorizationReport(&I)
+ << "value cannot be used outside the loop");
return false;
}
} // next instr.
-
}
if (!Induction) {
@@ -4396,64 +4736,90 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
return true;
}
-void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) {
- Value *Ptr = nullptr;
- if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
- Ptr = LI->getPointerOperand();
- else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
- Ptr = SI->getPointerOperand();
- else
- return;
-
- Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
- if (!Stride)
- return;
-
- DEBUG(dbgs() << "LV: Found a strided access that we can version");
- DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
- Strides[Ptr] = Stride;
- StrideSet.insert(Stride);
-}
-
void LoopVectorizationLegality::collectLoopUniforms() {
// We now know that the loop is vectorizable!
// Collect variables that will remain uniform after vectorization.
- std::vector<Value*> Worklist;
- BasicBlock *Latch = TheLoop->getLoopLatch();
- // Start with the conditional branch and walk up the block.
- Worklist.push_back(Latch->getTerminator()->getOperand(0));
+  // If V is not an instruction inside the current loop, it is a Value
+  // outside of the scope we are interested in.
+ auto isOutOfScope = [&](Value *V) -> bool {
+ Instruction *I = dyn_cast<Instruction>(V);
+ return (!I || !TheLoop->contains(I));
+ };
+
+ SetVector<Instruction *> Worklist;
+ BasicBlock *Latch = TheLoop->getLoopLatch();
+ // Start with the conditional branch.
+ if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
+ Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
+ Worklist.insert(Cmp);
+ DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
+ }
// Also add all consecutive pointer values; these values will be uniform
- // after vectorization (and subsequent cleanup) and, until revectorization is
- // supported, all dependencies must also be uniform.
- for (Loop::block_iterator B = TheLoop->block_begin(),
- BE = TheLoop->block_end(); B != BE; ++B)
- for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end();
- I != IE; ++I)
- if (I->getType()->isPointerTy() && isConsecutivePtr(&*I))
- Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
-
- while (!Worklist.empty()) {
- Instruction *I = dyn_cast<Instruction>(Worklist.back());
- Worklist.pop_back();
-
- // Look at instructions inside this loop.
- // Stop when reaching PHI nodes.
- // TODO: we need to follow values all over the loop, not only in this block.
- if (!I || !TheLoop->contains(I) || isa<PHINode>(I))
- continue;
+ // after vectorization (and subsequent cleanup).
+ for (auto *BB : TheLoop->blocks()) {
+ for (auto &I : *BB) {
+ if (I.getType()->isPointerTy() && isConsecutivePtr(&I)) {
+ Worklist.insert(&I);
+ DEBUG(dbgs() << "LV: Found uniform instruction: " << I << "\n");
+ }
+ }
+ }
- // This is a known uniform.
- Uniforms.insert(I);
+ // Expand Worklist in topological order: whenever a new instruction
+  // is added, its users should be either already inside Worklist or out of
+  // scope. This ensures a uniform instruction will only be used by uniform
+  // instructions or out-of-scope instructions.
+ unsigned idx = 0;
+ do {
+ Instruction *I = Worklist[idx++];
- // Insert all operands.
- Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
+ for (auto OV : I->operand_values()) {
+ if (isOutOfScope(OV))
+ continue;
+ auto *OI = cast<Instruction>(OV);
+ if (all_of(OI->users(), [&](User *U) -> bool {
+ return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
+ })) {
+ Worklist.insert(OI);
+ DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
+ }
+ }
+ } while (idx != Worklist.size());
+
+  // For an instruction to be added to Worklist above, all of its users inside
+  // the current loop must already have been added. This condition can never
+  // hold for phi nodes, because a phi always sits in a dependence cycle: every
+  // instruction in the cycle depends on another one in the cycle being added
+  // first, so none of them would ever be added. That is why we process PHIs
+  // separately.
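+  // For example, an induction phi %i and its update %i.next = add i64 %i, 1
+  // form such a cycle. If every other user of %i and %i.next is uniform or
+  // out of scope, both are added to the worklist here.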
+ for (auto &Induction : *getInductionVars()) {
+ auto *PN = Induction.first;
+ auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
+ if (all_of(PN->users(),
+ [&](User *U) -> bool {
+ return U == UpdateV || isOutOfScope(U) ||
+ Worklist.count(cast<Instruction>(U));
+ }) &&
+ all_of(UpdateV->users(), [&](User *U) -> bool {
+ return U == PN || isOutOfScope(U) ||
+ Worklist.count(cast<Instruction>(U));
+ })) {
+ Worklist.insert(cast<Instruction>(PN));
+ Worklist.insert(cast<Instruction>(UpdateV));
+ DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
+ DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
+ }
}
+
+ Uniforms.insert(Worklist.begin(), Worklist.end());
}
bool LoopVectorizationLegality::canVectorizeMemory() {
- LAI = &LAA->getInfo(TheLoop, Strides);
+ LAI = &(*GetLAA)(*TheLoop);
+ InterleaveInfo.setLAI(LAI);
auto &OptionalReport = LAI->getReport();
if (OptionalReport)
emitAnalysis(VectorizationReport(*OptionalReport));
@@ -4469,13 +4835,13 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
- PSE.addPredicate(LAI->PSE.getUnionPredicate());
+ PSE.addPredicate(LAI->getPSE().getUnionPredicate());
return true;
}
bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
- Value *In0 = const_cast<Value*>(V);
+ Value *In0 = const_cast<Value *>(V);
PHINode *PN = dyn_cast_or_null<PHINode>(In0);
if (!PN)
return false;
@@ -4483,67 +4849,73 @@ bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
return Inductions.count(PN);
}
-bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
+bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
+ return FirstOrderRecurrences.count(Phi);
+}
+
+bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}
-bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
- SmallPtrSetImpl<Value *> &SafePtrs) {
-
- for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+bool LoopVectorizationLegality::blockCanBePredicated(
+ BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
+ const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
+
+ for (Instruction &I : *BB) {
// Check that we don't have a constant expression that can trap as operand.
- for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end();
- OI != OE; ++OI) {
- if (Constant *C = dyn_cast<Constant>(*OI))
+ for (Value *Operand : I.operands()) {
+ if (auto *C = dyn_cast<Constant>(Operand))
if (C->canTrap())
return false;
}
// We might be able to hoist the load.
- if (it->mayReadFromMemory()) {
- LoadInst *LI = dyn_cast<LoadInst>(it);
+ if (I.mayReadFromMemory()) {
+ auto *LI = dyn_cast<LoadInst>(&I);
if (!LI)
return false;
if (!SafePtrs.count(LI->getPointerOperand())) {
- if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand())) {
+ if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
+ isLegalMaskedGather(LI->getType())) {
MaskedOp.insert(LI);
continue;
}
+ // !llvm.mem.parallel_loop_access implies if-conversion safety.
+ if (IsAnnotatedParallel)
+ continue;
return false;
}
}
// We don't predicate stores at the moment.
- if (it->mayWriteToMemory()) {
- StoreInst *SI = dyn_cast<StoreInst>(it);
+ if (I.mayWriteToMemory()) {
+ auto *SI = dyn_cast<StoreInst>(&I);
// We only support predication of stores in basic blocks with one
// predecessor.
if (!SI)
return false;
+ // Build a masked store if it is legal for the target.
+ if (isLegalMaskedStore(SI->getValueOperand()->getType(),
+ SI->getPointerOperand()) ||
+ isLegalMaskedScatter(SI->getValueOperand()->getType())) {
+ MaskedOp.insert(SI);
+ continue;
+ }
+
bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
-
+
if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
- !isSinglePredecessor) {
- // Build a masked store if it is legal for the target, otherwise
- // scalarize the block.
- bool isLegalMaskedOp =
- isLegalMaskedStore(SI->getValueOperand()->getType(),
- SI->getPointerOperand());
- if (isLegalMaskedOp) {
- --NumPredStores;
- MaskedOp.insert(SI);
- continue;
- }
+ !isSinglePredecessor)
return false;
- }
}
- if (it->mayThrow())
+ if (I.mayThrow())
return false;
// The instructions below can trap.
- switch (it->getOpcode()) {
- default: continue;
+ switch (I.getOpcode()) {
+ default:
+ continue;
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::URem:
@@ -4555,199 +4927,273 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
return true;
}
-void InterleavedAccessInfo::collectConstStridedAccesses(
- MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
+void InterleavedAccessInfo::collectConstStrideAccesses(
+ MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
const ValueToValueMap &Strides) {
- // Holds load/store instructions in program order.
- SmallVector<Instruction *, 16> AccessList;
- for (auto *BB : TheLoop->getBlocks()) {
- bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
+ auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
+ // Since it's desired that the load/store instructions be maintained in
+ // "program order" for the interleaved access analysis, we have to visit the
+ // blocks in the loop in reverse postorder (i.e., in a topological order).
+ // Such an ordering will ensure that any load/store that may be executed
+ // before a second load/store will precede the second load/store in
+ // AccessStrideInfo.
+ LoopBlocksDFS DFS(TheLoop);
+ DFS.perform(LI);
+ for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
for (auto &I : *BB) {
- if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I))
+ auto *LI = dyn_cast<LoadInst>(&I);
+ auto *SI = dyn_cast<StoreInst>(&I);
+ if (!LI && !SI)
continue;
- // FIXME: Currently we can't handle mixed accesses and predicated accesses
- if (IsPred)
- return;
-
- AccessList.push_back(&I);
- }
- }
-
- if (AccessList.empty())
- return;
-
- auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
- for (auto I : AccessList) {
- LoadInst *LI = dyn_cast<LoadInst>(I);
- StoreInst *SI = dyn_cast<StoreInst>(I);
-
- Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
- int Stride = isStridedPtr(PSE, Ptr, TheLoop, Strides);
-
- // The factor of the corresponding interleave group.
- unsigned Factor = std::abs(Stride);
- // Ignore the access if the factor is too small or too large.
- if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
- continue;
+ Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
+ int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);
- const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
- PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
- unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());
+ const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
+ PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
+ uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
- // An alignment of 0 means target ABI alignment.
- unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
- if (!Align)
- Align = DL.getABITypeAlignment(PtrTy->getElementType());
+ // An alignment of 0 means target ABI alignment.
+ unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
+ if (!Align)
+ Align = DL.getABITypeAlignment(PtrTy->getElementType());
- StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align);
- }
+ AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
+ }
}
-// Analyze interleaved accesses and collect them into interleave groups.
+// Analyze interleaved accesses and collect them into interleaved load and
+// store groups.
+//
+// When generating code for an interleaved load group, we effectively hoist all
+// loads in the group to the location of the first load in program order. When
+// generating code for an interleaved store group, we sink all stores to the
+// location of the last store. This code motion can change the order of load
+// and store instructions and may break dependences.
+//
+// The code generation strategy mentioned above ensures that we won't violate
+// any write-after-read (WAR) dependences.
//
-// Notice that the vectorization on interleaved groups will change instruction
-// orders and may break dependences. But the memory dependence check guarantees
-// that there is no overlap between two pointers of different strides, element
-// sizes or underlying bases.
+// E.g., for the WAR dependence:  a = A[i];  // (1)
+//                                A[i] = b;  // (2)
//
-// For pointers sharing the same stride, element size and underlying base, no
-// need to worry about Read-After-Write dependences and Write-After-Read
+// The store group of (2) is always inserted at or below (2), and the load
+// group of (1) is always inserted at or above (1). Thus, the instructions will
+// never be reordered. All other dependences are checked to ensure the
+// correctness of the instruction reordering.
+//
+// The algorithm visits all memory accesses in the loop in bottom-up program
+// order. Program order is established by traversing the blocks in the loop in
+// reverse postorder when collecting the accesses.
+//
+// We visit the memory accesses in bottom-up order because it can simplify the
+// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
-// E.g. The RAW dependence:  A[i] = a;
-//                           b = A[i];
-// This won't exist as it is a store-load forwarding conflict, which has
-// already been checked and forbidden in the dependence check.
+// E.g., for the WAW dependence:  A[i] = a;      // (1)
+//                                A[i] = b;      // (2)
+//                                A[i + 1] = c;  // (3)
//
-// E.g. The WAR dependence:  a = A[i];  // (1)
-//                           A[i] = b;  // (2)
-// The store group of (2) is always inserted at or below (2), and the load group
-// of (1) is always inserted at or above (1). The dependence is safe.
+// We will first create a store group with (3) and (2). (1) can't be added to
+// this group because it and (2) are dependent. However, (1) can be grouped
+// with other accesses that may precede it in program order. Note that a
+// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
const ValueToValueMap &Strides) {
DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
- // Holds all the stride accesses.
- MapVector<Instruction *, StrideDescriptor> StrideAccesses;
- collectConstStridedAccesses(StrideAccesses, Strides);
+ // Holds all accesses with a constant stride.
+ MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
+ collectConstStrideAccesses(AccessStrideInfo, Strides);
- if (StrideAccesses.empty())
+ if (AccessStrideInfo.empty())
return;
+ // Collect the dependences in the loop.
+ collectDependences();
+
// Holds all interleaved store groups temporarily.
SmallSetVector<InterleaveGroup *, 4> StoreGroups;
// Holds all interleaved load groups temporarily.
SmallSetVector<InterleaveGroup *, 4> LoadGroups;
- // Search the load-load/write-write pair B-A in bottom-up order and try to
- // insert B into the interleave group of A according to 3 rules:
- // 1. A and B have the same stride.
- // 2. A and B have the same memory object size.
- // 3. B belongs to the group according to the distance.
+ // Search in bottom-up program order for pairs of accesses (A and B) that can
+ // form interleaved load or store groups. In the algorithm below, access A
+ // precedes access B in program order. We initialize a group for B in the
+ // outer loop of the algorithm, and then in the inner loop, we attempt to
+ // insert each A into B's group if:
+ //
+ // 1. A and B have the same stride,
+ // 2. A and B have the same memory object size, and
+ // 3. A belongs in B's group according to its distance from B.
//
- // The bottom-up order can avoid breaking the Write-After-Write dependences
- // between two pointers of the same base.
- // E.g. A[i] = a; (1)
- // A[i] = b; (2)
- // A[i+1] = c (3)
- // We form the group (2)+(3) in front, so (1) has to form groups with accesses
- // above (1), which guarantees that (1) is always above (2).
- for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
- ++I) {
- Instruction *A = I->first;
- StrideDescriptor DesA = I->second;
-
- InterleaveGroup *Group = getInterleaveGroup(A);
- if (!Group) {
- DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
- Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
+ // Special care is taken to ensure group formation will not break any
+ // dependences.
+ for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
+ BI != E; ++BI) {
+ Instruction *B = BI->first;
+ StrideDescriptor DesB = BI->second;
+
+ // Initialize a group for B if it has an allowable stride. Even if we don't
+ // create a group for B, we continue with the bottom-up algorithm to ensure
+ // we don't break any of B's dependences.
+ InterleaveGroup *Group = nullptr;
+ if (isStrided(DesB.Stride)) {
+ Group = getInterleaveGroup(B);
+ if (!Group) {
+ DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
+ Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
+ }
+ if (B->mayWriteToMemory())
+ StoreGroups.insert(Group);
+ else
+ LoadGroups.insert(Group);
}
- if (A->mayWriteToMemory())
- StoreGroups.insert(Group);
- else
- LoadGroups.insert(Group);
+ for (auto AI = std::next(BI); AI != E; ++AI) {
+ Instruction *A = AI->first;
+ StrideDescriptor DesA = AI->second;
+
+ // Our code motion strategy implies that we can't have dependences
+ // between accesses in an interleaved group and other accesses located
+ // between the first and last member of the group. Note that this also
+ // means that a group can't have more than one member at a given offset.
+ // The accesses in a group can have dependences with other accesses, but
+ // we must ensure we don't extend the boundaries of the group such that
+ // we encompass those dependent accesses.
+ //
+ // For example, assume we have the sequence of accesses shown below in a
+ // stride-2 loop:
+ //
+ // (1, 2) is a group | A[i] = a; // (1)
+ // | A[i-1] = b; // (2) |
+ // A[i-3] = c; // (3)
+ // A[i] = d; // (4) | (2, 4) is not a group
+ //
+ // Because accesses (2) and (3) are dependent, we can group (2) with (1)
+ // but not with (4). If we did, the dependent access (3) would be within
+ // the boundaries of the (2, 4) group.
+ if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
+ // If a dependence exists and A is already in a group, we know that A
+ // must be a store since A precedes B and WAR dependences are allowed.
+ // Thus, A would be sunk below B. We release A's group to prevent this
+ // illegal code motion. A will then be free to form another group with
+ // instructions that precede it.
+ if (isInterleaved(A)) {
+ InterleaveGroup *StoreGroup = getInterleaveGroup(A);
+ StoreGroups.remove(StoreGroup);
+ releaseGroup(StoreGroup);
+ }
- for (auto II = std::next(I); II != E; ++II) {
- Instruction *B = II->first;
- StrideDescriptor DesB = II->second;
+ // If a dependence exists and A is not already in a group (or it was
+ // and we just released it), B might be hoisted above A (if B is a
+ // load) or another store might be sunk below A (if B is a store). In
+ // either case, we can't add additional instructions to B's group. B
+ // will only form a group with instructions that it precedes.
+ break;
+ }
- // Ignore if B is already in a group or B is a different memory operation.
- if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
+ // At this point, we've checked for illegal code motion. If either A or B
+ // isn't strided, there's nothing left to do.
+ if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
continue;
- // Check the rule 1 and 2.
- if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
+ // Ignore A if it's already in a group or isn't the same kind of memory
+ // operation as B.
+ if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
continue;
- // Calculate the distance and prepare for the rule 3.
- const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
- PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
- if (!DistToA)
+ // Check rules 1 and 2. Ignore A if its stride or size is different from
+ // that of B.
+ if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
continue;
- int DistanceToA = DistToA->getAPInt().getSExtValue();
+ // Calculate the distance from A to B.
+ const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
+ PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
+ if (!DistToB)
+ continue;
+ int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
+
+ // Check rule 3. Ignore A if its distance to B is not a multiple of the
+ // size.
+ if (DistanceToB % static_cast<int64_t>(DesB.Size))
+ continue;
- // Skip if the distance is not multiple of size as they are not in the
- // same group.
- if (DistanceToA % static_cast<int>(DesA.Size))
+ // Ignore A if either A or B is in a predicated block. Although we
+ // currently prevent group formation for predicated accesses, we may be
+ // able to relax this limitation in the future once we handle more
+ // complicated blocks.
+ if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
continue;
- // The index of B is the index of A plus the related index to A.
- int IndexB =
- Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);
+ // The index of A is the index of B plus A's distance to B in multiples
+ // of the size.
+ int IndexA =
+ Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
- // Try to insert B into the group.
- if (Group->insertMember(B, IndexB, DesB.Align)) {
- DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
- << " into the interleave group with" << *A << '\n');
- InterleaveGroupMap[B] = Group;
+ // Try to insert A into B's group.
+ if (Group->insertMember(A, IndexA, DesA.Align)) {
+ DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
+ << " into the interleave group with" << *B << '\n');
+ InterleaveGroupMap[A] = Group;
// Set the first load in program order as the insert position.
- if (B->mayReadFromMemory())
- Group->setInsertPos(B);
+ if (A->mayReadFromMemory())
+ Group->setInsertPos(A);
}
- } // Iteration on instruction B
- } // Iteration on instruction A
+ } // Iteration over A accesses.
+ } // Iteration over B accesses.
// Remove interleaved store groups with gaps.
for (InterleaveGroup *Group : StoreGroups)
if (Group->getNumMembers() != Group->getFactor())
releaseGroup(Group);
- // Remove interleaved load groups that don't have the first and last member.
- // This guarantees that we won't do speculative out of bounds loads.
+ // If there is a non-reversed interleaved load group with gaps, we will need
+ // to execute at least one scalar epilogue iteration. This will ensure that
+ // we don't speculatively access memory out-of-bounds. Note that we only need
+ // to look for a member at index factor - 1, since every group must have a
+ // member at index zero.
for (InterleaveGroup *Group : LoadGroups)
- if (!Group->getMember(0) || !Group->getMember(Group->getFactor() - 1))
- releaseGroup(Group);
+ if (!Group->getMember(Group->getFactor() - 1)) {
+ if (Group->isReverse()) {
+ releaseGroup(Group);
+ } else {
+ DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
+ RequiresScalarEpilogue = true;
+ }
+ }
}
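// A minimal, self-contained sketch of the rule-3 index arithmetic used by the
// grouping loop above; the function name is illustrative and not part of the
// patch. Once A's distance to B is known to be a multiple of the access size,
// A's index is B's index plus that distance in multiples of the size.
static int64_t interleaveIndexFor(int64_t IndexB, int64_t DistanceToB,
                                  int64_t Size) {
  // E.g. in a stride-2 loop over i32 (Size = 4), an access 4 bytes below B
  // (DistanceToB = -4) lands at IndexB - 1.
  return IndexB + DistanceToB / Size;
}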
LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
// Width 1 means no vectorization.
- VectorizationFactor Factor = { 1U, 0U };
+ VectorizationFactor Factor = {1U, 0U};
if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
- emitAnalysis(VectorizationReport() <<
- "runtime pointer checks needed. Enable vectorization of this "
- "loop with '#pragma clang loop vectorize(enable)' when "
- "compiling with -Os/-Oz");
- DEBUG(dbgs() <<
- "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
+ emitAnalysis(
+ VectorizationReport()
+ << "runtime pointer checks needed. Enable vectorization of this "
+ "loop with '#pragma clang loop vectorize(enable)' when "
+ "compiling with -Os/-Oz");
+ DEBUG(dbgs()
+ << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
return Factor;
}
if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
- emitAnalysis(VectorizationReport() <<
- "store that is conditionally executed prevents vectorization");
+ emitAnalysis(
+ VectorizationReport()
+ << "store that is conditionally executed prevents vectorization");
DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
return Factor;
}
// Find the trip count.
- unsigned TC = SE->getSmallConstantTripCount(TheLoop);
+ unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
@@ -4755,16 +5201,25 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
unsigned WidestRegister = TTI.getRegisterBitWidth(true);
unsigned MaxSafeDepDist = -1U;
+
+ // Get the maximum safe dependence distance in bits computed by LAA. If the
+ // loop contains any interleaved accesses, we divide the dependence distance
+ // by the maximum interleave factor of all interleaved groups. Note that
+ // although the division ensures correctness, this is a fairly conservative
+ // computation because the maximum distance computed by LAA may not involve
+ // any of the interleaved accesses.
if (Legal->getMaxSafeDepDistBytes() != -1U)
- MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
- WidestRegister = ((WidestRegister < MaxSafeDepDist) ?
- WidestRegister : MaxSafeDepDist);
+ MaxSafeDepDist =
+ Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();
+
+ WidestRegister =
+ ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
unsigned MaxVectorSize = WidestRegister / WidestType;
DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
<< WidestType << " bits.\n");
- DEBUG(dbgs() << "LV: The Widest register is: "
- << WidestRegister << " bits.\n");
+ DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
+ << " bits.\n");
if (MaxVectorSize == 0) {
DEBUG(dbgs() << "LV: The target has no vector registers.\n");
@@ -4772,7 +5227,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
}
assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
- " into one vector!");
+ " into one vector!");
unsigned VF = MaxVectorSize;
if (MaximizeBandwidth && !OptForSize) {
@@ -4800,9 +5255,9 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
if (OptForSize) {
// If we are unable to calculate the trip count then don't try to vectorize.
if (TC < 2) {
- emitAnalysis
- (VectorizationReport() <<
- "unable to calculate the loop count due to complex control flow");
+ emitAnalysis(
+ VectorizationReport()
+ << "unable to calculate the loop count due to complex control flow");
DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
return Factor;
}
@@ -4815,11 +5270,11 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
else {
// If the trip count that we found modulo the vectorization factor is not
// zero then we require a tail.
- emitAnalysis(VectorizationReport() <<
- "cannot optimize for size and vectorize at the "
- "same time. Enable vectorization of this loop "
- "with '#pragma clang loop vectorize(enable)' "
- "when compiling with -Os/-Oz");
+ emitAnalysis(VectorizationReport()
+ << "cannot optimize for size and vectorize at the "
+ "same time. Enable vectorization of this loop "
+ "with '#pragma clang loop vectorize(enable)' "
+ "when compiling with -Os/-Oz");
DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
return Factor;
}
@@ -4834,7 +5289,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
return Factor;
}
- float Cost = expectedCost(1);
+ float Cost = expectedCost(1).first;
#ifndef NDEBUG
const float ScalarCost = Cost;
#endif /* NDEBUG */
@@ -4845,16 +5300,23 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
// Ignore scalar width, because the user explicitly wants vectorization.
if (ForceVectorization && VF > 1) {
Width = 2;
- Cost = expectedCost(Width) / (float)Width;
+ Cost = expectedCost(Width).first / (float)Width;
}
- for (unsigned i=2; i <= VF; i*=2) {
+ for (unsigned i = 2; i <= VF; i *= 2) {
// Notice that the vector loop needs to be executed fewer times, so
// we need to divide the cost of the vector loop by the width of
// the vector elements.
- float VectorCost = expectedCost(i) / (float)i;
- DEBUG(dbgs() << "LV: Vector loop of width " << i << " costs: " <<
- (int)VectorCost << ".\n");
+ VectorizationCostTy C = expectedCost(i);
+ float VectorCost = C.first / (float)i;
+ DEBUG(dbgs() << "LV: Vector loop of width " << i
+ << " costs: " << (int)VectorCost << ".\n");
+ if (!C.second && !ForceVectorization) {
+ DEBUG(
+ dbgs() << "LV: Not considering vector loop of width " << i
+ << " because it will not generate any vector instructions.\n");
+ continue;
+ }
if (VectorCost < Cost) {
Cost = VectorCost;
Width = i;
@@ -4864,7 +5326,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
<< "LV: Vectorization seems to be not beneficial, "
<< "but was forced by a user.\n");
- DEBUG(dbgs() << "LV: Selecting VF: "<< Width << ".\n");
+ DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
Factor.Width = Width;
Factor.Cost = Width * Cost;
return Factor;
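// A minimal sketch of the per-lane comparison performed by the selection loop
// above; the name and the cost table are illustrative, not part of the patch.
// Each candidate width's cost is divided by the width, and the smallest
// per-lane cost wins.
static unsigned pickBestWidth(const float *CostForWidth, unsigned MaxVF) {
  unsigned Width = 1;
  float Best = CostForWidth[1]; // The scalar cost is already per lane.
  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    float PerLane = CostForWidth[i] / (float)i;
    if (PerLane < Best) {
      Best = PerLane;
      Width = i;
    }
  }
  return Width;
}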
@@ -4877,25 +5339,22 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() {
const DataLayout &DL = TheFunction->getParent()->getDataLayout();
// For each block.
- for (Loop::block_iterator bb = TheLoop->block_begin(),
- be = TheLoop->block_end(); bb != be; ++bb) {
- BasicBlock *BB = *bb;
-
+ for (BasicBlock *BB : TheLoop->blocks()) {
// For each instruction in the loop.
- for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
- Type *T = it->getType();
+ for (Instruction &I : *BB) {
+ Type *T = I.getType();
// Skip ignored values.
- if (ValuesToIgnore.count(&*it))
+ if (ValuesToIgnore.count(&I))
continue;
// Only examine Loads, Stores and PHINodes.
- if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
+ if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
continue;
// Examine PHI nodes that are reduction variables. Update the type to
// account for the recurrence type.
- if (PHINode *PN = dyn_cast<PHINode>(it)) {
+ if (auto *PN = dyn_cast<PHINode>(&I)) {
if (!Legal->isReductionVariable(PN))
continue;
RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
@@ -4903,13 +5362,13 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() {
}
// Examine the stored values.
- if (StoreInst *ST = dyn_cast<StoreInst>(it))
+ if (auto *ST = dyn_cast<StoreInst>(&I))
T = ST->getValueOperand()->getType();
// Ignore loaded pointer types and stored pointer types that are not
// consecutive. However, we do want to take consecutive stores/loads of
// pointer vectors into account.
- if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it))
+ if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
continue;
MinWidth = std::min(MinWidth,
@@ -4949,13 +5408,13 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
return 1;
// Do not interleave loops with a relatively small trip count.
- unsigned TC = SE->getSmallConstantTripCount(TheLoop);
+ unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
return 1;
unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
- DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters <<
- " registers\n");
+ DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
+ << " registers\n");
if (VF == 1) {
if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
@@ -5002,7 +5461,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
// If we did not calculate the cost for VF (because the user selected the VF)
// then we calculate the cost of VF here.
if (LoopCost == 0)
- LoopCost = expectedCost(VF);
+ LoopCost = expectedCost(VF).first;
// Clamp the calculated IC to be between the 1 and the max interleave count
// that the target allows.
@@ -5044,8 +5503,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
// by this point), we can increase the critical path length if the loop
// we're interleaving is inside another loop. Limit, by default to 2, so the
// critical path only gets increased by one reduction operation.
- if (Legal->getReductionVars()->size() &&
- TheLoop->getLoopDepth() > 1) {
+ if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
SmallIC = std::min(SmallIC, F);
StoresIC = std::min(StoresIC, F);
@@ -5075,8 +5533,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
}
SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
-LoopVectorizationCostModel::calculateRegisterUsage(
- const SmallVector<unsigned, 8> &VFs) {
+LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
// This function calculates the register usage by measuring the highest number
// of values that are alive at a single location. Obviously, this is a very
// rough estimation. We scan the loop in a topological order and
@@ -5103,31 +5560,30 @@ LoopVectorizationCostModel::calculateRegisterUsage(
// Each 'key' in the map opens a new interval. The values
// of the map are the index of the 'last seen' usage of the
// instruction that is the key.
- typedef DenseMap<Instruction*, unsigned> IntervalMap;
+ typedef DenseMap<Instruction *, unsigned> IntervalMap;
// Maps instruction to its index.
- DenseMap<unsigned, Instruction*> IdxToInstr;
+ DenseMap<unsigned, Instruction *> IdxToInstr;
// Marks the end of each interval.
IntervalMap EndPoint;
// Saves the list of instruction indices that are used in the loop.
- SmallSet<Instruction*, 8> Ends;
+ SmallSet<Instruction *, 8> Ends;
// Saves the list of values that are used in the loop but are
// defined outside the loop, such as arguments and constants.
- SmallPtrSet<Value*, 8> LoopInvariants;
+ SmallPtrSet<Value *, 8> LoopInvariants;
unsigned Index = 0;
- for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
- be = DFS.endRPO(); bb != be; ++bb) {
- RU.NumInstructions += (*bb)->size();
- for (Instruction &I : **bb) {
+ for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
+ RU.NumInstructions += BB->size();
+ for (Instruction &I : *BB) {
IdxToInstr[Index++] = &I;
// Save the end location of each USE.
- for (unsigned i = 0; i < I.getNumOperands(); ++i) {
- Value *U = I.getOperand(i);
- Instruction *Instr = dyn_cast<Instruction>(U);
+ for (Value *U : I.operands()) {
+ auto *Instr = dyn_cast<Instruction>(U);
// Ignore non-instruction values such as arguments, constants, etc.
- if (!Instr) continue;
+ if (!Instr)
+ continue;
// If this instruction is outside the loop then record it and continue.
if (!TheLoop->contains(Instr)) {
@@ -5143,15 +5599,14 @@ LoopVectorizationCostModel::calculateRegisterUsage(
}
// Saves the list of intervals that end with the index in 'key'.
- typedef SmallVector<Instruction*, 2> InstrList;
+ typedef SmallVector<Instruction *, 2> InstrList;
DenseMap<unsigned, InstrList> TransposeEnds;
// Transpose the EndPoints to a list of values that end at each index.
- for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end();
- it != e; ++it)
- TransposeEnds[it->second].push_back(it->first);
+ for (auto &Interval : EndPoint)
+ TransposeEnds[Interval.second].push_back(Interval.first);
- SmallSet<Instruction*, 8> OpenIntervals;
+ SmallSet<Instruction *, 8> OpenIntervals;
// Get the size of the widest register.
unsigned MaxSafeDepDist = -1U;
@@ -5168,6 +5623,8 @@ LoopVectorizationCostModel::calculateRegisterUsage(
// A lambda that gets the register usage for the given type and VF.
auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
+ if (Ty->isTokenTy())
+ return 0U;
unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
};
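// Worked instances of the formula above, assuming a 256-bit widest register:
// an i64 value at VF = 8 occupies max(1, 8 * 64 / 256) = 2 registers, while
// an i32 value at VF = 4 computes 4 * 32 / 256 = 0 and is clamped to 1.
static_assert(8 * 64 / 256 == 2 && 4 * 32 / 256 == 0,
              "illustrative register-usage arithmetic; std::max clamps to 1");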
@@ -5175,16 +5632,17 @@ LoopVectorizationCostModel::calculateRegisterUsage(
for (unsigned int i = 0; i < Index; ++i) {
Instruction *I = IdxToInstr[i];
// Ignore instructions that are never used within the loop.
- if (!Ends.count(I)) continue;
-
- // Skip ignored values.
- if (ValuesToIgnore.count(I))
+ if (!Ends.count(I))
continue;
// Remove all of the instructions that end at this location.
InstrList &List = TransposeEnds[i];
- for (unsigned int j = 0, e = List.size(); j < e; ++j)
- OpenIntervals.erase(List[j]);
+ for (Instruction *ToRemove : List)
+ OpenIntervals.erase(ToRemove);
+
+ // Skip ignored values.
+ if (ValuesToIgnore.count(I))
+ continue;
// For each VF find the maximum usage of registers.
for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
@@ -5195,8 +5653,12 @@ LoopVectorizationCostModel::calculateRegisterUsage(
// Count the number of live intervals.
unsigned RegUsage = 0;
- for (auto Inst : OpenIntervals)
+ for (auto Inst : OpenIntervals) {
+ // Skip ignored values for VF > 1.
+ if (VecValuesToIgnore.count(Inst))
+ continue;
RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
+ }
MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
}
@@ -5216,7 +5678,7 @@ LoopVectorizationCostModel::calculateRegisterUsage(
Invariant += GetRegUsage(Inst->getType(), VFs[i]);
}
- DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
+ DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
@@ -5229,48 +5691,62 @@ LoopVectorizationCostModel::calculateRegisterUsage(
return RUs;
}
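// A minimal sketch of the interval-counting idea above, with illustrative
// names (the real code tracks OpenIntervals incrementally and per VF). Given
// the index of the last use of the value defined at each position, register
// pressure at a point is the number of intervals still live past it.
static unsigned maxOpenIntervals(const unsigned *LastUse, unsigned NumInstrs) {
  unsigned Max = 0;
  for (unsigned P = 0; P < NumInstrs; ++P) {
    unsigned Open = 0;
    for (unsigned Def = 0; Def <= P; ++Def)
      if (LastUse[Def] > P)
        ++Open; // The interval [Def, LastUse[Def]] is still live past P.
    Max = std::max(Max, Open);
  }
  return Max;
}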
-unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) {
- unsigned Cost = 0;
+LoopVectorizationCostModel::VectorizationCostTy
+LoopVectorizationCostModel::expectedCost(unsigned VF) {
+ VectorizationCostTy Cost;
// For each block.
- for (Loop::block_iterator bb = TheLoop->block_begin(),
- be = TheLoop->block_end(); bb != be; ++bb) {
- unsigned BlockCost = 0;
- BasicBlock *BB = *bb;
+ for (BasicBlock *BB : TheLoop->blocks()) {
+ VectorizationCostTy BlockCost;
// For each instruction in the old loop.
- for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+ for (Instruction &I : *BB) {
// Skip dbg intrinsics.
- if (isa<DbgInfoIntrinsic>(it))
+ if (isa<DbgInfoIntrinsic>(I))
continue;
// Skip ignored values.
- if (ValuesToIgnore.count(&*it))
+ if (ValuesToIgnore.count(&I))
continue;
- unsigned C = getInstructionCost(&*it, VF);
+ VectorizationCostTy C = getInstructionCost(&I, VF);
// Check if we should override the cost.
if (ForceTargetInstructionCost.getNumOccurrences() > 0)
- C = ForceTargetInstructionCost;
+ C.first = ForceTargetInstructionCost;
- BlockCost += C;
- DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
- VF << " For instruction: " << *it << '\n');
+ BlockCost.first += C.first;
+ BlockCost.second |= C.second;
+ DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
+ << VF << " For instruction: " << I << '\n');
}
// We assume that if-converted blocks have a 50% chance of being executed.
// When the code is scalar then some of the blocks are avoided due to CF.
// When the code is vectorized we execute all code paths.
- if (VF == 1 && Legal->blockNeedsPredication(*bb))
- BlockCost /= 2;
+ if (VF == 1 && Legal->blockNeedsPredication(BB))
+ BlockCost.first /= 2;
- Cost += BlockCost;
+ Cost.first += BlockCost.first;
+ Cost.second |= BlockCost.second;
}
return Cost;
}
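// The pair returned above couples the numeric cost with a flag recording
// whether anything in the loop would actually be emitted as vector code;
// selectVectorizationFactor skips widths whose flag is false unless
// vectorization is forced. A sketch of the accumulation, with an illustrative
// name:
static std::pair<unsigned, bool> combineCosts(std::pair<unsigned, bool> A,
                                              std::pair<unsigned, bool> B) {
  // Costs add; "generates vector instructions" is sticky across the loop.
  return {A.first + B.first, A.second || B.second};
}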
+/// \brief Check if the load/store instruction \p I may be translated into
+/// gather/scatter during vectorization.
+///
+/// Pointer \p Ptr specifies the address in memory for the given scalar memory
+/// instruction. We need it to retrieve the data type.
+/// Using gather/scatter is possible when it is supported by the target.
+static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr,
+ LoopVectorizationLegality *Legal) {
+ auto *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
+ return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) ||
+ (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy));
+}
+
/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
@@ -5284,7 +5760,7 @@ static bool isLikelyComplexAddressComputation(Value *Ptr,
LoopVectorizationLegality *Legal,
ScalarEvolution *SE,
const Loop *TheLoop) {
- GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
+ auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
if (!Gep)
return true;
@@ -5309,7 +5785,7 @@ static bool isLikelyComplexAddressComputation(Value *Ptr,
// Check the step is constant.
const SCEV *Step = AddRec->getStepRecurrence(*SE);
// Calculate the pointer stride and check if it is consecutive.
- const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
+ const auto *C = dyn_cast<SCEVConstant>(Step);
if (!C)
return true;
@@ -5329,17 +5805,29 @@ static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
Legal->hasStride(I->getOperand(1));
}
-unsigned
+LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
// If we know that this instruction will remain uniform, check the cost of
// the scalar version.
if (Legal->isUniformAfterVectorization(I))
VF = 1;
+ Type *VectorTy;
+ unsigned C = getInstructionCost(I, VF, VectorTy);
+
+ bool TypeNotScalarized =
+ VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
+ return VectorizationCostTy(C, TypeNotScalarized);
+}
+
+unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
+ unsigned VF,
+ Type *&VectorTy) {
Type *RetTy = I->getType();
if (VF > 1 && MinBWs.count(I))
RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
- Type *VectorTy = ToVectorTy(RetTy, VF);
+ VectorTy = ToVectorTy(RetTy, VF);
+ auto SE = PSE.getSE();
// TODO: We need to estimate the cost of intrinsic calls.
switch (I->getOpcode()) {
@@ -5352,9 +5840,17 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
case Instruction::Br: {
return TTI.getCFInstrCost(I->getOpcode());
}
- case Instruction::PHI:
- //TODO: IF-converted IFs become selects.
+ case Instruction::PHI: {
+ auto *Phi = cast<PHINode>(I);
+
+ // First-order recurrences are replaced by vector shuffles inside the loop.
+ if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
+ return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
+ VectorTy, VF - 1, VectorTy);
+
+ // TODO: IF-converted IFs become selects.
return 0;
+ }
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
@@ -5379,9 +5875,9 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
// Certain instructions can be cheaper to vectorize if they have a constant
// second vector operand. One example of this are shifts on x86.
TargetTransformInfo::OperandValueKind Op1VK =
- TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueKind Op2VK =
- TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueProperties Op1VP =
TargetTransformInfo::OP_None;
TargetTransformInfo::OperandValueProperties Op2VP =
@@ -5432,20 +5928,28 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
case Instruction::Load: {
StoreInst *SI = dyn_cast<StoreInst>(I);
LoadInst *LI = dyn_cast<LoadInst>(I);
- Type *ValTy = (SI ? SI->getValueOperand()->getType() :
- LI->getType());
+ Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
VectorTy = ToVectorTy(ValTy, VF);
unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
- unsigned AS = SI ? SI->getPointerAddressSpace() :
- LI->getPointerAddressSpace();
+ unsigned AS =
+ SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
// We add the cost of address computation here instead of with the gep
// instruction because only here we know whether the operation is
// scalarized.
if (VF == 1)
return TTI.getAddressComputationCost(VectorTy) +
- TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
+ TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
+
+ if (LI && Legal->isUniform(Ptr)) {
+ // Scalar load + broadcast
+ unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
+ Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
+ Alignment, AS);
+ return Cost +
+ TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
+ }
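// The uniform-address case above sums one scalar address computation, one
// scalar load, and one broadcast shuffle; with illustrative unit costs of
// 1 each, a uniform load costs 3 regardless of VF.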
// For an interleaved access, calculate the total cost of the whole
// interleave group.
@@ -5463,7 +5967,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
VectorTy->getVectorNumElements() * InterleaveFactor);
// Holds the indices of existing members in an interleaved load group.
- // An interleaved store group doesn't need this as it dones't allow gaps.
+ // An interleaved store group doesn't need this as it doesn't allow gaps.
SmallVector<unsigned, 4> Indices;
if (LI) {
for (unsigned i = 0; i < InterleaveFactor; i++)
@@ -5489,13 +5993,17 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
// Scalarized loads/stores.
int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
+ bool UseGatherOrScatter =
+ (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal);
+
bool Reverse = ConsecutiveStride < 0;
const DataLayout &DL = I->getModule()->getDataLayout();
- unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
- unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
- if (!ConsecutiveStride || ScalarAllocatedSize != VectorElementSize) {
+ uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
+ uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
+ if ((!ConsecutiveStride && !UseGatherOrScatter) ||
+ ScalarAllocatedSize != VectorElementSize) {
bool IsComplexComputation =
- isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
+ isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
unsigned Cost = 0;
// The cost of extracting from the value vector and pointer vector.
Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
@@ -5505,29 +6013,36 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
// In case of STORE, the cost of ExtractElement from the vector.
// In case of LOAD, the cost of InsertElement into the returned
// vector.
- Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement :
- Instruction::InsertElement,
- VectorTy, i);
+ Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
+ : Instruction::InsertElement,
+ VectorTy, i);
}
// The cost of the scalar loads/stores.
Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
- Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
- Alignment, AS);
+ Cost += VF *
+ TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
+ Alignment, AS);
return Cost;
}
- // Wide load/stores.
unsigned Cost = TTI.getAddressComputationCost(VectorTy);
+ if (UseGatherOrScatter) {
+ assert(ConsecutiveStride == 0 &&
+ "Gather/Scatter are not used for consecutive stride");
+ return Cost +
+ TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
+ Legal->isMaskRequired(I), Alignment);
+ }
+ // Wide load/stores.
if (Legal->isMaskRequired(I))
- Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment,
- AS);
+ Cost +=
+ TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
else
Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
if (Reverse)
- Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
- VectorTy, 0);
+ Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
return Cost;
}
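// To summarize the memory-access costing above: VF == 1 takes the scalar
// path; a load from a uniform address is a scalar load plus a broadcast; a
// member of an interleave group is charged its share of the group's wide
// accesses and shuffles; accesses that are neither consecutive nor legal
// gathers/scatters (or whose scalar and vector element sizes differ) are
// scalarized; and the rest become gathers/scatters or wide consecutive
// accesses, with a reverse shuffle added for negative strides.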
case Instruction::ZExt:
@@ -5548,7 +6063,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
Legal->isInductionVariable(I->getOperand(0)))
return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
I->getOperand(0)->getType());
-
+
Type *SrcScalarTy = I->getOperand(0)->getType();
Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
if (VF > 1 && MinBWs.count(I)) {
@@ -5560,23 +6075,23 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
Type *MinVecTy = VectorTy;
if (I->getOpcode() == Instruction::Trunc) {
SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
- VectorTy = largestIntegerVectorType(ToVectorTy(I->getType(), VF),
- MinVecTy);
+ VectorTy =
+ largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
} else if (I->getOpcode() == Instruction::ZExt ||
I->getOpcode() == Instruction::SExt) {
SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
- VectorTy = smallestIntegerVectorType(ToVectorTy(I->getType(), VF),
- MinVecTy);
+ VectorTy =
+ smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
}
}
-
+
return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
}
case Instruction::Call: {
bool NeedToScalarize;
CallInst *CI = cast<CallInst>(I);
unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
- if (getIntrinsicIDForCall(CI, TLI))
+ if (getVectorIntrinsicIDForCall(CI, TLI))
return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
return CallCost;
}
@@ -5587,10 +6102,10 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
unsigned Cost = 0;
if (!RetTy->isVoidTy() && VF != 1) {
- unsigned InsCost = TTI.getVectorInstrCost(Instruction::InsertElement,
- VectorTy);
- unsigned ExtCost = TTI.getVectorInstrCost(Instruction::ExtractElement,
- VectorTy);
+ unsigned InsCost =
+ TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
+ unsigned ExtCost =
+ TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);
// The cost of inserting the results plus extracting each one of the
// operands.
@@ -5602,7 +6117,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
return Cost;
}
- }// end of switch.
+ } // end of switch.
}
char LoopVectorize::ID = 0;
@@ -5616,31 +6131,101 @@ INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
-INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
-INITIALIZE_PASS_DEPENDENCY(DemandedBits)
+INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
namespace llvm {
- Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
- return new LoopVectorize(NoUnrolling, AlwaysVectorize);
- }
+Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
+ return new LoopVectorize(NoUnrolling, AlwaysVectorize);
+}
}
bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
// Check for a store.
- if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
+ if (auto *ST = dyn_cast<StoreInst>(Inst))
return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;
// Check for a load.
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ if (auto *LI = dyn_cast<LoadInst>(Inst))
return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;
return false;
}
+void LoopVectorizationCostModel::collectValuesToIgnore() {
+ // Ignore ephemeral values.
+ CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
+
+ // Ignore type-promoting instructions we identified during reduction
+ // detection.
+ for (auto &Reduction : *Legal->getReductionVars()) {
+ RecurrenceDescriptor &RedDes = Reduction.second;
+ SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
+ VecValuesToIgnore.insert(Casts.begin(), Casts.end());
+ }
+
+ // Ignore induction phis that are only used in either a GetElementPtr or an
+ // ICmp instruction that exits the loop. Induction variables usually have
+ // large types and can have a big impact when estimating register usage.
+ // This is for when VF > 1.
+ for (auto &Induction : *Legal->getInductionVars()) {
+ auto *PN = Induction.first;
+ auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
+
+ // Check that the PHI is only used by the induction increment (UpdateV) or
+ // by GEPs. Then check that UpdateV is only used by a compare instruction,
+ // the loop header PHI, or by GEPs.
+ // FIXME: Need precise def-use analysis to determine if this induction
+ // variable will be vectorized.
+ if (all_of(PN->users(),
+ [&](const User *U) -> bool {
+ return U == UpdateV || isa<GetElementPtrInst>(U);
+ }) &&
+ all_of(UpdateV->users(), [&](const User *U) -> bool {
+ return U == PN || isa<ICmpInst>(U) || isa<GetElementPtrInst>(U);
+ })) {
+ VecValuesToIgnore.insert(PN);
+ VecValuesToIgnore.insert(UpdateV);
+ }
+ }
+
+ // Ignore instructions that will not be vectorized.
+ // This is for when VF > 1.
+ for (BasicBlock *BB : TheLoop->blocks()) {
+ for (auto &Inst : *BB) {
+ switch (Inst.getOpcode()) {
+ case Instruction::GetElementPtr: {
+ // Ignore a GEP if its last operand is an induction variable: the access
+ // it computes is then a consecutive load/store that won't be vectorized
+ // as a scatter/gather pattern.
+
+ GetElementPtrInst *Gep = cast<GetElementPtrInst>(&Inst);
+ unsigned NumOperands = Gep->getNumOperands();
+ unsigned InductionOperand = getGEPInductionOperand(Gep);
+ bool GepToIgnore = true;
+
+ // Check that all of the gep indices are uniform except for the
+ // induction operand.
+ for (unsigned i = 0; i != NumOperands; ++i) {
+ if (i != InductionOperand &&
+ !PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
+ TheLoop)) {
+ GepToIgnore = false;
+ break;
+ }
+ }
+
+ if (GepToIgnore)
+ VecValuesToIgnore.insert(&Inst);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+}
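// An illustrative instance of the GEP screening above: for an access "A[i]"
// with induction variable i, the only non-invariant index is the induction
// operand, so the GEP is ignored for VF > 1; it folds into a consecutive
// load or store instead of surviving as a vectorized address computation.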
void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
bool IfPredicateStore) {
@@ -5651,9 +6236,7 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
setDebugLocFromInst(Builder, Instr);
// Find all of the vectorized parameters.
- for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
- Value *SrcOp = Instr->getOperand(op);
-
+ for (Value *SrcOp : Instr->operands()) {
// If we are accessing the old induction variable, use the new one.
if (SrcOp == OldInduction) {
Params.push_back(getVectorValue(SrcOp));
@@ -5683,8 +6266,7 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? nullptr :
- UndefValue::get(Instr->getType());
+ Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
@@ -5711,43 +6293,43 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
}
Instruction *Cloned = Instr->clone();
- if (!IsVoidRetTy)
- Cloned->setName(Instr->getName() + ".cloned");
- // Replace the operands of the cloned instructions with extracted scalars.
- for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
- Value *Op = Params[op][Part];
- Cloned->setOperand(op, Op);
- }
+ if (!IsVoidRetTy)
+ Cloned->setName(Instr->getName() + ".cloned");
+ // Replace the operands of the cloned instructions with extracted scalars.
+ for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
+ Value *Op = Params[op][Part];
+ Cloned->setOperand(op, Op);
+ }
- // Place the cloned scalar in the new loop.
- Builder.Insert(Cloned);
+ // Place the cloned scalar in the new loop.
+ Builder.Insert(Cloned);
- // If the original scalar returns a value we need to place it in a vector
- // so that future users will be able to use it.
- if (!IsVoidRetTy)
- VecResults[Part] = Cloned;
+ // If we just cloned a new assumption, add it the assumption cache.
+ if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
+ if (II->getIntrinsicID() == Intrinsic::assume)
+ AC->registerAssumption(II);
- // End if-block.
- if (IfPredicateStore)
- PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
- Cmp));
+ // If the original scalar returns a value we need to place it in a vector
+ // so that future users will be able to use it.
+ if (!IsVoidRetTy)
+ VecResults[Part] = Cloned;
+
+ // End if-block.
+ if (IfPredicateStore)
+ PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
}
}
void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
- StoreInst *SI = dyn_cast<StoreInst>(Instr);
+ auto *SI = dyn_cast<StoreInst>(Instr);
bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));
return scalarizeInstruction(Instr, IfPredicateStore);
}
-Value *InnerLoopUnroller::reverseVector(Value *Vec) {
- return Vec;
-}
+Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
-Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) {
- return V;
-}
+Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
// When unrolling and the VF is 1, we only need to add a simple scalar.
@@ -5756,3 +6338,346 @@ Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
Constant *C = ConstantInt::get(ITy, StartIdx);
return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}
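// For the unroller the "step vector" above degenerates to scalar arithmetic:
// with Val = %i and Step = 1, unroll part K receives %i + K, so parts 0..3
// of a 4-way interleaved scalar loop use %i, %i + 1, %i + 2, and %i + 3.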
+
+static void AddRuntimeUnrollDisableMetaData(Loop *L) {
+ SmallVector<Metadata *, 4> MDs;
+ // Reserve first location for self reference to the LoopID metadata node.
+ MDs.push_back(nullptr);
+ bool IsUnrollMetadata = false;
+ MDNode *LoopID = L->getLoopID();
+ if (LoopID) {
+ // First find existing loop unrolling disable metadata.
+ for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
+ auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+ if (MD) {
+ const auto *S = dyn_cast<MDString>(MD->getOperand(0));
+ IsUnrollMetadata =
+ S && S->getString().startswith("llvm.loop.unroll.disable");
+ }
+ MDs.push_back(LoopID->getOperand(i));
+ }
+ }
+
+ if (!IsUnrollMetadata) {
+ // Add runtime unroll disable metadata.
+ LLVMContext &Context = L->getHeader()->getContext();
+ SmallVector<Metadata *, 1> DisableOperands;
+ DisableOperands.push_back(
+ MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
+ MDNode *DisableNode = MDNode::get(Context, DisableOperands);
+ MDs.push_back(DisableNode);
+ MDNode *NewLoopID = MDNode::get(Context, MDs);
+ // Set operand 0 to refer to the loop id itself.
+ NewLoopID->replaceOperandWith(0, NewLoopID);
+ L->setLoopID(NewLoopID);
+ }
+}
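// A sketch of the IR this produces; the metadata node numbers are
// illustrative:
//
//   br i1 %exitcond, label %exit, label %header, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}
//
// Operand 0 of !0 is the self-reference installed by replaceOperandWith.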
+
+bool LoopVectorizePass::processLoop(Loop *L) {
+ assert(L->empty() && "Only process inner loops.");
+
+#ifndef NDEBUG
+ const std::string DebugLocStr = getDebugLocString(L);
+#endif /* NDEBUG */
+
+ DEBUG(dbgs() << "\nLV: Checking a loop in \""
+ << L->getHeader()->getParent()->getName() << "\" from "
+ << DebugLocStr << "\n");
+
+ LoopVectorizeHints Hints(L, DisableUnrolling);
+
+ DEBUG(dbgs() << "LV: Loop hints:"
+ << " force="
+ << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
+ ? "disabled"
+ : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
+ ? "enabled"
+ : "?"))
+ << " width=" << Hints.getWidth()
+ << " unroll=" << Hints.getInterleave() << "\n");
+
+ // Function containing loop
+ Function *F = L->getHeader()->getParent();
+
+ // Looking at the diagnostic output is the only way to determine if a loop
+ // was vectorized (other than looking at the IR or machine code), so it
+ // is important to generate an optimization remark for each loop. Most of
+ // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
+ // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
+ // less verbose; they report vectorized loops and unvectorized loops that
+ // may benefit from vectorization, respectively.
+
+ if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
+ DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
+ return false;
+ }
+
+ // Check the loop for a trip count threshold:
+ // do not vectorize loops with a tiny trip count.
+ const unsigned TC = SE->getSmallConstantTripCount(L);
+ if (TC > 0u && TC < TinyTripCountVectorThreshold) {
+ DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
+ << "This loop is not worth vectorizing.");
+ if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
+ DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
+ else {
+ DEBUG(dbgs() << "\n");
+ emitAnalysisDiag(F, L, Hints, VectorizationReport()
+ << "vectorization is not beneficial "
+ "and is not explicitly forced");
+ return false;
+ }
+ }
+
+ PredicatedScalarEvolution PSE(*SE, *L);
+
+ // Check if it is legal to vectorize the loop.
+ LoopVectorizationRequirements Requirements;
+ LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI,
+ &Requirements, &Hints);
+ if (!LVL.canVectorize()) {
+ DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
+ emitMissedWarning(F, L, Hints);
+ return false;
+ }
+
+ // Use the cost model.
+ LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, F,
+ &Hints);
+ CM.collectValuesToIgnore();
+
+ // Check the function attributes to find out if this function should be
+ // optimized for size.
+ bool OptForSize =
+ Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
+
+ // Compute the weighted frequency of this loop being executed and see if it
+ // is less than 20% of the function entry baseline frequency. Note that we
+ // always have a canonical loop here because we think we *can* vectorize.
+ // FIXME: This is hidden behind a flag due to pervasive problems with
+ // exactly what block frequency models.
+ if (LoopVectorizeWithBlockFrequency) {
+ BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
+ if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+ LoopEntryFreq < ColdEntryFreq)
+ OptForSize = true;
+ }
+
+ // Check the function attributes to see if implicit floats are allowed.
+ // FIXME: This check doesn't seem possibly correct -- what if the loop is
+ // an integer loop and the vector instructions selected are purely integer
+ // vector instructions?
+ if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
+ "attribute is used.\n");
+ emitAnalysisDiag(
+ F, L, Hints,
+ VectorizationReport()
+ << "loop not vectorized due to NoImplicitFloat attribute");
+ emitMissedWarning(F, L, Hints);
+ return false;
+ }
+
+ // Check if the target supports potentially unsafe FP vectorization.
+ // FIXME: Add a check for the type of safety issue (denormal, signaling)
+ // for the target we're vectorizing for, to make sure none of the
+ // additional fp-math flags can help.
+ if (Hints.isPotentiallyUnsafe() &&
+ TTI->isFPVectorizationPotentiallyUnsafe()) {
+ DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
+ emitAnalysisDiag(F, L, Hints,
+ VectorizationReport()
+ << "loop not vectorized due to unsafe FP support.");
+ emitMissedWarning(F, L, Hints);
+ return false;
+ }
+
+ // Select the optimal vectorization factor.
+ const LoopVectorizationCostModel::VectorizationFactor VF =
+ CM.selectVectorizationFactor(OptForSize);
+
+ // Select the interleave count.
+ unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
+
+ // Get user interleave count.
+ unsigned UserIC = Hints.getInterleave();
+
+ // Identify the diagnostic messages that should be produced.
+ std::string VecDiagMsg, IntDiagMsg;
+ bool VectorizeLoop = true, InterleaveLoop = true;
+
+ if (Requirements.doesNotMeet(F, L, Hints)) {
+ DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
+ "requirements.\n");
+ emitMissedWarning(F, L, Hints);
+ return false;
+ }
+
+ if (VF.Width == 1) {
+ DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
+ VecDiagMsg =
+ "the cost-model indicates that vectorization is not beneficial";
+ VectorizeLoop = false;
+ }
+
+ if (IC == 1 && UserIC <= 1) {
+ // Tell the user interleaving is not beneficial.
+ DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
+ IntDiagMsg =
+ "the cost-model indicates that interleaving is not beneficial";
+ InterleaveLoop = false;
+ if (UserIC == 1)
+ IntDiagMsg +=
+ " and is explicitly disabled or interleave count is set to 1";
+ } else if (IC > 1 && UserIC == 1) {
+ // Tell the user interleaving is beneficial, but it is explicitly disabled.
+ DEBUG(dbgs()
+ << "LV: Interleaving is beneficial but is explicitly disabled.\n");
+ IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
+ "but is explicitly disabled or interleave count is set to 1";
+ InterleaveLoop = false;
+ }
+
+ // Override IC if user provided an interleave count.
+ IC = UserIC > 0 ? UserIC : IC;
+
+ // Emit diagnostic messages, if any.
+ const char *VAPassName = Hints.vectorizeAnalysisPassName();
+ if (!VectorizeLoop && !InterleaveLoop) {
+ // Do not vectorize or interleave the loop.
+ emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
+ L->getStartLoc(), VecDiagMsg);
+ emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
+ L->getStartLoc(), IntDiagMsg);
+ return false;
+ } else if (!VectorizeLoop && InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
+ L->getStartLoc(), VecDiagMsg);
+ } else if (VectorizeLoop && !InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
+ << DebugLocStr << '\n');
+ emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
+ L->getStartLoc(), IntDiagMsg);
+ } else if (VectorizeLoop && InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
+ << DebugLocStr << '\n');
+ DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ }
+
+ if (!VectorizeLoop) {
+ assert(IC > 1 && "interleave count should not be 1 or 0");
+ // If we decided that it is not legal to vectorize the loop, then
+ // interleave it.
+ InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
+ Unroller.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore);
+
+ emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
+ Twine("interleaved loop (interleaved count: ") +
+ Twine(IC) + ")");
+ } else {
+ // If we decided that it is *legal* to vectorize the loop, then do it.
+ InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
+ LB.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore);
+ ++LoopsVectorized;
+
+ // Add metadata to disable runtime unrolling a scalar loop when there are
+ // no runtime checks about strides and memory. A scalar loop that is
+ // rarely used is not worth unrolling.
+ if (!LB.areSafetyChecksAdded())
+ AddRuntimeUnrollDisableMetaData(L);
+
+ // Report the vectorization decision.
+ emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
+ Twine("vectorized loop (vectorization width: ") +
+ Twine(VF.Width) + ", interleaved count: " +
+ Twine(IC) + ")");
+ }
+
+ // Mark the loop as already vectorized to avoid vectorizing again.
+ Hints.setAlreadyVectorized();
+
+ DEBUG(verifyFunction(*L->getHeader()->getParent()));
+ return true;
+}
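// A summary of the four outcomes of the decision logic above (UserIC may
// override the computed interleave count):
//
//   VectorizeLoop  InterleaveLoop  action
//   false          false           emit both analysis remarks; give up
//   false          true            InnerLoopUnroller: interleave scalar loop
//   true           false           InnerLoopVectorizer at VF.Width, IC == 1
//   true           true            InnerLoopVectorizer at VF.Width with IC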
+
+bool LoopVectorizePass::runImpl(
+ Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
+ DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
+ DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
+ std::function<const LoopAccessInfo &(Loop &)> &GetLAA_) {
+
+ SE = &SE_;
+ LI = &LI_;
+ TTI = &TTI_;
+ DT = &DT_;
+ BFI = &BFI_;
+ TLI = TLI_;
+ AA = &AA_;
+ AC = &AC_;
+ GetLAA = &GetLAA_;
+ DB = &DB_;
+
+ // Compute some weights outside of the loop over the loops. Compute this
+ // using a BranchProbability to re-use its scaling math.
+ const BranchProbability ColdProb(1, 5); // 20%
+ ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
+
+ // Don't attempt if
+ // 1. the target claims to have no vector registers, and
+ // 2. interleaving won't help ILP.
+ //
+ // The second condition is necessary because, even if the target has no
+ // vector registers, loop vectorization may still enable scalar
+ // interleaving.
+ if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
+ return false;
+
+ // Build up a worklist of inner-loops to vectorize. This is necessary as
+ // the act of vectorizing or partially unrolling a loop creates new loops
+ // and can invalidate iterators across the loops.
+ SmallVector<Loop *, 8> Worklist;
+
+ for (Loop *L : *LI)
+ addInnerLoop(*L, Worklist);
+
+ LoopsAnalyzed += Worklist.size();
+
+ // Now walk the identified inner loops.
+ bool Changed = false;
+ while (!Worklist.empty())
+ Changed |= processLoop(Worklist.pop_back_val());
+
+ return Changed;
+}
+
+PreservedAnalyses LoopVectorizePass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
+ auto &LI = AM.getResult<LoopAnalysis>(F);
+ auto &TTI = AM.getResult<TargetIRAnalysis>(F);
+ auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
+ auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
+ auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
+ auto &AA = AM.getResult<AAManager>(F);
+ auto &AC = AM.getResult<AssumptionAnalysis>(F);
+ auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
+
+ auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
+ std::function<const LoopAccessInfo &(Loop &)> GetLAA =
+ [&](Loop &L) -> const LoopAccessInfo & {
+ return LAM.getResult<LoopAccessAnalysis>(L);
+ };
+ bool Changed = runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA);
+ if (!Changed)
+ return PreservedAnalyses::all();
+ PreservedAnalyses PA;
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<BasicAA>();
+ PA.preserve<GlobalsAA>();
+ return PA;
+}