Diffstat (limited to 'llvm/lib/Transforms/Vectorize/VectorCombine.cpp')
-rw-r--r--  llvm/lib/Transforms/Vectorize/VectorCombine.cpp  699
1 file changed, 699 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
new file mode 100644
index 000000000000..64b41bf9cefa
--- /dev/null
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -0,0 +1,699 @@
+//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass optimizes scalar/vector interactions using target cost models. The
+// transforms implemented here may not fit in traditional loop-based or SLP
+// vectorization passes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Vectorize/VectorCombine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Vectorize.h"
+
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+#define DEBUG_TYPE "vector-combine"
+STATISTIC(NumVecCmp, "Number of vector compares formed");
+STATISTIC(NumVecBO, "Number of vector binops formed");
+STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
+STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
+STATISTIC(NumScalarBO, "Number of scalar binops formed");
+STATISTIC(NumScalarCmp, "Number of scalar compares formed");
+
+static cl::opt<bool> DisableVectorCombine(
+ "disable-vector-combine", cl::init(false), cl::Hidden,
+ cl::desc("Disable all vector combine transforms"));
+
+static cl::opt<bool> DisableBinopExtractShuffle(
+ "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
+ cl::desc("Disable binop extract to shuffle transforms"));
+
+static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();
+
+namespace {
+class VectorCombine {
+public:
+ VectorCombine(Function &F, const TargetTransformInfo &TTI,
+ const DominatorTree &DT)
+ : F(F), Builder(F.getContext()), TTI(TTI), DT(DT) {}
+
+ bool run();
+
+private:
+ Function &F;
+ IRBuilder<> Builder;
+ const TargetTransformInfo &TTI;
+ const DominatorTree &DT;
+
+ ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
+ ExtractElementInst *Ext1,
+ unsigned PreferredExtractIndex) const;
+ bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
+ unsigned Opcode,
+ ExtractElementInst *&ConvertToShuffle,
+ unsigned PreferredExtractIndex);
+ void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
+ Instruction &I);
+ void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
+ Instruction &I);
+ bool foldExtractExtract(Instruction &I);
+ bool foldBitcastShuf(Instruction &I);
+ bool scalarizeBinopOrCmp(Instruction &I);
+ bool foldExtractedCmps(Instruction &I);
+};
+} // namespace
+
+static void replaceValue(Value &Old, Value &New) {
+ Old.replaceAllUsesWith(&New);
+ New.takeName(&Old);
+}
+
+/// Determine which, if any, of the inputs should be replaced by a shuffle
+/// followed by extract from a different index.
+ExtractElementInst *VectorCombine::getShuffleExtract(
+ ExtractElementInst *Ext0, ExtractElementInst *Ext1,
+ unsigned PreferredExtractIndex = InvalidIndex) const {
+ assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
+ isa<ConstantInt>(Ext1->getIndexOperand()) &&
+ "Expected constant extract indexes");
+
+ unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
+ unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();
+
+ // If the extract indexes are identical, no shuffle is needed.
+ if (Index0 == Index1)
+ return nullptr;
+
+ Type *VecTy = Ext0->getVectorOperand()->getType();
+ assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
+ int Cost0 = TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
+ int Cost1 = TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
+
+ // We are extracting from 2 different indexes, so one operand must be shuffled
+ // before performing a vector operation and/or extract. The more expensive
+ // extract will be replaced by a shuffle.
+ if (Cost0 > Cost1)
+ return Ext0;
+ if (Cost1 > Cost0)
+ return Ext1;
+
+ // If the costs are equal and there is a preferred extract index, shuffle the
+ // opposite operand.
+ if (PreferredExtractIndex == Index0)
+ return Ext1;
+ if (PreferredExtractIndex == Index1)
+ return Ext0;
+
+ // Otherwise, replace the extract with the higher index.
+ return Index0 > Index1 ? Ext0 : Ext1;
+}
+
+/// Compare the relative costs of 2 extracts followed by scalar operation vs.
+/// vector operation(s) followed by extract. Return true if the existing
+/// instructions are cheaper than a vector alternative. Otherwise, return false
+/// and if one of the extracts should be transformed to a shufflevector, set
+/// \p ConvertToShuffle to that extract instruction.
+bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
+ ExtractElementInst *Ext1,
+ unsigned Opcode,
+ ExtractElementInst *&ConvertToShuffle,
+ unsigned PreferredExtractIndex) {
+ assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
+ isa<ConstantInt>(Ext1->getOperand(1)) &&
+ "Expected constant extract indexes");
+ Type *ScalarTy = Ext0->getType();
+ auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
+ int ScalarOpCost, VectorOpCost;
+
+ // Get cost estimates for scalar and vector versions of the operation.
+ bool IsBinOp = Instruction::isBinaryOp(Opcode);
+ if (IsBinOp) {
+ ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
+ VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
+ } else {
+ assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
+ "Expected a compare");
+ ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
+ CmpInst::makeCmpResultType(ScalarTy));
+ VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
+ CmpInst::makeCmpResultType(VecTy));
+ }
+
+ // Get cost estimates for the extract elements. These costs will factor into
+ // both sequences.
+ unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
+ unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();
+
+ int Extract0Cost =
+ TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
+ int Extract1Cost =
+ TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);
+
+ // A more expensive extract will always be replaced by a splat shuffle.
+ // For example, if Ext0 is more expensive:
+ // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
+ // extelt (opcode (splat V0, Ext0), V1), Ext1
+ // TODO: Evaluate whether that always results in lowest cost. Alternatively,
+ // check the cost of creating a broadcast shuffle and shuffling both
+ // operands to element 0.
+ int CheapExtractCost = std::min(Extract0Cost, Extract1Cost);
+
+ // Extra uses of the extracts mean that we include those costs in the
+ // vector total because those instructions will not be eliminated.
+ int OldCost, NewCost;
+ if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
+ // Handle a special case. If the 2 extracts are identical, adjust the
+ // formulas to account for that. The extra use charge allows for either the
+ // CSE'd pattern or an unoptimized form with identical values:
+ // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
+ bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
+ : !Ext0->hasOneUse() || !Ext1->hasOneUse();
+ OldCost = CheapExtractCost + ScalarOpCost;
+ NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
+ } else {
+ // Handle the general case. Each extract is actually a different value:
+ // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
+ OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
+ NewCost = VectorOpCost + CheapExtractCost +
+ !Ext0->hasOneUse() * Extract0Cost +
+ !Ext1->hasOneUse() * Extract1Cost;
+ }
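+ // Worked example (illustrative numbers only; real values come from TTI):
+ // with single-use extracts and ScalarOpCost, VectorOpCost, and each extract
+ // cost equal to 1, the general case gives OldCost = 1 + 1 + 1 = 3 versus
+ // NewCost = 1 + 1 = 2 (before any shuffle cost added below), so the vector
+ // form wins.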
+
+ ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
+ if (ConvertToShuffle) {
+ if (IsBinOp && DisableBinopExtractShuffle)
+ return true;
+
+ // If we are extracting from 2 different indexes, then one operand must be
+ // shuffled before performing the vector operation. The shuffle mask is
+ // undefined except for 1 lane that is being translated to the remaining
+ // extraction lane. Therefore, it is a splat shuffle. Ex:
+ // ShufMask = { undef, undef, 0, undef }
+ // TODO: The cost model has an option for a "broadcast" shuffle
+ // (splat-from-element-0), but no option for a more general splat.
+ NewCost +=
+ TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
+ }
+
+ // Aggressively form a vector op if the cost is equal because the transform
+ // may enable further optimization.
+ // Codegen can reverse this transform (scalarize) if it was not profitable.
+ return OldCost < NewCost;
+}
+
+/// Create a shuffle that translates (shifts) 1 element from the input vector
+/// to a new element location.
+static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
+ unsigned NewIndex, IRBuilder<> &Builder) {
+ // The shuffle mask is undefined except for 1 lane that is being translated
+ // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
+ // ShufMask = { 2, undef, undef, undef }
+ auto *VecTy = cast<FixedVectorType>(Vec->getType());
+ SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
+ ShufMask[NewIndex] = OldIndex;
+ Value *Undef = UndefValue::get(VecTy);
+ return Builder.CreateShuffleVector(Vec, Undef, ShufMask, "shift");
+}
+
+/// Given an extract element instruction with constant index operand, shuffle
+/// the source vector (shift the scalar element) to a NewIndex for extraction.
+/// Return null if the input can be constant folded, so that we are not creating
+/// unnecessary instructions.
+static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
+ unsigned NewIndex,
+ IRBuilder<> &Builder) {
+ // If the extract can be constant-folded, the surrounding code has not been
+ // simplified yet. Defer to other passes to handle that rather than creating
+ // more instructions here.
+ Value *X = ExtElt->getVectorOperand();
+ Value *C = ExtElt->getIndexOperand();
+ assert(isa<ConstantInt>(C) && "Expected a constant index operand");
+ if (isa<Constant>(X))
+ return nullptr;
+
+ Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
+ NewIndex, Builder);
+ return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
+}
+
+/// Try to reduce extract element costs by converting scalar compares to vector
+/// compares followed by extract.
+/// cmp (ext0 V0, C), (ext1 V1, C)
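+/// A minimal IR sketch (hypothetical values, for illustration only):
+///   %e0 = extractelement <4 x i32> %v0, i32 2
+///   %e1 = extractelement <4 x i32> %v1, i32 2
+///   %r  = icmp sgt i32 %e0, %e1
+/// becomes:
+///   %vc = icmp sgt <4 x i32> %v0, %v1
+///   %r  = extractelement <4 x i1> %vc, i32 2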
+void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
+ ExtractElementInst *Ext1, Instruction &I) {
+ assert(isa<CmpInst>(&I) && "Expected a compare");
+ assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
+ cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
+ "Expected matching constant extract indexes");
+
+ // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
+ ++NumVecCmp;
+ CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
+ Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
+ Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
+ Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
+ replaceValue(I, *NewExt);
+}
+
+/// Try to reduce extract element costs by converting scalar binops to vector
+/// binops followed by extract.
+/// bo (ext0 V0, C), (ext1 V1, C)
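+/// A minimal IR sketch (hypothetical values, for illustration only):
+///   %e0 = extractelement <4 x float> %v0, i32 1
+///   %e1 = extractelement <4 x float> %v1, i32 1
+///   %r  = fadd float %e0, %e1
+/// becomes:
+///   %vb = fadd <4 x float> %v0, %v1
+///   %r  = extractelement <4 x float> %vb, i32 1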
+void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
+ ExtractElementInst *Ext1, Instruction &I) {
+ assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
+ assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
+ cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
+ "Expected matching constant extract indexes");
+
+ // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
+ ++NumVecBO;
+ Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
+ Value *VecBO =
+ Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);
+
+ // All IR flags are safe to back-propagate because any potential poison
+ // created in unused vector elements is discarded by the extract.
+ if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
+ VecBOInst->copyIRFlags(&I);
+
+ Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
+ replaceValue(I, *NewExt);
+}
+
+/// Match an instruction with extracted vector operands.
+bool VectorCombine::foldExtractExtract(Instruction &I) {
+ // It is not safe to transform things like div, urem, etc. because we may
+ // create undefined behavior when executing those on unknown vector elements.
+ if (!isSafeToSpeculativelyExecute(&I))
+ return false;
+
+ Instruction *I0, *I1;
+ CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
+ if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
+ !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
+ return false;
+
+ Value *V0, *V1;
+ uint64_t C0, C1;
+ if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
+ !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
+ V0->getType() != V1->getType())
+ return false;
+
+ // If the scalar value 'I' is going to be re-inserted into a vector, then try
+ // to create an extract to that same element. The extract/insert can be
+ // reduced to a "select shuffle".
+ // TODO: If we add a larger pattern match that starts from an insert, this
+ // probably becomes unnecessary.
+ auto *Ext0 = cast<ExtractElementInst>(I0);
+ auto *Ext1 = cast<ExtractElementInst>(I1);
+ uint64_t InsertIndex = InvalidIndex;
+ if (I.hasOneUse())
+ match(I.user_back(),
+ m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));
+
+ ExtractElementInst *ExtractToChange;
+ if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), ExtractToChange,
+ InsertIndex))
+ return false;
+
+ if (ExtractToChange) {
+ unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
+ ExtractElementInst *NewExtract =
+ translateExtract(ExtractToChange, CheapExtractIdx, Builder);
+ if (!NewExtract)
+ return false;
+ if (ExtractToChange == Ext0)
+ Ext0 = NewExtract;
+ else
+ Ext1 = NewExtract;
+ }
+
+ if (Pred != CmpInst::BAD_ICMP_PREDICATE)
+ foldExtExtCmp(Ext0, Ext1, I);
+ else
+ foldExtExtBinop(Ext0, Ext1, I);
+
+ return true;
+}
+
+/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
+/// destination type followed by shuffle. This can enable further transforms by
+/// moving bitcasts or shuffles together.
+bool VectorCombine::foldBitcastShuf(Instruction &I) {
+ Value *V;
+ ArrayRef<int> Mask;
+ if (!match(&I, m_BitCast(
+ m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
+ return false;
+
+ // Disallow non-vector casts and length-changing shuffles.
+ // TODO: We could allow any shuffle.
+ auto *DestTy = dyn_cast<VectorType>(I.getType());
+ auto *SrcTy = cast<VectorType>(V->getType());
+ if (!DestTy || I.getOperand(0)->getType() != SrcTy)
+ return false;
+
+ // The new shuffle must not cost more than the old shuffle. The bitcast is
+ // moved ahead of the shuffle, so assume that it has the same cost as before.
+ if (TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, DestTy) >
+ TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy))
+ return false;
+
+ unsigned DestNumElts = DestTy->getNumElements();
+ unsigned SrcNumElts = SrcTy->getNumElements();
+ SmallVector<int, 16> NewMask;
+ if (SrcNumElts <= DestNumElts) {
+ // The bitcast is from wide to narrow/equal elements. The shuffle mask can
+ // always be expanded to the equivalent form choosing narrower elements.
+ assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
+ unsigned ScaleFactor = DestNumElts / SrcNumElts;
+ narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
+ } else {
+ // The bitcast is from narrow elements to wide elements. The shuffle mask
+ // must choose consecutive elements to allow casting first.
+ assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
+ unsigned ScaleFactor = SrcNumElts / DestNumElts;
+ if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
+ return false;
+ }
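+ // Example of the mask rewrite (illustrative only): bitcasting a <2 x i64>
+ // shuffle with mask <1, 0> to <4 x i32> narrows the mask to <2, 3, 0, 1>;
+ // in the widening direction, a <4 x i32> mask of <2, 3, 0, 1> becomes <1, 0>,
+ // while a mask such as <1, 2, 3, 0> cannot be widened and we bail out above.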
+ // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
+ ++NumShufOfBitcast;
+ Value *CastV = Builder.CreateBitCast(V, DestTy);
+ Value *Shuf =
+ Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy), NewMask);
+ replaceValue(I, *Shuf);
+ return true;
+}
+
+/// Match a vector binop or compare instruction with at least one inserted
+/// scalar operand and convert to scalar binop/cmp followed by insertelement.
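+/// A minimal IR sketch (hypothetical values; applies only when the cost model
+/// agrees):
+///   %i = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 0
+///   %r = add <4 x i32> %i, <i32 7, i32 7, i32 7, i32 7>
+/// becomes:
+///   %s = add i32 %x, 7
+///   %r = insertelement <4 x i32> <i32 8, i32 9, i32 10, i32 11>, i32 %s, i32 0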
+bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
+ CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
+ Value *Ins0, *Ins1;
+ if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
+ !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
+ return false;
+
+ // Do not convert the vector condition of a vector select into a scalar
+ // condition. That may cause problems for codegen because of differences in
+ // boolean formats and register-file transfers.
+ // TODO: Can we account for that in the cost model?
+ bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
+ if (IsCmp)
+ for (User *U : I.users())
+ if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
+ return false;
+
+ // Match against one or both scalar values being inserted into constant
+ // vectors:
+ // vec_op VecC0, (inselt VecC1, V1, Index)
+ // vec_op (inselt VecC0, V0, Index), VecC1
+ // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
+ // TODO: Deal with mismatched index constants and variable indexes?
+ Constant *VecC0 = nullptr, *VecC1 = nullptr;
+ Value *V0 = nullptr, *V1 = nullptr;
+ uint64_t Index0 = 0, Index1 = 0;
+ if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
+ m_ConstantInt(Index0))) &&
+ !match(Ins0, m_Constant(VecC0)))
+ return false;
+ if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
+ m_ConstantInt(Index1))) &&
+ !match(Ins1, m_Constant(VecC1)))
+ return false;
+
+ bool IsConst0 = !V0;
+ bool IsConst1 = !V1;
+ if (IsConst0 && IsConst1)
+ return false;
+ if (!IsConst0 && !IsConst1 && Index0 != Index1)
+ return false;
+
+ // Bail for single insertion if it is a load.
+ // TODO: Handle this once getVectorInstrCost can cost for load/stores.
+ auto *I0 = dyn_cast_or_null<Instruction>(V0);
+ auto *I1 = dyn_cast_or_null<Instruction>(V1);
+ if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
+ (IsConst1 && I0 && I0->mayReadFromMemory()))
+ return false;
+
+ uint64_t Index = IsConst0 ? Index1 : Index0;
+ Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
+ Type *VecTy = I.getType();
+ assert(VecTy->isVectorTy() &&
+ (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
+ (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
+ ScalarTy->isPointerTy()) &&
+ "Unexpected types for insert element into binop or cmp");
+
+ unsigned Opcode = I.getOpcode();
+ int ScalarOpCost, VectorOpCost;
+ if (IsCmp) {
+ ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
+ VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
+ } else {
+ ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
+ VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
+ }
+
+ // Get cost estimate for the insert element. This cost will factor into
+ // both sequences.
+ int InsertCost =
+ TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
+ int OldCost = (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) +
+ VectorOpCost;
+ int NewCost = ScalarOpCost + InsertCost +
+ (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
+ (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);
+
+ // We want to scalarize unless the vector variant actually has lower cost.
+ if (OldCost < NewCost)
+ return false;
+
+ // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
+ // inselt NewVecC, (scalar_op V0, V1), Index
+ if (IsCmp)
+ ++NumScalarCmp;
+ else
+ ++NumScalarBO;
+
+ // For constant cases, extract the scalar element; this should constant fold.
+ if (IsConst0)
+ V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
+ if (IsConst1)
+ V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));
+
+ Value *Scalar =
+ IsCmp ? Builder.CreateCmp(Pred, V0, V1)
+ : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);
+
+ Scalar->setName(I.getName() + ".scalar");
+
+ // All IR flags are safe to back-propagate. There is no potential for extra
+ // poison to be created by the scalar instruction.
+ if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
+ ScalarInst->copyIRFlags(&I);
+
+ // Fold the vector constants in the original vectors into a new base vector.
+ Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
+ : ConstantExpr::get(Opcode, VecC0, VecC1);
+ Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
+ replaceValue(I, *Insert);
+ return true;
+}
+
+/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
+/// a vector into vector operations followed by extract. Note: The SLP pass
+/// may miss this pattern because of implementation problems.
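+/// A minimal IR sketch (hypothetical values; assumes the cost model chooses to
+/// shuffle the lane-1 extract):
+///   %e0 = extractelement <4 x i32> %x, i32 0
+///   %e1 = extractelement <4 x i32> %x, i32 1
+///   %c0 = icmp sgt i32 %e0, 10
+///   %c1 = icmp sgt i32 %e1, 20
+///   %r  = or i1 %c0, %c1
+/// becomes:
+///   %vc = icmp sgt <4 x i32> %x, <i32 10, i32 20, i32 undef, i32 undef>
+///   %sh = shufflevector <4 x i1> %vc, <4 x i1> undef,
+///                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+///   %vr = or <4 x i1> %vc, %sh
+///   %r  = extractelement <4 x i1> %vr, i32 0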
+bool VectorCombine::foldExtractedCmps(Instruction &I) {
+ // We are looking for a scalar binop of booleans.
+ // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
+ if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
+ return false;
+
+ // The compare predicates should match, and each compare should have a
+ // constant operand.
+ // TODO: Relax the one-use constraints.
+ Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
+ Instruction *I0, *I1;
+ Constant *C0, *C1;
+ CmpInst::Predicate P0, P1;
+ if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
+ !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
+ P0 != P1)
+ return false;
+
+ // The compare operands must be extracts of the same vector with constant
+ // extract indexes.
+ // TODO: Relax the one-use constraints.
+ Value *X;
+ uint64_t Index0, Index1;
+ if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
+ !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
+ return false;
+
+ auto *Ext0 = cast<ExtractElementInst>(I0);
+ auto *Ext1 = cast<ExtractElementInst>(I1);
+ ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
+ if (!ConvertToShuf)
+ return false;
+
+ // The original scalar pattern is:
+ // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
+ CmpInst::Predicate Pred = P0;
+ unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
+ : Instruction::ICmp;
+ auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
+ if (!VecTy)
+ return false;
+
+ int OldCost = TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
+ OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
+ OldCost += TTI.getCmpSelInstrCost(CmpOpcode, I0->getType()) * 2;
+ OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());
+
+ // The proposed vector pattern is:
+ // vcmp = cmp Pred X, VecC
+ // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
+ int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
+ int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
+ auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
+ int NewCost = TTI.getCmpSelInstrCost(CmpOpcode, X->getType());
+ NewCost +=
+ TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy);
+ NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
+ NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);
+
+ // Aggressively form vector ops if the cost is equal because the transform
+ // may enable further optimization.
+ // Codegen can reverse this transform (scalarize) if it was not profitable.
+ if (OldCost < NewCost)
+ return false;
+
+ // Create a vector constant from the 2 scalar constants.
+ SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
+ UndefValue::get(VecTy->getElementType()));
+ CmpC[Index0] = C0;
+ CmpC[Index1] = C1;
+ Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));
+
+ Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
+ Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
+ VCmp, Shuf);
+ Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
+ replaceValue(I, *NewExt);
+ ++NumVecCmpBO;
+ return true;
+}
+
+/// This is the entry point for all transforms. Pass manager differences are
+/// handled in the callers of this function.
+bool VectorCombine::run() {
+ if (DisableVectorCombine)
+ return false;
+
+ bool MadeChange = false;
+ for (BasicBlock &BB : F) {
+ // Ignore unreachable basic blocks.
+ if (!DT.isReachableFromEntry(&BB))
+ continue;
+ // Do not delete instructions under here and invalidate the iterator.
+ // Walk the block forwards to enable simple iterative chains of transforms.
+ // TODO: It could be more efficient to remove dead instructions
+ // iteratively in this loop rather than waiting until the end.
+ for (Instruction &I : BB) {
+ if (isa<DbgInfoIntrinsic>(I))
+ continue;
+ Builder.SetInsertPoint(&I);
+ MadeChange |= foldExtractExtract(I);
+ MadeChange |= foldBitcastShuf(I);
+ MadeChange |= scalarizeBinopOrCmp(I);
+ MadeChange |= foldExtractedCmps(I);
+ }
+ }
+
+ // We're done with transforms, so remove dead instructions.
+ if (MadeChange)
+ for (BasicBlock &BB : F)
+ SimplifyInstructionsInBlock(&BB);
+
+ return MadeChange;
+}
+
+// Pass manager boilerplate below here.
+
+namespace {
+class VectorCombineLegacyPass : public FunctionPass {
+public:
+ static char ID;
+ VectorCombineLegacyPass() : FunctionPass(ID) {
+ initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ AU.setPreservesCFG();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
+ AU.addPreserved<AAResultsWrapperPass>();
+ AU.addPreserved<BasicAAWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (skipFunction(F))
+ return false;
+ auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ VectorCombine Combiner(F, TTI, DT);
+ return Combiner.run();
+ }
+};
+} // namespace
+
+char VectorCombineLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
+ "Optimize scalar/vector ops", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
+ "Optimize scalar/vector ops", false, false)
+Pass *llvm::createVectorCombinePass() {
+ return new VectorCombineLegacyPass();
+}
+
+PreservedAnalyses VectorCombinePass::run(Function &F,
+ FunctionAnalysisManager &FAM) {
+ TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+ DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
+ VectorCombine Combiner(F, TTI, DT);
+ if (!Combiner.run())
+ return PreservedAnalyses::all();
+ PreservedAnalyses PA;
+ PA.preserveSet<CFGAnalyses>();
+ PA.preserve<GlobalsAA>();
+ PA.preserve<AAManager>();
+ PA.preserve<BasicAA>();
+ return PA;
+}
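+
+// To exercise this pass in isolation (assuming the registered pass name
+// "vector-combine" shown above), something like the following should work:
+//   opt -passes=vector-combine -S input.ll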