Diffstat (limited to 'lib/Transforms/Vectorize')
-rw-r--r--  lib/Transforms/Vectorize/BBVectorize.cpp         |   30
-rw-r--r--  lib/Transforms/Vectorize/CMakeLists.txt          |    3
-rw-r--r--  lib/Transforms/Vectorize/LoadStoreVectorizer.cpp |  999
-rw-r--r--  lib/Transforms/Vectorize/LoopVectorize.cpp       | 3791
-rw-r--r--  lib/Transforms/Vectorize/Makefile                |   15
-rw-r--r--  lib/Transforms/Vectorize/SLPVectorizer.cpp       | 1364
-rw-r--r--  lib/Transforms/Vectorize/Vectorize.cpp           |    1
7 files changed, 4279 insertions, 1924 deletions
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp index 8844d574a79d3..af594cb751aa4 100644 --- a/lib/Transforms/Vectorize/BBVectorize.cpp +++ b/lib/Transforms/Vectorize/BBVectorize.cpp @@ -397,7 +397,7 @@ namespace { Instruction *I, Instruction *J); bool vectorizeBB(BasicBlock &BB) { - if (skipOptnoneFunction(BB)) + if (skipBasicBlock(BB)) return false; if (!DT->isReachableFromEntry(&BB)) { DEBUG(dbgs() << "BBV: skipping unreachable " << BB.getName() << @@ -886,9 +886,16 @@ namespace { Type *DestTy = C->getDestTy(); if (!DestTy->isSingleValueType()) return false; - } else if (isa<SelectInst>(I)) { + } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) { if (!Config.VectorizeSelect) return false; + // We can vectorize a select if either all operands are scalars, + // or all operands are vectors. Trying to "widen" a select between + // vectors that has a scalar condition results in a malformed select. + // FIXME: We could probably be smarter about this by rewriting the select + // with different types instead. + return (SI->getCondition()->getType()->isVectorTy() == + SI->getTrueValue()->getType()->isVectorTy()); } else if (isa<CmpInst>(I)) { if (!Config.VectorizeCmp) return false; @@ -1117,16 +1124,25 @@ namespace { } if (IID && TTI) { + FastMathFlags FMFCI; + if (auto *FPMOCI = dyn_cast<FPMathOperator>(CI)) + FMFCI = FPMOCI->getFastMathFlags(); + SmallVector<Type*, 4> Tys; for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) Tys.push_back(CI->getArgOperand(i)->getType()); - unsigned ICost = TTI->getIntrinsicInstrCost(IID, IT1, Tys); + unsigned ICost = TTI->getIntrinsicInstrCost(IID, IT1, Tys, FMFCI); Tys.clear(); CallInst *CJ = cast<CallInst>(J); + + FastMathFlags FMFCJ; + if (auto *FPMOCJ = dyn_cast<FPMathOperator>(CJ)) + FMFCJ = FPMOCJ->getFastMathFlags(); + for (unsigned i = 0, ie = CJ->getNumArgOperands(); i != ie; ++i) Tys.push_back(CJ->getArgOperand(i)->getType()); - unsigned JCost = TTI->getIntrinsicInstrCost(IID, JT1, Tys); + unsigned JCost = TTI->getIntrinsicInstrCost(IID, JT1, Tys, FMFCJ); Tys.clear(); assert(CI->getNumArgOperands() == CJ->getNumArgOperands() && @@ -1140,8 +1156,10 @@ namespace { CJ->getArgOperand(i)->getType())); } + FastMathFlags FMFV = FMFCI; + FMFV &= FMFCJ; Type *RetTy = getVecTypeForPair(IT1, JT1); - unsigned VCost = TTI->getIntrinsicInstrCost(IID, RetTy, Tys); + unsigned VCost = TTI->getIntrinsicInstrCost(IID, RetTy, Tys, FMFV); if (VCost > ICost + JCost) return false; @@ -1259,7 +1277,7 @@ namespace { bool JAfterStart = IAfterStart; BasicBlock::iterator J = std::next(I); for (unsigned ss = 0; J != E && ss <= Config.SearchLimit; ++J, ++ss) { - if (&*J == Start) + if (J == Start) JAfterStart = true; // Determine if J uses I, if so, exit the loop. 
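The new select check is easiest to see at the IR level. A minimal sketch, with hypothetical values that are not part of the patch: a select may take either a scalar i1 condition or a vector of i1 whose element count matches its value operands, so pairing two scalar-condition selects over vector operands would widen the operands without a matching widening of the condition:

    select i1 %c, <2 x i32> %a, <2 x i32> %b         ; valid: scalar condition
    select <2 x i1> %m, <2 x i32> %a, <2 x i32> %b   ; valid: element counts match
    select <2 x i1> %m, <4 x i32> %a, <4 x i32> %b   ; malformed: 2-wide mask, 4-wide values

Hence the check requires the condition and the true value to agree on vectorness before two selects may be fused.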
diff --git a/lib/Transforms/Vectorize/CMakeLists.txt b/lib/Transforms/Vectorize/CMakeLists.txt index 905c069cf851f..23c2ab025f375 100644 --- a/lib/Transforms/Vectorize/CMakeLists.txt +++ b/lib/Transforms/Vectorize/CMakeLists.txt @@ -1,8 +1,9 @@ add_llvm_library(LLVMVectorize BBVectorize.cpp - Vectorize.cpp + LoadStoreVectorizer.cpp LoopVectorize.cpp SLPVectorizer.cpp + Vectorize.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms diff --git a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp new file mode 100644 index 0000000000000..c8906bde15e01 --- /dev/null +++ b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -0,0 +1,999 @@ +//===----- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionExpressions.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/Analysis/VectorUtils.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Value.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Vectorize.h" + +using namespace llvm; + +#define DEBUG_TYPE "load-store-vectorizer" +STATISTIC(NumVectorInstructions, "Number of vector accesses generated"); +STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized"); + +namespace { + +// TODO: Remove this +static const unsigned TargetBaseAlign = 4; + +class Vectorizer { + typedef SmallVector<Value *, 8> ValueList; + typedef MapVector<Value *, ValueList> ValueListMap; + + Function &F; + AliasAnalysis &AA; + DominatorTree &DT; + ScalarEvolution &SE; + TargetTransformInfo &TTI; + const DataLayout &DL; + IRBuilder<> Builder; + ValueListMap StoreRefs; + ValueListMap LoadRefs; + +public: + Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT, + ScalarEvolution &SE, TargetTransformInfo &TTI) + : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI), + DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {} + + bool run(); + +private: + Value *getPointerOperand(Value *I); + + unsigned getPointerAddressSpace(Value *I); + + unsigned getAlignment(LoadInst *LI) const { + unsigned Align = LI->getAlignment(); + if (Align != 0) + return Align; + + return DL.getABITypeAlignment(LI->getType()); + } + + unsigned getAlignment(StoreInst *SI) const { + unsigned Align = SI->getAlignment(); + if (Align != 0) + return Align; + + return DL.getABITypeAlignment(SI->getValueOperand()->getType()); + } + + bool isConsecutiveAccess(Value *A, Value *B); + + /// After vectorization, reorder the instructions that I depends on + /// (the instructions defining its operands), to ensure they dominate I. 
+  void reorder(Instruction *I);
+
+  /// Returns the first and the last instructions in Chain.
+  std::pair<BasicBlock::iterator, BasicBlock::iterator>
+  getBoundaryInstrs(ArrayRef<Value *> Chain);
+
+  /// Erases the original instructions after vectorizing.
+  void eraseInstructions(ArrayRef<Value *> Chain);
+
+  /// "Legalize" the vector type that would be produced by combining \p
+  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
+  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
+  /// expected to have more than 4 elements.
+  std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
+  splitOddVectorElts(ArrayRef<Value *> Chain, unsigned ElementSizeBits);
+
+  /// Checks for instructions which may affect the memory accessed
+  /// in the chain between \p From and \p To. Returns Index, where
+  /// \p Chain[0, Index) is the largest vectorizable chain prefix.
+  /// The elements of \p Chain should be all loads or all stores.
+  unsigned getVectorizablePrefixEndIdx(ArrayRef<Value *> Chain,
+                                       BasicBlock::iterator From,
+                                       BasicBlock::iterator To);
+
+  /// Collects load and store instructions to vectorize.
+  void collectInstructions(BasicBlock *BB);
+
+  /// Processes the collected instructions in \p Map. The elements of \p Map
+  /// should be all loads or all stores.
+  bool vectorizeChains(ValueListMap &Map);
+
+  /// Finds loads and stores to consecutive memory addresses and vectorizes them.
+  bool vectorizeInstructions(ArrayRef<Value *> Instrs);
+
+  /// Vectorizes the load instructions in Chain.
+  bool vectorizeLoadChain(ArrayRef<Value *> Chain,
+                          SmallPtrSet<Value *, 16> *InstructionsProcessed);
+
+  /// Vectorizes the store instructions in Chain.
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain,
+                           SmallPtrSet<Value *, 16> *InstructionsProcessed);
+
+  /// Checks whether this load/store access is misaligned.
+  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
+                          unsigned Alignment);
+};
+
+class LoadStoreVectorizer : public FunctionPass {
+public:
+  static char ID;
+
+  LoadStoreVectorizer() : FunctionPass(ID) {
+    initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
+
+  const char *getPassName() const override {
+    return "GPU Load and Store Vectorizer";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<TargetTransformInfoWrapperPass>();
+    AU.setPreservesCFG();
+  }
+};
+}
+
+INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
+                      "Vectorize load and store instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
+                    "Vectorize load and store instructions", false, false)
+
+char LoadStoreVectorizer::ID = 0;
+
+Pass *llvm::createLoadStoreVectorizerPass() {
+  return new LoadStoreVectorizer();
+}
+
+bool LoadStoreVectorizer::runOnFunction(Function &F) {
+  // Don't vectorize when the attribute NoImplicitFloat is used.
+ if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat)) + return false; + + AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); + DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); + ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE(); + TargetTransformInfo &TTI = + getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); + + Vectorizer V(F, AA, DT, SE, TTI); + return V.run(); +} + +// Vectorizer Implementation +bool Vectorizer::run() { + bool Changed = false; + + // Scan the blocks in the function in post order. + for (BasicBlock *BB : post_order(&F)) { + collectInstructions(BB); + Changed |= vectorizeChains(LoadRefs); + Changed |= vectorizeChains(StoreRefs); + } + + return Changed; +} + +Value *Vectorizer::getPointerOperand(Value *I) { + if (LoadInst *LI = dyn_cast<LoadInst>(I)) + return LI->getPointerOperand(); + if (StoreInst *SI = dyn_cast<StoreInst>(I)) + return SI->getPointerOperand(); + return nullptr; +} + +unsigned Vectorizer::getPointerAddressSpace(Value *I) { + if (LoadInst *L = dyn_cast<LoadInst>(I)) + return L->getPointerAddressSpace(); + if (StoreInst *S = dyn_cast<StoreInst>(I)) + return S->getPointerAddressSpace(); + return -1; +} + +// FIXME: Merge with llvm::isConsecutiveAccess +bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) { + Value *PtrA = getPointerOperand(A); + Value *PtrB = getPointerOperand(B); + unsigned ASA = getPointerAddressSpace(A); + unsigned ASB = getPointerAddressSpace(B); + + // Check that the address spaces match and that the pointers are valid. + if (!PtrA || !PtrB || (ASA != ASB)) + return false; + + // Make sure that A and B are different pointers of the same size type. + unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); + Type *PtrATy = PtrA->getType()->getPointerElementType(); + Type *PtrBTy = PtrB->getType()->getPointerElementType(); + if (PtrA == PtrB || + DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) || + DL.getTypeStoreSize(PtrATy->getScalarType()) != + DL.getTypeStoreSize(PtrBTy->getScalarType())) + return false; + + APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy)); + + APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); + PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); + PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); + + APInt OffsetDelta = OffsetB - OffsetA; + + // Check if they are based on the same pointer. That makes the offsets + // sufficient. + if (PtrA == PtrB) + return OffsetDelta == Size; + + // Compute the necessary base pointer delta to have the necessary final delta + // equal to the size. + APInt BaseDelta = Size - OffsetDelta; + + // Compute the distance with SCEV between the base pointers. + const SCEV *PtrSCEVA = SE.getSCEV(PtrA); + const SCEV *PtrSCEVB = SE.getSCEV(PtrB); + const SCEV *C = SE.getConstant(BaseDelta); + const SCEV *X = SE.getAddExpr(PtrSCEVA, C); + if (X == PtrSCEVB) + return true; + + // Sometimes even this doesn't work, because SCEV can't always see through + // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking + // things the hard way. + + // Look through GEPs after checking they're the same except for the last + // index. 
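// (Editorial sketch of the pattern the code below handles; the IR names are
// hypothetical and not from the patch:
//
//   %i1 = add nuw i32 %i, 1
//   %x0 = zext i32 %i to i64
//   %x1 = zext i32 %i1 to i64
//   %p0 = getelementptr float, float* %base, i64 %x0
//   %p1 = getelementptr float, float* %base, i64 %x1
//
// SCEV cannot fold zext(%i + 1) into zext(%i) + 1 unless the add is known not
// to wrap, so the code matches the two GEPs up to their final index, peels
// that index through the ext, and then proves the +1 cannot overflow.)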
+ GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(getPointerOperand(A)); + GetElementPtrInst *GEPB = dyn_cast<GetElementPtrInst>(getPointerOperand(B)); + if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands()) + return false; + unsigned FinalIndex = GEPA->getNumOperands() - 1; + for (unsigned i = 0; i < FinalIndex; i++) + if (GEPA->getOperand(i) != GEPB->getOperand(i)) + return false; + + Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex)); + Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex)); + if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() || + OpA->getType() != OpB->getType()) + return false; + + // Only look through a ZExt/SExt. + if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA)) + return false; + + bool Signed = isa<SExtInst>(OpA); + + OpA = dyn_cast<Instruction>(OpA->getOperand(0)); + OpB = dyn_cast<Instruction>(OpB->getOperand(0)); + if (!OpA || !OpB || OpA->getType() != OpB->getType()) + return false; + + // Now we need to prove that adding 1 to OpA won't overflow. + bool Safe = false; + // First attempt: if OpB is an add with NSW/NUW, and OpB is 1 added to OpA, + // we're okay. + if (OpB->getOpcode() == Instruction::Add && + isa<ConstantInt>(OpB->getOperand(1)) && + cast<ConstantInt>(OpB->getOperand(1))->getSExtValue() > 0) { + if (Signed) + Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap(); + else + Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap(); + } + + unsigned BitWidth = OpA->getType()->getScalarSizeInBits(); + + // Second attempt: + // If any bits are known to be zero other than the sign bit in OpA, we can + // add 1 to it while guaranteeing no overflow of any sort. + if (!Safe) { + APInt KnownZero(BitWidth, 0); + APInt KnownOne(BitWidth, 0); + computeKnownBits(OpA, KnownZero, KnownOne, DL, 0, nullptr, OpA, &DT); + KnownZero &= ~APInt::getHighBitsSet(BitWidth, 1); + if (KnownZero != 0) + Safe = true; + } + + if (!Safe) + return false; + + const SCEV *OffsetSCEVA = SE.getSCEV(OpA); + const SCEV *OffsetSCEVB = SE.getSCEV(OpB); + const SCEV *One = SE.getConstant(APInt(BitWidth, 1)); + const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One); + return X2 == OffsetSCEVB; +} + +void Vectorizer::reorder(Instruction *I) { + SmallPtrSet<Instruction *, 16> InstructionsToMove; + SmallVector<Instruction *, 16> Worklist; + + Worklist.push_back(I); + while (!Worklist.empty()) { + Instruction *IW = Worklist.pop_back_val(); + int NumOperands = IW->getNumOperands(); + for (int i = 0; i < NumOperands; i++) { + Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i)); + if (!IM || IM->getOpcode() == Instruction::PHI) + continue; + + if (!DT.dominates(IM, I)) { + InstructionsToMove.insert(IM); + Worklist.push_back(IM); + assert(IM->getParent() == IW->getParent() && + "Instructions to move should be in the same basic block"); + } + } + } + + // All instructions to move should follow I. Start from I, not from begin(). 
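// (Note on the scan below, inferred from the code: each flagged instruction
// is unlinked and reinserted immediately before I, and the iterator is
// stepped back before the removal so the instruction following the moved one
// is not skipped.)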
+ for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E; + ++BBI) { + if (!is_contained(InstructionsToMove, &*BBI)) + continue; + Instruction *IM = &*BBI; + --BBI; + IM->removeFromParent(); + IM->insertBefore(I); + } +} + +std::pair<BasicBlock::iterator, BasicBlock::iterator> +Vectorizer::getBoundaryInstrs(ArrayRef<Value *> Chain) { + Instruction *C0 = cast<Instruction>(Chain[0]); + BasicBlock::iterator FirstInstr = C0->getIterator(); + BasicBlock::iterator LastInstr = C0->getIterator(); + + BasicBlock *BB = C0->getParent(); + unsigned NumFound = 0; + for (Instruction &I : *BB) { + if (!is_contained(Chain, &I)) + continue; + + ++NumFound; + if (NumFound == 1) { + FirstInstr = I.getIterator(); + } + if (NumFound == Chain.size()) { + LastInstr = I.getIterator(); + break; + } + } + + // Range is [first, last). + return std::make_pair(FirstInstr, ++LastInstr); +} + +void Vectorizer::eraseInstructions(ArrayRef<Value *> Chain) { + SmallVector<Instruction *, 16> Instrs; + for (Value *V : Chain) { + Value *PtrOperand = getPointerOperand(V); + assert(PtrOperand && "Instruction must have a pointer operand."); + Instrs.push_back(cast<Instruction>(V)); + if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand)) + Instrs.push_back(GEP); + } + + // Erase instructions. + for (Value *V : Instrs) { + Instruction *Instr = cast<Instruction>(V); + if (Instr->use_empty()) + Instr->eraseFromParent(); + } +} + +std::pair<ArrayRef<Value *>, ArrayRef<Value *>> +Vectorizer::splitOddVectorElts(ArrayRef<Value *> Chain, + unsigned ElementSizeBits) { + unsigned ElemSizeInBytes = ElementSizeBits / 8; + unsigned SizeInBytes = ElemSizeInBytes * Chain.size(); + unsigned NumRight = (SizeInBytes % 4) / ElemSizeInBytes; + unsigned NumLeft = Chain.size() - NumRight; + return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft)); +} + +unsigned Vectorizer::getVectorizablePrefixEndIdx(ArrayRef<Value *> Chain, + BasicBlock::iterator From, + BasicBlock::iterator To) { + SmallVector<std::pair<Value *, unsigned>, 16> MemoryInstrs; + SmallVector<std::pair<Value *, unsigned>, 16> ChainInstrs; + + unsigned InstrIdx = 0; + for (auto I = From; I != To; ++I, ++InstrIdx) { + if (isa<LoadInst>(I) || isa<StoreInst>(I)) { + if (!is_contained(Chain, &*I)) + MemoryInstrs.push_back({&*I, InstrIdx}); + else + ChainInstrs.push_back({&*I, InstrIdx}); + } else if (I->mayHaveSideEffects()) { + DEBUG(dbgs() << "LSV: Found side-effecting operation: " << *I << '\n'); + return 0; + } + } + + assert(Chain.size() == ChainInstrs.size() && + "All instructions in the Chain must exist in [From, To)."); + + unsigned ChainIdx = 0; + for (auto EntryChain : ChainInstrs) { + Value *ChainInstrValue = EntryChain.first; + unsigned ChainInstrIdx = EntryChain.second; + for (auto EntryMem : MemoryInstrs) { + Value *MemInstrValue = EntryMem.first; + unsigned MemInstrIdx = EntryMem.second; + if (isa<LoadInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue)) + continue; + + // We can ignore the alias as long as the load comes before the store, + // because that means we won't be moving the load past the store to + // vectorize it (the vectorized load is inserted at the location of the + // first load in the chain). + if (isa<StoreInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue) && + ChainInstrIdx < MemInstrIdx) + continue; + + // Same case, but in reverse. 
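// (That is, a chain store that already follows an aliasing load may ignore
// the alias: the combined store is emitted at the position of the chain's
// last store, so it never moves ahead of that load. This gloss is inferred
// from vectorizeStoreChain below.)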
+ if (isa<LoadInst>(MemInstrValue) && isa<StoreInst>(ChainInstrValue) && + ChainInstrIdx > MemInstrIdx) + continue; + + Instruction *M0 = cast<Instruction>(MemInstrValue); + Instruction *M1 = cast<Instruction>(ChainInstrValue); + + if (!AA.isNoAlias(MemoryLocation::get(M0), MemoryLocation::get(M1))) { + DEBUG({ + Value *Ptr0 = getPointerOperand(M0); + Value *Ptr1 = getPointerOperand(M1); + + dbgs() << "LSV: Found alias.\n" + " Aliasing instruction and pointer:\n" + << *MemInstrValue << " aliases " << *Ptr0 << '\n' + << " Aliased instruction and pointer:\n" + << *ChainInstrValue << " aliases " << *Ptr1 << '\n'; + }); + + return ChainIdx; + } + } + ChainIdx++; + } + return Chain.size(); +} + +void Vectorizer::collectInstructions(BasicBlock *BB) { + LoadRefs.clear(); + StoreRefs.clear(); + + for (Instruction &I : *BB) { + if (!I.mayReadOrWriteMemory()) + continue; + + if (LoadInst *LI = dyn_cast<LoadInst>(&I)) { + if (!LI->isSimple()) + continue; + + Type *Ty = LI->getType(); + if (!VectorType::isValidElementType(Ty->getScalarType())) + continue; + + // Skip weird non-byte sizes. They probably aren't worth the effort of + // handling correctly. + unsigned TySize = DL.getTypeSizeInBits(Ty); + if (TySize < 8) + continue; + + Value *Ptr = LI->getPointerOperand(); + unsigned AS = Ptr->getType()->getPointerAddressSpace(); + unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); + + // No point in looking at these if they're too big to vectorize. + if (TySize > VecRegSize / 2) + continue; + + // Make sure all the users of a vector are constant-index extracts. + if (isa<VectorType>(Ty) && !all_of(LI->users(), [LI](const User *U) { + const Instruction *UI = cast<Instruction>(U); + return isa<ExtractElementInst>(UI) && + isa<ConstantInt>(UI->getOperand(1)); + })) + continue; + + // TODO: Target hook to filter types. + + // Save the load locations. + Value *ObjPtr = GetUnderlyingObject(Ptr, DL); + LoadRefs[ObjPtr].push_back(LI); + + } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) { + if (!SI->isSimple()) + continue; + + Type *Ty = SI->getValueOperand()->getType(); + if (!VectorType::isValidElementType(Ty->getScalarType())) + continue; + + // Skip weird non-byte sizes. They probably aren't worth the effort of + // handling correctly. + unsigned TySize = DL.getTypeSizeInBits(Ty); + if (TySize < 8) + continue; + + Value *Ptr = SI->getPointerOperand(); + unsigned AS = Ptr->getType()->getPointerAddressSpace(); + unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); + if (TySize > VecRegSize / 2) + continue; + + if (isa<VectorType>(Ty) && !all_of(SI->users(), [SI](const User *U) { + const Instruction *UI = cast<Instruction>(U); + return isa<ExtractElementInst>(UI) && + isa<ConstantInt>(UI->getOperand(1)); + })) + continue; + + // Save store location. + Value *ObjPtr = GetUnderlyingObject(Ptr, DL); + StoreRefs[ObjPtr].push_back(SI); + } + } +} + +bool Vectorizer::vectorizeChains(ValueListMap &Map) { + bool Changed = false; + + for (const std::pair<Value *, ValueList> &Chain : Map) { + unsigned Size = Chain.second.size(); + if (Size < 2) + continue; + + DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n"); + + // Process the stores in chunks of 64. 
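// (Worked example with hypothetical numbers: for a chain of 70 accesses the
// loop below hands [0,64) and then [64,70) to vectorizeInstructions
// separately; 64 matches the fixed-size ConsecutiveChain array used there.)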
+    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
+      unsigned Len = std::min<unsigned>(CE - CI, 64);
+      ArrayRef<Value *> Chunk(&Chain.second[CI], Len);
+      Changed |= vectorizeInstructions(Chunk);
+    }
+  }
+
+  return Changed;
+}
+
+bool Vectorizer::vectorizeInstructions(ArrayRef<Value *> Instrs) {
+  DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
+  SmallSetVector<int, 16> Heads, Tails;
+  int ConsecutiveChain[64];
+
+  // Do a quadratic search on all of the given loads/stores and find all of
+  // the pairs that follow each other.
+  for (int i = 0, e = Instrs.size(); i < e; ++i) {
+    ConsecutiveChain[i] = -1;
+    for (int j = e - 1; j >= 0; --j) {
+      if (i == j)
+        continue;
+
+      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
+        if (ConsecutiveChain[i] != -1) {
+          int CurDistance = std::abs(ConsecutiveChain[i] - i);
+          int NewDistance = std::abs(ConsecutiveChain[i] - j);
+          if (j < i || NewDistance > CurDistance)
+            continue; // Should not insert.
+        }
+
+        Tails.insert(j);
+        Heads.insert(i);
+        ConsecutiveChain[i] = j;
+      }
+    }
+  }
+
+  bool Changed = false;
+  SmallPtrSet<Value *, 16> InstructionsProcessed;
+
+  for (int Head : Heads) {
+    if (InstructionsProcessed.count(Instrs[Head]))
+      continue;
+    bool longerChainExists = false;
+    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
+      if (Head == Tails[TIt] &&
+          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
+        longerChainExists = true;
+        break;
+      }
+    if (longerChainExists)
+      continue;
+
+    // We found an instr that starts a chain. Now follow the chain and try to
+    // vectorize it.
+    SmallVector<Value *, 16> Operands;
+    int I = Head;
+    while (I != -1 && (Tails.count(I) || Heads.count(I))) {
+      if (InstructionsProcessed.count(Instrs[I]))
+        break;
+
+      Operands.push_back(Instrs[I]);
+      I = ConsecutiveChain[I];
+    }
+
+    bool Vectorized = false;
+    if (isa<LoadInst>(*Operands.begin()))
+      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
+    else
+      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);
+
+    Changed |= Vectorized;
+  }
+
+  return Changed;
+}
+
+bool Vectorizer::vectorizeStoreChain(
+    ArrayRef<Value *> Chain, SmallPtrSet<Value *, 16> *InstructionsProcessed) {
+  StoreInst *S0 = cast<StoreInst>(Chain[0]);
+
+  // If the vector has an int element, default to int for the whole store.
+  Type *StoreTy;
+  for (const auto &V : Chain) {
+    StoreTy = cast<StoreInst>(V)->getValueOperand()->getType();
+    if (StoreTy->isIntOrIntVectorTy())
+      break;
+
+    if (StoreTy->isPtrOrPtrVectorTy()) {
+      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
+                                DL.getTypeSizeInBits(StoreTy));
+      break;
+    }
+  }
+
+  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
+  unsigned AS = S0->getPointerAddressSpace();
+  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
+  unsigned VF = VecRegSize / Sz;
+  unsigned ChainSize = Chain.size();
+
+  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
+    InstructionsProcessed->insert(Chain.begin(), Chain.end());
+    return false;
+  }
+
+  BasicBlock::iterator First, Last;
+  std::tie(First, Last) = getBoundaryInstrs(Chain);
+  unsigned StopChain = getVectorizablePrefixEndIdx(Chain, First, Last);
+  if (StopChain == 0) {
+    // There exists a side effect instruction, no vectorization possible.
+    InstructionsProcessed->insert(Chain.begin(), Chain.end());
+    return false;
+  }
+  if (StopChain == 1) {
+    // Failed after the first instruction. Discard it and try the smaller chain.
+    InstructionsProcessed->insert(Chain.front());
+    return false;
+  }
+
+  // Update Chain to the valid vectorizable subchain.
+ Chain = Chain.slice(0, StopChain); + ChainSize = Chain.size(); + + // Store size should be 1B, 2B or multiple of 4B. + // TODO: Target hook for size constraint? + unsigned SzInBytes = (Sz / 8) * ChainSize; + if (SzInBytes > 2 && SzInBytes % 4 != 0) { + DEBUG(dbgs() << "LSV: Size should be 1B, 2B " + "or multiple of 4B. Splitting.\n"); + if (SzInBytes == 3) + return vectorizeStoreChain(Chain.slice(0, ChainSize - 1), + InstructionsProcessed); + + auto Chains = splitOddVectorElts(Chain, Sz); + return vectorizeStoreChain(Chains.first, InstructionsProcessed) | + vectorizeStoreChain(Chains.second, InstructionsProcessed); + } + + VectorType *VecTy; + VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy); + if (VecStoreTy) + VecTy = VectorType::get(StoreTy->getScalarType(), + Chain.size() * VecStoreTy->getNumElements()); + else + VecTy = VectorType::get(StoreTy, Chain.size()); + + // If it's more than the max vector size, break it into two pieces. + // TODO: Target hook to control types to split to. + if (ChainSize > VF) { + DEBUG(dbgs() << "LSV: Vector factor is too big." + " Creating two separate arrays.\n"); + return vectorizeStoreChain(Chain.slice(0, VF), InstructionsProcessed) | + vectorizeStoreChain(Chain.slice(VF), InstructionsProcessed); + } + + DEBUG({ + dbgs() << "LSV: Stores to vectorize:\n"; + for (Value *V : Chain) + V->dump(); + }); + + // We won't try again to vectorize the elements of the chain, regardless of + // whether we succeed below. + InstructionsProcessed->insert(Chain.begin(), Chain.end()); + + // Check alignment restrictions. + unsigned Alignment = getAlignment(S0); + + // If the store is going to be misaligned, don't vectorize it. + if (accessIsMisaligned(SzInBytes, AS, Alignment)) { + if (S0->getPointerAddressSpace() != 0) + return false; + + // If we're storing to an object on the stack, we control its alignment, + // so we can cheat and change it! + Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL); + if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) { + AI->setAlignment(TargetBaseAlign); + Alignment = TargetBaseAlign; + } else { + return false; + } + } + + // Set insert point. 
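// (Design note, inferred from the code: a store chain is emitted at the
// *last* store so every scalar value operand is defined by the insertion
// point, while vectorizeLoadChain below inserts at the *first* load; these
// positions are what the reordering rules in getVectorizablePrefixEndIdx
// rely on.)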
+ Builder.SetInsertPoint(&*Last); + + Value *Vec = UndefValue::get(VecTy); + + if (VecStoreTy) { + unsigned VecWidth = VecStoreTy->getNumElements(); + for (unsigned I = 0, E = Chain.size(); I != E; ++I) { + StoreInst *Store = cast<StoreInst>(Chain[I]); + for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) { + unsigned NewIdx = J + I * VecWidth; + Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(), + Builder.getInt32(J)); + if (Extract->getType() != StoreTy->getScalarType()) + Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType()); + + Value *Insert = + Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx)); + Vec = Insert; + } + } + } else { + for (unsigned I = 0, E = Chain.size(); I != E; ++I) { + StoreInst *Store = cast<StoreInst>(Chain[I]); + Value *Extract = Store->getValueOperand(); + if (Extract->getType() != StoreTy->getScalarType()) + Extract = + Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType()); + + Value *Insert = + Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I)); + Vec = Insert; + } + } + + Value *Bitcast = + Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)); + StoreInst *SI = cast<StoreInst>(Builder.CreateStore(Vec, Bitcast)); + propagateMetadata(SI, Chain); + SI->setAlignment(Alignment); + + eraseInstructions(Chain); + ++NumVectorInstructions; + NumScalarsVectorized += Chain.size(); + return true; +} + +bool Vectorizer::vectorizeLoadChain( + ArrayRef<Value *> Chain, SmallPtrSet<Value *, 16> *InstructionsProcessed) { + LoadInst *L0 = cast<LoadInst>(Chain[0]); + + // If the vector has an int element, default to int for the whole load. + Type *LoadTy; + for (const auto &V : Chain) { + LoadTy = cast<LoadInst>(V)->getType(); + if (LoadTy->isIntOrIntVectorTy()) + break; + + if (LoadTy->isPtrOrPtrVectorTy()) { + LoadTy = Type::getIntNTy(F.getParent()->getContext(), + DL.getTypeSizeInBits(LoadTy)); + break; + } + } + + unsigned Sz = DL.getTypeSizeInBits(LoadTy); + unsigned AS = L0->getPointerAddressSpace(); + unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); + unsigned VF = VecRegSize / Sz; + unsigned ChainSize = Chain.size(); + + if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) { + InstructionsProcessed->insert(Chain.begin(), Chain.end()); + return false; + } + + BasicBlock::iterator First, Last; + std::tie(First, Last) = getBoundaryInstrs(Chain); + unsigned StopChain = getVectorizablePrefixEndIdx(Chain, First, Last); + if (StopChain == 0) { + // There exists a side effect instruction, no vectorization possible. + InstructionsProcessed->insert(Chain.begin(), Chain.end()); + return false; + } + if (StopChain == 1) { + // Failed after the first instruction. Discard it and try the smaller chain. + InstructionsProcessed->insert(Chain.front()); + return false; + } + + // Update Chain to the valid vectorizable subchain. + Chain = Chain.slice(0, StopChain); + ChainSize = Chain.size(); + + // Load size should be 1B, 2B or multiple of 4B. + // TODO: Should size constraint be a target hook? + unsigned SzInBytes = (Sz / 8) * ChainSize; + if (SzInBytes > 2 && SzInBytes % 4 != 0) { + DEBUG(dbgs() << "LSV: Size should be 1B, 2B " + "or multiple of 4B. 
Splitting.\n"); + if (SzInBytes == 3) + return vectorizeLoadChain(Chain.slice(0, ChainSize - 1), + InstructionsProcessed); + auto Chains = splitOddVectorElts(Chain, Sz); + return vectorizeLoadChain(Chains.first, InstructionsProcessed) | + vectorizeLoadChain(Chains.second, InstructionsProcessed); + } + + VectorType *VecTy; + VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy); + if (VecLoadTy) + VecTy = VectorType::get(LoadTy->getScalarType(), + Chain.size() * VecLoadTy->getNumElements()); + else + VecTy = VectorType::get(LoadTy, Chain.size()); + + // If it's more than the max vector size, break it into two pieces. + // TODO: Target hook to control types to split to. + if (ChainSize > VF) { + DEBUG(dbgs() << "LSV: Vector factor is too big. " + "Creating two separate arrays.\n"); + return vectorizeLoadChain(Chain.slice(0, VF), InstructionsProcessed) | + vectorizeLoadChain(Chain.slice(VF), InstructionsProcessed); + } + + // We won't try again to vectorize the elements of the chain, regardless of + // whether we succeed below. + InstructionsProcessed->insert(Chain.begin(), Chain.end()); + + // Check alignment restrictions. + unsigned Alignment = getAlignment(L0); + + // If the load is going to be misaligned, don't vectorize it. + if (accessIsMisaligned(SzInBytes, AS, Alignment)) { + if (L0->getPointerAddressSpace() != 0) + return false; + + // If we're loading from an object on the stack, we control its alignment, + // so we can cheat and change it! + Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL); + if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) { + AI->setAlignment(TargetBaseAlign); + Alignment = TargetBaseAlign; + } else { + return false; + } + } + + DEBUG({ + dbgs() << "LSV: Loads to vectorize:\n"; + for (Value *V : Chain) + V->dump(); + }); + + // Set insert point. + Builder.SetInsertPoint(&*First); + + Value *Bitcast = + Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS)); + + LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast)); + propagateMetadata(LI, Chain); + LI->setAlignment(Alignment); + + if (VecLoadTy) { + SmallVector<Instruction *, 16> InstrsToErase; + SmallVector<Instruction *, 16> InstrsToReorder; + InstrsToReorder.push_back(cast<Instruction>(Bitcast)); + + unsigned VecWidth = VecLoadTy->getNumElements(); + for (unsigned I = 0, E = Chain.size(); I != E; ++I) { + for (auto Use : Chain[I]->users()) { + Instruction *UI = cast<Instruction>(Use); + unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue(); + unsigned NewIdx = Idx + I * VecWidth; + Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx)); + Instruction *Extracted = cast<Instruction>(V); + if (Extracted->getType() != UI->getType()) + Extracted = cast<Instruction>( + Builder.CreateBitCast(Extracted, UI->getType())); + + // Replace the old instruction. 
+ UI->replaceAllUsesWith(Extracted); + InstrsToErase.push_back(UI); + } + } + + for (Instruction *ModUser : InstrsToReorder) + reorder(ModUser); + + for (auto I : InstrsToErase) + I->eraseFromParent(); + } else { + SmallVector<Instruction *, 16> InstrsToReorder; + InstrsToReorder.push_back(cast<Instruction>(Bitcast)); + + for (unsigned I = 0, E = Chain.size(); I != E; ++I) { + Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(I)); + Instruction *Extracted = cast<Instruction>(V); + Instruction *UI = cast<Instruction>(Chain[I]); + if (Extracted->getType() != UI->getType()) { + Extracted = cast<Instruction>( + Builder.CreateBitOrPointerCast(Extracted, UI->getType())); + } + + // Replace the old instruction. + UI->replaceAllUsesWith(Extracted); + } + + for (Instruction *ModUser : InstrsToReorder) + reorder(ModUser); + } + + eraseInstructions(Chain); + + ++NumVectorInstructions; + NumScalarsVectorized += Chain.size(); + return true; +} + +bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace, + unsigned Alignment) { + bool Fast = false; + bool Allows = TTI.allowsMisalignedMemoryAccesses(SzInBytes * 8, AddressSpace, + Alignment, &Fast); + // TODO: Remove TargetBaseAlign + return !(Allows && Fast) && (Alignment % SzInBytes) != 0 && + (Alignment % TargetBaseAlign) != 0; +} diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 17c25dfffc108..8b85e320d3b20 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -46,7 +46,7 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Transforms/Vectorize.h" +#include "llvm/Transforms/Vectorize/LoopVectorize.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/MapVector.h" @@ -56,23 +56,15 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" -#include "llvm/Analysis/AliasSetTracker.h" -#include "llvm/Analysis/AssumptionCache.h" -#include "llvm/Analysis/BlockFrequencyInfo.h" #include "llvm/Analysis/CodeMetrics.h" -#include "llvm/Analysis/DemandedBits.h" #include "llvm/Analysis/GlobalsModRef.h" -#include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopIterator.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" -#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" @@ -98,10 +90,10 @@ #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" -#include "llvm/Analysis/VectorUtils.h" #include "llvm/Transforms/Utils/LoopUtils.h" +#include "llvm/Transforms/Utils/LoopVersioning.h" +#include "llvm/Transforms/Vectorize.h" #include <algorithm> -#include <functional> #include <map> #include <tuple> @@ -115,37 +107,21 @@ STATISTIC(LoopsVectorized, "Number of loops vectorized"); STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); static cl::opt<bool> -EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden, - cl::desc("Enable if-conversion during vectorization.")); + 
EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden, + cl::desc("Enable if-conversion during vectorization.")); /// We don't vectorize loops with a known constant trip count below this number. -static cl::opt<unsigned> -TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), - cl::Hidden, - cl::desc("Don't vectorize loops with a constant " - "trip count that is smaller than this " - "value.")); +static cl::opt<unsigned> TinyTripCountVectorThreshold( + "vectorizer-min-trip-count", cl::init(16), cl::Hidden, + cl::desc("Don't vectorize loops with a constant " + "trip count that is smaller than this " + "value.")); static cl::opt<bool> MaximizeBandwidth( "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop.")); -/// This enables versioning on the strides of symbolically striding memory -/// accesses in code like the following. -/// for (i = 0; i < N; ++i) -/// A[i * Stride1] += B[i * Stride2] ... -/// -/// Will be roughly translated to -/// if (Stride1 == 1 && Stride2 == 1) { -/// for (i = 0; i < N; i+=4) -/// A[i:i+3] += ... -/// } else -/// ... -static cl::opt<bool> EnableMemAccessVersioning( - "enable-mem-access-versioning", cl::init(true), cl::Hidden, - cl::desc("Enable symbolic stride memory access versioning")); - static cl::opt<bool> EnableInterleavedMemAccesses( "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop")); @@ -262,7 +238,7 @@ public: /// A helper function for converting Scalar types to vector types. /// If the incoming type is void, we return void. If the VF is 1, we return /// the scalar type. -static Type* ToVectorTy(Type *Scalar, unsigned VF) { +static Type *ToVectorTy(Type *Scalar, unsigned VF) { if (Scalar->isVoidTy() || VF == 1) return Scalar; return VectorType::get(Scalar, VF); @@ -313,21 +289,25 @@ public: InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, - const TargetTransformInfo *TTI, unsigned VecWidth, - unsigned UnrollFactor) + const TargetTransformInfo *TTI, AssumptionCache *AC, + unsigned VecWidth, unsigned UnrollFactor) : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), - VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()), - Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor), - TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr), - AddedSafetyChecks(false) {} + AC(AC), VF(VecWidth), UF(UnrollFactor), + Builder(PSE.getSE()->getContext()), Induction(nullptr), + OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr), + VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {} // Perform the actual loop widening (vectorization). // MinimumBitWidths maps scalar integer values to the smallest bitwidth they // can be validly truncated to. The cost model has assumed this truncation - // will happen when vectorizing. + // will happen when vectorizing. VecValuesToIgnore contains scalar values + // that the cost model has chosen to ignore because they will not be + // vectorized. 
void vectorize(LoopVectorizationLegality *L, - MapVector<Instruction*,uint64_t> MinimumBitWidths) { - MinBWs = MinimumBitWidths; + const MapVector<Instruction *, uint64_t> &MinimumBitWidths, + SmallPtrSetImpl<const Value *> &VecValuesToIgnore) { + MinBWs = &MinimumBitWidths; + ValuesNotWidened = &VecValuesToIgnore; Legal = L; // Create a new empty loop. Unlink the old loop and connect the new one. createEmptyLoop(); @@ -337,33 +317,41 @@ public: } // Return true if any runtime check is added. - bool IsSafetyChecksAdded() { - return AddedSafetyChecks; - } + bool areSafetyChecksAdded() { return AddedSafetyChecks; } virtual ~InnerLoopVectorizer() {} protected: /// A small list of PHINodes. - typedef SmallVector<PHINode*, 4> PhiVector; + typedef SmallVector<PHINode *, 4> PhiVector; /// When we unroll loops we have multiple vector values for each scalar. /// This data structure holds the unrolled and vectorized values that /// originated from one scalar instruction. - typedef SmallVector<Value*, 2> VectorParts; + typedef SmallVector<Value *, 2> VectorParts; // When we if-convert we need to create edge masks. We have to cache values // so that we don't end up with exponential recursion/IR. - typedef DenseMap<std::pair<BasicBlock*, BasicBlock*>, - VectorParts> EdgeMaskCache; + typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts> + EdgeMaskCache; /// Create an empty loop, based on the loop ranges of the old loop. void createEmptyLoop(); + + /// Set up the values of the IVs correctly when exiting the vector loop. + void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, + Value *CountRoundDown, Value *EndValue, + BasicBlock *MiddleBlock); + /// Create a new induction variable inside L. PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, Value *Step, Instruction *DL); /// Copy and widen the instructions from the old loop. virtual void vectorizeLoop(); + /// Fix a first-order recurrence. This is the second phase of vectorizing + /// this phi node. + void fixFirstOrderRecurrence(PHINode *Phi); + /// \brief The Loop exit block may have single value PHI nodes where the /// incoming value is 'Undef'. While vectorizing we only handled real values /// that were defined inside the loop. Here we fix the 'undef case'. @@ -372,7 +360,7 @@ protected: /// Shrinks vector element sizes based on information in "MinBWs". void truncateToMinimalBitwidths(); - + /// A helper function that computes the predicate of the block BB, assuming /// that the header block of the loop is set to True. It returns the *entry* /// mask for the block BB. @@ -383,12 +371,12 @@ protected: /// A helper function to vectorize a single BB within the innermost loop. void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV); - + /// Vectorize a single PHINode in a block. This method handles the induction /// variable canonicalization. It supports both VF = 1 for unrolled loops and /// arbitrary length vectors. - void widenPHIInstruction(Instruction *PN, VectorParts &Entry, - unsigned UF, unsigned VF, PhiVector *PV); + void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF, + unsigned VF, PhiVector *PV); /// Insert the new loop to the loop hierarchy and pass manager /// and update the analysis passes. @@ -399,7 +387,7 @@ protected: /// scalarized instruction behind an if block predicated on the control /// dependence of the instruction. 
   virtual void scalarizeInstruction(Instruction *Instr,
-                                    bool IfPredicateStore=false);
+                                    bool IfPredicateStore = false);
   /// Vectorize Load and Store instructions.
   virtual void vectorizeMemoryInstruction(Instruction *Instr);
@@ -415,6 +403,26 @@ protected:
   /// to each vector element of Val. The sequence starts at StartIndex.
   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);
+  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
+  /// variable on which to base the steps, \p Step is the size of the step, and
+  /// \p EntryVal is the value from the original loop that maps to the steps.
+  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
+  /// can be a truncate instruction).
+  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);
+
+  /// Create a vector induction phi node based on an existing scalar one. This
+  /// currently only works for integer induction variables with a constant
+  /// step. If \p TruncType is non-null, instead of widening the original IV,
+  /// we widen a version of the IV truncated to \p TruncType.
+  void createVectorIntInductionPHI(const InductionDescriptor &II,
+                                   VectorParts &Entry, IntegerType *TruncType);
+
+  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
+  /// induction variable will first be truncated to the corresponding type. The
+  /// widened values are placed in \p Entry.
+  void widenIntInduction(PHINode *IV, VectorParts &Entry,
+                         TruncInst *Trunc = nullptr);
+
   /// When we go over instructions in the basic block we rely on previous
   /// values within the current basic block or on loop invariant values.
   /// When we widen (vectorize) values we place them in the map. If the values
@@ -445,6 +453,24 @@ protected:
   /// Emit bypass checks to check any memory assumptions we may have made.
   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
+  /// Add additional metadata to \p To that was not present on \p Orig.
+  ///
+  /// Currently this is used to add the noalias annotations based on the
+  /// inserted memchecks. Use this for instructions that are *cloned* into the
+  /// vector loop.
+  void addNewMetadata(Instruction *To, const Instruction *Orig);
+
+  /// Add metadata from one instruction to another.
+  ///
+  /// This includes both the original MDs from \p From and additional ones (\see
+  /// addNewMetadata). Use this for *newly created* instructions in the vector
+  /// loop.
+  void addMetadata(Instruction *To, Instruction *From);
+
+  /// \brief Similar to the previous function but it adds the metadata to a
+  /// vector of instructions.
+  void addMetadata(ArrayRef<Value *> To, Instruction *From);
+
   /// This is a helper class that holds the vectorizer state. It maps scalar
   /// instructions to vector instructions. When the code is 'unrolled' then
   /// a single scalar value is mapped to multiple vector parts. The parts
@@ -501,6 +527,15 @@ protected:
   const TargetLibraryInfo *TLI;
   /// Target Transform Info.
   const TargetTransformInfo *TTI;
+  /// Assumption Cache.
+  AssumptionCache *AC;
+
+  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
+  /// used.
+  ///
+  /// This is currently only used to add no-alias metadata based on the
+  /// memchecks. The actual versioning is performed manually.
+  std::unique_ptr<LoopVersioning> LVer;
   /// The vectorization SIMD factor to use. Each vector will have this many
   /// vector elements.
@@ -522,11 +557,11 @@ protected:
   BasicBlock *LoopScalarPreHeader;
   /// Middle Block between the vector and the scalar.
BasicBlock *LoopMiddleBlock; - ///The ExitBlock of the scalar loop. + /// The ExitBlock of the scalar loop. BasicBlock *LoopExitBlock; - ///The vector loop body. - SmallVector<BasicBlock *, 4> LoopVectorBody; - ///The scalar loop body. + /// The vector loop body. + BasicBlock *LoopVectorBody; + /// The scalar loop body. BasicBlock *LoopScalarBody; /// A list of all bypass blocks. The first block is the entry of the loop. SmallVector<BasicBlock *, 4> LoopBypassBlocks; @@ -537,9 +572,20 @@ protected: PHINode *OldInduction; /// Maps scalars to widened vectors. ValueMap WidenMap; + + /// A map of induction variables from the original loop to their + /// corresponding VF * UF scalarized values in the vectorized loop. The + /// purpose of ScalarIVMap is similar to that of WidenMap. Whereas WidenMap + /// maps original loop values to their vector versions in the new loop, + /// ScalarIVMap maps induction variables from the original loop that are not + /// vectorized to their scalar equivalents in the vector loop. Maintaining a + /// separate map for scalarized induction variables allows us to avoid + /// unnecessary scalar-to-vector-to-scalar conversions. + DenseMap<Value *, SmallVector<Value *, 8>> ScalarIVMap; + /// Store instructions that should be predicated, as a pair /// <StoreInst, Predicate> - SmallVector<std::pair<StoreInst*,Value*>, 4> PredicatedStores; + SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores; EdgeMaskCache MaskCache; /// Trip count of the original loop. Value *TripCount; @@ -549,10 +595,15 @@ protected: /// Map of scalar integer values to the smallest bitwidth they can be legally /// represented as. The vector equivalents of these values should be truncated /// to this type. - MapVector<Instruction*,uint64_t> MinBWs; + const MapVector<Instruction *, uint64_t> *MinBWs; + + /// A set of values that should not be widened. This is taken from + /// VecValuesToIgnore in the cost model. + SmallPtrSetImpl<const Value *> *ValuesNotWidened; + LoopVectorizationLegality *Legal; - // Record whether runtime check is added. + // Record whether runtime checks are added. bool AddedSafetyChecks; }; @@ -561,8 +612,10 @@ public: InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, - const TargetTransformInfo *TTI, unsigned UnrollFactor) - : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, 1, UnrollFactor) {} + const TargetTransformInfo *TTI, AssumptionCache *AC, + unsigned UnrollFactor) + : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1, + UnrollFactor) {} private: void scalarizeInstruction(Instruction *Instr, @@ -618,36 +671,26 @@ static std::string getDebugLocString(const Loop *L) { } #endif -/// \brief Propagate known metadata from one instruction to another. -static void propagateMetadata(Instruction *To, const Instruction *From) { - SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; - From->getAllMetadataOtherThanDebugLoc(Metadata); - - for (auto M : Metadata) { - unsigned Kind = M.first; - - // These are safe to transfer (this is safe for TBAA, even when we - // if-convert, because should that metadata have had a control dependency - // on the condition, and thus actually aliased with some other - // non-speculated memory access when the condition was false, this would be - // caught by the runtime overlap checks). 
- if (Kind != LLVMContext::MD_tbaa && - Kind != LLVMContext::MD_alias_scope && - Kind != LLVMContext::MD_noalias && - Kind != LLVMContext::MD_fpmath && - Kind != LLVMContext::MD_nontemporal) - continue; +void InnerLoopVectorizer::addNewMetadata(Instruction *To, + const Instruction *Orig) { + // If the loop was versioned with memchecks, add the corresponding no-alias + // metadata. + if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) + LVer->annotateInstWithNoAlias(To, Orig); +} - To->setMetadata(Kind, M.second); - } +void InnerLoopVectorizer::addMetadata(Instruction *To, + Instruction *From) { + propagateMetadata(To, From); + addNewMetadata(To, From); } -/// \brief Propagate known metadata from one instruction to a vector of others. -static void propagateMetadata(SmallVectorImpl<Value *> &To, - const Instruction *From) { - for (Value *V : To) +void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, + Instruction *From) { + for (Value *V : To) { if (Instruction *I = dyn_cast<Instruction>(V)) - propagateMetadata(I, From); + addMetadata(I, From); + } } /// \brief The group of interleaved loads/stores sharing the same stride and @@ -785,8 +828,9 @@ private: class InterleavedAccessInfo { public: InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L, - DominatorTree *DT) - : PSE(PSE), TheLoop(L), DT(DT) {} + DominatorTree *DT, LoopInfo *LI) + : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr), + RequiresScalarEpilogue(false) {} ~InterleavedAccessInfo() { SmallSet<InterleaveGroup *, 4> DelSet; @@ -806,6 +850,14 @@ public: return InterleaveGroupMap.count(Instr); } + /// \brief Return the maximum interleave factor of all interleaved groups. + unsigned getMaxInterleaveFactor() const { + unsigned MaxFactor = 1; + for (auto &Entry : InterleaveGroupMap) + MaxFactor = std::max(MaxFactor, Entry.second->getFactor()); + return MaxFactor; + } + /// \brief Get the interleave group that \p Instr belongs to. /// /// \returns nullptr if doesn't have such group. @@ -815,6 +867,13 @@ public: return nullptr; } + /// \brief Returns true if an interleaved group that may access memory + /// out-of-bounds requires a scalar epilogue iteration for correctness. + bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; } + + /// \brief Initialize the LoopAccessInfo used for dependence checking. + void setLAI(const LoopAccessInfo *Info) { LAI = Info; } + private: /// A wrapper around ScalarEvolution, used to add runtime SCEV checks. /// Simplifies SCEV expressions in the context of existing SCEV assumptions. @@ -823,24 +882,39 @@ private: PredicatedScalarEvolution &PSE; Loop *TheLoop; DominatorTree *DT; + LoopInfo *LI; + const LoopAccessInfo *LAI; + + /// True if the loop may contain non-reversed interleaved groups with + /// out-of-bounds accesses. We ensure we don't speculatively access memory + /// out-of-bounds by executing at least one scalar epilogue iteration. + bool RequiresScalarEpilogue; /// Holds the relationships between the members and the interleave group. DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap; + /// Holds dependences among the memory accesses in the loop. It maps a source + /// access to a set of dependent sink accesses. + DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences; + /// \brief The descriptor for a strided memory access. 
struct StrideDescriptor { - StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size, + StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size, unsigned Align) : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {} - StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {} + StrideDescriptor() = default; - int Stride; // The access's stride. It is negative for a reverse access. - const SCEV *Scev; // The scalar expression of this access - unsigned Size; // The size of the memory object. - unsigned Align; // The alignment of this access. + // The access's stride. It is negative for a reverse access. + int64_t Stride = 0; + const SCEV *Scev = nullptr; // The scalar expression of this access + uint64_t Size = 0; // The size of the memory object. + unsigned Align = 0; // The alignment of this access. }; + /// \brief A type for holding instructions and their stride descriptors. + typedef std::pair<Instruction *, StrideDescriptor> StrideEntry; + /// \brief Create a new interleave group with the given instruction \p Instr, /// stride \p Stride and alignment \p Align. /// @@ -863,9 +937,86 @@ private: } /// \brief Collect all the accesses with a constant stride in program order. - void collectConstStridedAccesses( - MapVector<Instruction *, StrideDescriptor> &StrideAccesses, + void collectConstStrideAccesses( + MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, const ValueToValueMap &Strides); + + /// \brief Returns true if \p Stride is allowed in an interleaved group. + static bool isStrided(int Stride) { + unsigned Factor = std::abs(Stride); + return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; + } + + /// \brief Returns true if \p BB is a predicated block. + bool isPredicated(BasicBlock *BB) const { + return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); + } + + /// \brief Returns true if LoopAccessInfo can be used for dependence queries. + bool areDependencesValid() const { + return LAI && LAI->getDepChecker().getDependences(); + } + + /// \brief Returns true if memory accesses \p A and \p B can be reordered, if + /// necessary, when constructing interleaved groups. + /// + /// \p A must precede \p B in program order. We return false if reordering is + /// not necessary or is prevented because \p A and \p B may be dependent. + bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A, + StrideEntry *B) const { + + // Code motion for interleaved accesses can potentially hoist strided loads + // and sink strided stores. The code below checks the legality of the + // following two conditions: + // + // 1. Potentially moving a strided load (B) before any store (A) that + // precedes B, or + // + // 2. Potentially moving a strided store (A) after any load or store (B) + // that A precedes. + // + // It's legal to reorder A and B if we know there isn't a dependence from A + // to B. Note that this determination is conservative since some + // dependences could potentially be reordered safely. + + // A is potentially the source of a dependence. + auto *Src = A->first; + auto SrcDes = A->second; + + // B is potentially the sink of a dependence. + auto *Sink = B->first; + auto SinkDes = B->second; + + // Code motion for interleaved accesses can't violate WAR dependences. + // Thus, reordering is legal if the source isn't a write. + if (!Src->mayWriteToMemory()) + return true; + + // At least one of the accesses must be strided. 
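// (Editorial gloss: if neither access is strided, neither can belong to an
// interleaved group, so group formation never needs to move one across the
// other and reordering is trivially acceptable.)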
+ if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride)) + return true; + + // If dependence information is not available from LoopAccessInfo, + // conservatively assume the instructions can't be reordered. + if (!areDependencesValid()) + return false; + + // If we know there is a dependence from source to sink, assume the + // instructions can't be reordered. Otherwise, reordering is legal. + return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink); + } + + /// \brief Collect the dependences from LoopAccessInfo. + /// + /// We process the dependences once during the interleaved access analysis to + /// enable constant-time dependence queries. + void collectDependences() { + if (!areDependencesValid()) + return; + auto *Deps = LAI->getDepChecker().getDependences(); + for (auto Dep : *Deps) + Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI)); + } }; /// Utility class for getting and setting loop vectorizer hints in the form @@ -878,20 +1029,16 @@ private: /// for example 'force', means a decision has been made. So, we need to be /// careful NOT to add them if the user hasn't specifically asked so. class LoopVectorizeHints { - enum HintKind { - HK_WIDTH, - HK_UNROLL, - HK_FORCE - }; + enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE }; /// Hint - associates name and validation with the hint value. struct Hint { - const char * Name; + const char *Name; unsigned Value; // This may have to change for non-numeric values. HintKind Kind; - Hint(const char * Name, unsigned Value, HintKind Kind) - : Name(Name), Value(Value), Kind(Kind) { } + Hint(const char *Name, unsigned Value, HintKind Kind) + : Name(Name), Value(Value), Kind(Kind) {} bool validate(unsigned Val) { switch (Kind) { @@ -916,6 +1063,9 @@ class LoopVectorizeHints { /// Return the loop metadata prefix. static StringRef Prefix() { return "llvm.loop."; } + /// True if there is any unsafe math in the loop. + bool PotentiallyUnsafe; + public: enum ForceKind { FK_Undefined = -1, ///< Not selected. @@ -928,7 +1078,7 @@ public: HK_WIDTH), Interleave("interleave.count", DisableInterleaving, HK_UNROLL), Force("vectorize.enable", FK_Undefined, HK_FORCE), - TheLoop(L) { + PotentiallyUnsafe(false), TheLoop(L) { // Populate values with existing loop metadata. getHintsFromMetadata(); @@ -1005,16 +1155,17 @@ public: unsigned getWidth() const { return Width.Value; } unsigned getInterleave() const { return Interleave.Value; } enum ForceKind getForce() const { return (ForceKind)Force.Value; } + + /// \brief If hints are provided that force vectorization, use the AlwaysPrint + /// pass name to force the frontend to print the diagnostic. const char *vectorizeAnalysisPassName() const { - // If hints are provided that don't disable vectorization use the - // AlwaysPrint pass name to force the frontend to print the diagnostic. if (getWidth() == 1) return LV_NAME; if (getForce() == LoopVectorizeHints::FK_Disabled) return LV_NAME; if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0) return LV_NAME; - return DiagnosticInfo::AlwaysPrint; + return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint; } bool allowReordering() const { @@ -1026,6 +1177,17 @@ public: return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1; } + bool isPotentiallyUnsafe() const { + // Avoid FP vectorization if the target is unsure about proper support. + // This may be related to the SIMD unit in the target not handling + // IEEE 754 FP ops properly, or bad single-to-double promotions. 
+ // Otherwise, a sequence of vectorized loops, even without reduction, + // could lead to different end results on the destination vectors. + return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe; + } + + void setPotentiallyUnsafe() { PotentiallyUnsafe = true; } + private: /// Find hints specified in the loop metadata and update local values. void getHintsFromMetadata() { @@ -1071,7 +1233,8 @@ private: Name = Name.substr(Prefix().size(), StringRef::npos); const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg); - if (!C) return; + if (!C) + return; unsigned Val = C->getZExtValue(); Hint *Hints[] = {&Width, &Interleave, &Force}; @@ -1097,7 +1260,7 @@ private: /// Matches metadata with hint name. bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) { - MDString* Name = dyn_cast<MDString>(Node->getOperand(0)); + MDString *Name = dyn_cast<MDString>(Node->getOperand(0)); if (!Name) return false; @@ -1181,17 +1344,17 @@ static void emitMissedWarning(Function *F, Loop *L, /// induction variable and the different reduction variables. class LoopVectorizationLegality { public: - LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE, - DominatorTree *DT, TargetLibraryInfo *TLI, - AliasAnalysis *AA, Function *F, - const TargetTransformInfo *TTI, - LoopAccessAnalysis *LAA, - LoopVectorizationRequirements *R, - const LoopVectorizeHints *H) + LoopVectorizationLegality( + Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, + TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, + const TargetTransformInfo *TTI, + std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, + LoopVectorizationRequirements *R, LoopVectorizeHints *H) : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F), - TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT), - Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false), - Requirements(R), Hints(H) {} + TTI(TTI), DT(DT), GetLAA(GetLAA), LAI(nullptr), + InterleaveInfo(PSE, L, DT, LI), Induction(nullptr), + WidestIndTy(nullptr), HasFunNoNaNAttr(false), Requirements(R), + Hints(H) {} /// ReductionList contains the reduction descriptors for all /// of the reductions that were found in the loop. @@ -1199,7 +1362,11 @@ public: /// InductionList saves induction variables and maps them to the /// induction descriptor. - typedef MapVector<PHINode*, InductionDescriptor> InductionList; + typedef MapVector<PHINode *, InductionDescriptor> InductionList; + + /// RecurrenceSet contains the phi nodes that are recurrences other than + /// inductions and reductions. + typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet; /// Returns true if it is legal to vectorize this loop. /// This does not mean that it is profitable to vectorize this @@ -1215,6 +1382,9 @@ public: /// Returns the induction variables found in the loop. InductionList *getInductionVars() { return &Inductions; } + /// Return the first-order recurrences found in the loop. + RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } + /// Returns the widest induction type. Type *getWidestInductionType() { return WidestIndTy; } @@ -1224,11 +1394,14 @@ public: /// Returns True if PN is a reduction variable in this loop. bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } + /// Returns True if Phi is a first-order recurrence in this loop. + bool isFirstOrderRecurrence(const PHINode *Phi); + /// Return true if the block BB needs to be predicated in order for the loop /// to be vectorized. 
bool blockNeedsPredication(BasicBlock *BB); - /// Check if this pointer is consecutive when vectorizing. This happens + /// Check if this pointer is consecutive when vectorizing. This happens /// when the last index of the GEP is the induction variable, or that the /// pointer itself is an induction variable. /// This check allows us to vectorize A[idx] into a wide load/store. @@ -1242,35 +1415,39 @@ public: bool isUniform(Value *V); /// Returns true if this instruction will remain scalar after vectorization. - bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); } + bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } /// Returns the information that we collected about runtime memory check. const RuntimePointerChecking *getRuntimePointerChecking() const { return LAI->getRuntimePointerChecking(); } - const LoopAccessInfo *getLAI() const { - return LAI; - } + const LoopAccessInfo *getLAI() const { return LAI; } /// \brief Check if \p Instr belongs to any interleaved access group. bool isAccessInterleaved(Instruction *Instr) { return InterleaveInfo.isInterleaved(Instr); } + /// \brief Return the maximum interleave factor of all interleaved groups. + unsigned getMaxInterleaveFactor() const { + return InterleaveInfo.getMaxInterleaveFactor(); + } + /// \brief Get the interleaved access group that \p Instr belongs to. const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { return InterleaveInfo.getInterleaveGroup(Instr); } + /// \brief Returns true if an interleaved group requires a scalar iteration + /// to handle accesses with gaps. + bool requiresScalarEpilogue() const { + return InterleaveInfo.requiresScalarEpilogue(); + } + unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } - bool hasStride(Value *V) { return StrideSet.count(V); } - bool mustCheckStrides() { return !StrideSet.empty(); } - SmallPtrSet<Value *, 8>::iterator strides_begin() { - return StrideSet.begin(); - } - SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); } + bool hasStride(Value *V) { return LAI->hasStride(V); } /// Returns true if the target machine supports masked store operation /// for the given \p DataType and kind of access to \p Ptr. @@ -1282,20 +1459,24 @@ public: bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); } - /// Returns true if vector representation of the instruction \p I - /// requires mask. - bool isMaskRequired(const Instruction* I) { - return (MaskedOp.count(I) != 0); - } - unsigned getNumStores() const { - return LAI->getNumStores(); + /// Returns true if the target machine supports masked scatter operation + /// for the given \p DataType. + bool isLegalMaskedScatter(Type *DataType) { + return TTI->isLegalMaskedScatter(DataType); } - unsigned getNumLoads() const { - return LAI->getNumLoads(); - } - unsigned getNumPredStores() const { - return NumPredStores; + /// Returns true if the target machine supports masked gather operation + /// for the given \p DataType. + bool isLegalMaskedGather(Type *DataType) { + return TTI->isLegalMaskedGather(DataType); } + + /// Returns true if vector representation of the instruction \p I + /// requires mask. 
+ bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
+ unsigned getNumStores() const { return LAI->getNumStores(); }
+ unsigned getNumLoads() const { return LAI->getNumLoads(); }
+ unsigned getNumPredStores() const { return NumPredStores; }
+
 private:
 /// Check if a single basic block loop is vectorizable.
 /// At this point we know that this is a loop with a constant trip count
@@ -1320,11 +1501,11 @@ private:
 /// and we know that we can read from them without segfault.
 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

- /// \brief Collect memory access with loop invariant strides.
- ///
- /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
- /// invariant.
- void collectStridedAccess(Value *LoadOrStoreInst);
+ /// Updates the vectorization state by adding \p Phi to the inductions list.
+ /// This can set \p Phi as the main induction of the loop if \p Phi is a
+ /// better choice for the main induction than the existing one.
+ void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
+ SmallPtrSetImpl<Value *> &AllowedExit);

 /// Report an analysis message to assist the user in diagnosing loops that are
 /// not vectorized. These are handled as LoopAccessReport rather than
@@ -1334,6 +1515,16 @@ private:
 emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
 }

+ /// \brief If an access has a symbolic stride, this maps the pointer value to
+ /// the stride symbol.
+ const ValueToValueMap *getSymbolicStrides() {
+ // FIXME: Currently, the set of symbolic strides is sometimes queried before
+ // it's collected. This happens from canVectorizeWithIfConvert, when the
+ // pointer is checked to reference consecutive elements suitable for a
+ // masked access.
+ return LAI ? &LAI->getSymbolicStrides() : nullptr;
+ }
+
 unsigned NumPredStores;

 /// The loop that we evaluate.
@@ -1353,7 +1544,7 @@ private:
 /// Dominator Tree.
 DominatorTree *DT;
 // LoopAccess analysis.
- LoopAccessAnalysis *LAA;
+ std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
 // And the loop-accesses info corresponding to this loop. This pointer is
 // null until canVectorizeMemory sets it up.
 const LoopAccessInfo *LAI;
@@ -1373,15 +1564,17 @@ private:
 /// Notice that inductions don't need to start at zero and that induction
 /// variables can be pointers.
 InductionList Inductions;
+ /// Holds the phi nodes that are first-order recurrences.
+ RecurrenceSet FirstOrderRecurrences;
 /// Holds the widest induction type encountered.
 Type *WidestIndTy;

- /// Allowed outside users. This holds the reduction
+ /// Allowed outside users. This holds the induction and reduction
 /// vars which can be accessed from outside the loop.
- SmallPtrSet<Value*, 4> AllowedExit;
+ SmallPtrSet<Value *, 4> AllowedExit;
 /// This set holds the variables which are known to be uniform after
 /// vectorization.
- SmallPtrSet<Instruction*, 4> Uniforms;
+ SmallPtrSet<Instruction *, 4> Uniforms;

 /// Can we assume the absence of NaNs.
 bool HasFunNoNaNAttr;
@@ -1390,10 +1583,7 @@ private:
 LoopVectorizationRequirements *Requirements;

 /// Used to emit an analysis of any legality issues.
- const LoopVectorizeHints *Hints;
-
- ValueToValueMap Strides;
- SmallPtrSet<Value *, 8> StrideSet;
+ LoopVectorizeHints *Hints;

 /// While vectorizing these instructions we have to generate a
 /// call to the appropriate masked intrinsic
@@ -1409,20 +1599,19 @@ private:
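// [Illustrative aside, not part of this patch] The kind of conditional
// access the masked-intrinsic hooks above exist for: after if-conversion
// the load below must be predicated per lane, which is only legal when
// the target supports llvm.masked.load for the element type (see
// isLegalMaskedLoad). The function name is hypothetical.
static int sumGuarded(const int *A, const unsigned char *Cond, int N) {
  int Sum = 0;
  for (int i = 0; i < N; ++i)
    if (Cond[i]) // becomes the mask of the wide load after vectorization
      Sum += A[i];
  return Sum;
}

 /// different operations.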
class LoopVectorizationCostModel {
public:
- LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
- LoopVectorizationLegality *Legal,
+ LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
+ LoopInfo *LI, LoopVectorizationLegality *Legal,
 const TargetTransformInfo &TTI,
 const TargetLibraryInfo *TLI, DemandedBits *DB,
 AssumptionCache *AC, const Function *F,
- const LoopVectorizeHints *Hints,
- SmallPtrSetImpl<const Value *> &ValuesToIgnore)
- : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
- TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}
+ const LoopVectorizeHints *Hints)
+ : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
+ AC(AC), TheFunction(F), Hints(Hints) {}

 /// Information about vectorization costs
 struct VectorizationFactor {
 unsigned Width; // Vector width with best cost
- unsigned Cost; // Cost of the loop with that width
+ unsigned Cost; // Cost of the loop with that width
 };
 /// \return The most profitable vectorization factor and the cost of that VF.
 /// This method checks every power of two up to VF. If UserVF is not ZERO
@@ -1462,19 +1651,34 @@ public:

 /// \return Returns information about the register usages of the loop for the
 /// given vectorization factors.
- SmallVector<RegisterUsage, 8>
- calculateRegisterUsage(const SmallVector<unsigned, 8> &VFs);
+ SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
+
+ /// Collect values we want to ignore in the cost model.
+ void collectValuesToIgnore();

private:
+ /// The vectorization cost is a combination of the cost itself and a boolean
+ /// indicating whether any of the contributing operations will actually
+ /// operate on vector values after type legalization in the backend. If this
+ /// latter value is false, then all operations will be scalarized (i.e. no
+ /// vectorization has actually taken place).
+ typedef std::pair<unsigned, bool> VectorizationCostTy;
+
 /// Returns the expected execution cost. The unit of the cost does
 /// not matter because we use the 'cost' units to compare different
 /// vector widths. The cost that is returned is *not* normalized by
 /// the factor width.
- unsigned expectedCost(unsigned VF);
+ VectorizationCostTy expectedCost(unsigned VF);

 /// Returns the execution time cost of an instruction for a given vector
 /// width. Vector width of one means scalar.
- unsigned getInstructionCost(Instruction *I, unsigned VF);
+ VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
+
+ /// The cost-computation logic from getInstructionCost which provides
+ /// the vector type as an output parameter.
+ unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

 /// Returns whether the instruction is a load or store and will be emitted
 /// as a vector operation.
@@ -1492,12 +1696,12 @@ public:

 /// Map of scalar integer values to the smallest bitwidth they can be legally
 /// represented as. The vector equivalents of these values should be truncated
 /// to this type.
- MapVector<Instruction*,uint64_t> MinBWs;
+ MapVector<Instruction *, uint64_t> MinBWs;

 /// The loop that we evaluate.
 Loop *TheLoop;
- /// Scev analysis.
- ScalarEvolution *SE;
+ /// Predicated scalar evolution analysis.
+ PredicatedScalarEvolution &PSE;
 /// Loop Info analysis.
 LoopInfo *LI;
 /// Vectorization legality.
@@ -1506,13 +1710,17 @@ public:
 const TargetTransformInfo &TTI;
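// [Illustrative aside, not part of this patch] How the (cost, is-vector)
// pair named VectorizationCostTy above composes across instructions:
// costs add, and the booleans OR, so a candidate VF only counts as real
// vectorization if at least one instruction survives type legalization
// as a vector operation. The helper name is hypothetical.
static std::pair<unsigned, bool> combineCosts(std::pair<unsigned, bool> A,
                                              std::pair<unsigned, bool> B) {
  return {A.first + B.first, A.second || B.second};
}

 /// Target Library Info.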
const TargetLibraryInfo *TLI; - /// Demanded bits analysis + /// Demanded bits analysis. DemandedBits *DB; + /// Assumption cache. + AssumptionCache *AC; const Function *TheFunction; - // Loop Vectorize Hint. + /// Loop Vectorize Hint. const LoopVectorizeHints *Hints; - // Values to ignore in the cost model. - const SmallPtrSetImpl<const Value *> &ValuesToIgnore; + /// Values to ignore in the cost model. + SmallPtrSet<const Value *, 16> ValuesToIgnore; + /// Values to ignore in the cost model when VF > 1. + SmallPtrSet<const Value *, 16> VecValuesToIgnore; }; /// \brief This holds vectorization requirements that must be verified late in @@ -1588,328 +1796,35 @@ struct LoopVectorize : public FunctionPass { static char ID; explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) - : FunctionPass(ID), - DisableUnrolling(NoUnrolling), - AlwaysVectorize(AlwaysVectorize) { + : FunctionPass(ID) { + Impl.DisableUnrolling = NoUnrolling; + Impl.AlwaysVectorize = AlwaysVectorize; initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); } - ScalarEvolution *SE; - LoopInfo *LI; - TargetTransformInfo *TTI; - DominatorTree *DT; - BlockFrequencyInfo *BFI; - TargetLibraryInfo *TLI; - DemandedBits *DB; - AliasAnalysis *AA; - AssumptionCache *AC; - LoopAccessAnalysis *LAA; - bool DisableUnrolling; - bool AlwaysVectorize; - - BlockFrequency ColdEntryFreq; + LoopVectorizePass Impl; bool runOnFunction(Function &F) override { - SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); - LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); - TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); - DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); - BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); - auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); - TLI = TLIP ? &TLIP->getTLI() : nullptr; - AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); - AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); - LAA = &getAnalysis<LoopAccessAnalysis>(); - DB = &getAnalysis<DemandedBits>(); - - // Compute some weights outside of the loop over the loops. Compute this - // using a BranchProbability to re-use its scaling math. - const BranchProbability ColdProb(1, 5); // 20% - ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; - - // Don't attempt if - // 1. the target claims to have no vector registers, and - // 2. interleaving won't help ILP. - // - // The second condition is necessary because, even if the target has no - // vector registers, loop vectorization may still enable scalar - // interleaving. - if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) + if (skipFunction(F)) return false; - // Build up a worklist of inner-loops to vectorize. This is necessary as - // the act of vectorizing or partially unrolling a loop creates new loops - // and can invalidate iterators across the loops. - SmallVector<Loop *, 8> Worklist; - - for (Loop *L : *LI) - addInnerLoop(*L, Worklist); - - LoopsAnalyzed += Worklist.size(); - - // Now walk the identified inner loops. - bool Changed = false; - while (!Worklist.empty()) - Changed |= processLoop(Worklist.pop_back_val()); - - // Process each loop nest in the function. - return Changed; - } - - static void AddRuntimeUnrollDisableMetaData(Loop *L) { - SmallVector<Metadata *, 4> MDs; - // Reserve first location for self reference to the LoopID metadata node. 
- MDs.push_back(nullptr); - bool IsUnrollMetadata = false; - MDNode *LoopID = L->getLoopID(); - if (LoopID) { - // First find existing loop unrolling disable metadata. - for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { - MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); - if (MD) { - const MDString *S = dyn_cast<MDString>(MD->getOperand(0)); - IsUnrollMetadata = - S && S->getString().startswith("llvm.loop.unroll.disable"); - } - MDs.push_back(LoopID->getOperand(i)); - } - } - - if (!IsUnrollMetadata) { - // Add runtime unroll disable metadata. - LLVMContext &Context = L->getHeader()->getContext(); - SmallVector<Metadata *, 1> DisableOperands; - DisableOperands.push_back( - MDString::get(Context, "llvm.loop.unroll.runtime.disable")); - MDNode *DisableNode = MDNode::get(Context, DisableOperands); - MDs.push_back(DisableNode); - MDNode *NewLoopID = MDNode::get(Context, MDs); - // Set operand 0 to refer to the loop id itself. - NewLoopID->replaceOperandWith(0, NewLoopID); - L->setLoopID(NewLoopID); - } - } - - bool processLoop(Loop *L) { - assert(L->empty() && "Only process inner loops."); - -#ifndef NDEBUG - const std::string DebugLocStr = getDebugLocString(L); -#endif /* NDEBUG */ - - DEBUG(dbgs() << "\nLV: Checking a loop in \"" - << L->getHeader()->getParent()->getName() << "\" from " - << DebugLocStr << "\n"); - - LoopVectorizeHints Hints(L, DisableUnrolling); - - DEBUG(dbgs() << "LV: Loop hints:" - << " force=" - << (Hints.getForce() == LoopVectorizeHints::FK_Disabled - ? "disabled" - : (Hints.getForce() == LoopVectorizeHints::FK_Enabled - ? "enabled" - : "?")) << " width=" << Hints.getWidth() - << " unroll=" << Hints.getInterleave() << "\n"); - - // Function containing loop - Function *F = L->getHeader()->getParent(); - - // Looking at the diagnostic output is the only way to determine if a loop - // was vectorized (other than looking at the IR or machine code), so it - // is important to generate an optimization remark for each loop. Most of - // these messages are generated by emitOptimizationRemarkAnalysis. Remarks - // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are - // less verbose reporting vectorized loops and unvectorized loops that may - // benefit from vectorization, respectively. - - if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { - DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); - return false; - } - - // Check the loop for a trip count threshold: - // do not vectorize loops with a tiny trip count. - const unsigned TC = SE->getSmallConstantTripCount(L); - if (TC > 0u && TC < TinyTripCountVectorThreshold) { - DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " - << "This loop is not worth vectorizing."); - if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) - DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); - else { - DEBUG(dbgs() << "\n"); - emitAnalysisDiag(F, L, Hints, VectorizationReport() - << "vectorization is not beneficial " - "and is not explicitly forced"); - return false; - } - } - - PredicatedScalarEvolution PSE(*SE); - - // Check if it is legal to vectorize the loop. - LoopVectorizationRequirements Requirements; - LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA, - &Requirements, &Hints); - if (!LVL.canVectorize()) { - DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); - emitMissedWarning(F, L, Hints); - return false; - } - - // Collect values we want to ignore in the cost model. 
This includes - // type-promoting instructions we identified during reduction detection. - SmallPtrSet<const Value *, 32> ValuesToIgnore; - CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore); - for (auto &Reduction : *LVL.getReductionVars()) { - RecurrenceDescriptor &RedDes = Reduction.second; - SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); - ValuesToIgnore.insert(Casts.begin(), Casts.end()); - } - - // Use the cost model. - LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC, - F, &Hints, ValuesToIgnore); - - // Check the function attributes to find out if this function should be - // optimized for size. - bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled && - F->optForSize(); - - // Compute the weighted frequency of this loop being executed and see if it - // is less than 20% of the function entry baseline frequency. Note that we - // always have a canonical loop here because we think we *can* vectorize. - // FIXME: This is hidden behind a flag due to pervasive problems with - // exactly what block frequency models. - if (LoopVectorizeWithBlockFrequency) { - BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); - if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && - LoopEntryFreq < ColdEntryFreq) - OptForSize = true; - } - - // Check the function attributes to see if implicit floats are allowed. - // FIXME: This check doesn't seem possibly correct -- what if the loop is - // an integer loop and the vector instructions selected are purely integer - // vector instructions? - if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { - DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" - "attribute is used.\n"); - emitAnalysisDiag( - F, L, Hints, - VectorizationReport() - << "loop not vectorized due to NoImplicitFloat attribute"); - emitMissedWarning(F, L, Hints); - return false; - } - - // Select the optimal vectorization factor. - const LoopVectorizationCostModel::VectorizationFactor VF = - CM.selectVectorizationFactor(OptForSize); - - // Select the interleave count. - unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); - - // Get user interleave count. - unsigned UserIC = Hints.getInterleave(); - - // Identify the diagnostic messages that should be produced. - std::string VecDiagMsg, IntDiagMsg; - bool VectorizeLoop = true, InterleaveLoop = true; - - if (Requirements.doesNotMeet(F, L, Hints)) { - DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " - "requirements.\n"); - emitMissedWarning(F, L, Hints); - return false; - } - - if (VF.Width == 1) { - DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); - VecDiagMsg = - "the cost-model indicates that vectorization is not beneficial"; - VectorizeLoop = false; - } - - if (IC == 1 && UserIC <= 1) { - // Tell the user interleaving is not beneficial. - DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); - IntDiagMsg = - "the cost-model indicates that interleaving is not beneficial"; - InterleaveLoop = false; - if (UserIC == 1) - IntDiagMsg += - " and is explicitly disabled or interleave count is set to 1"; - } else if (IC > 1 && UserIC == 1) { - // Tell the user interleaving is beneficial, but it explicitly disabled. 
- DEBUG(dbgs() - << "LV: Interleaving is beneficial but is explicitly disabled."); - IntDiagMsg = "the cost-model indicates that interleaving is beneficial " - "but is explicitly disabled or interleave count is set to 1"; - InterleaveLoop = false; - } - - // Override IC if user provided an interleave count. - IC = UserIC > 0 ? UserIC : IC; - - // Emit diagnostic messages, if any. - const char *VAPassName = Hints.vectorizeAnalysisPassName(); - if (!VectorizeLoop && !InterleaveLoop) { - // Do not vectorize or interleaving the loop. - emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F, - L->getStartLoc(), VecDiagMsg); - emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F, - L->getStartLoc(), IntDiagMsg); - return false; - } else if (!VectorizeLoop && InterleaveLoop) { - DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); - emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F, - L->getStartLoc(), VecDiagMsg); - } else if (VectorizeLoop && !InterleaveLoop) { - DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " - << DebugLocStr << '\n'); - emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F, - L->getStartLoc(), IntDiagMsg); - } else if (VectorizeLoop && InterleaveLoop) { - DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " - << DebugLocStr << '\n'); - DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); - } - - if (!VectorizeLoop) { - assert(IC > 1 && "interleave count should not be 1 or 0"); - // If we decided that it is not legal to vectorize the loop then - // interleave it. - InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, IC); - Unroller.vectorize(&LVL, CM.MinBWs); - - emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(), - Twine("interleaved loop (interleaved count: ") + - Twine(IC) + ")"); - } else { - // If we decided that it is *legal* to vectorize the loop then do it. - InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, VF.Width, IC); - LB.vectorize(&LVL, CM.MinBWs); - ++LoopsVectorized; - - // Add metadata to disable runtime unrolling scalar loop when there's no - // runtime check about strides and memory. Because at this situation, - // scalar loop is rarely used not worthy to be unrolled. - if (!LB.IsSafetyChecksAdded()) - AddRuntimeUnrollDisableMetaData(L); - - // Report the vectorization decision. - emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(), - Twine("vectorized loop (vectorization width: ") + - Twine(VF.Width) + ", interleaved count: " + - Twine(IC) + ")"); - } + auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); + auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); + auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); + auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); + auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); + auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); + auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; + auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); + auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); + auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); + auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); - // Mark the loop as already vectorized to avoid vectorizing again. 
- Hints.setAlreadyVectorized(); + std::function<const LoopAccessInfo &(Loop &)> GetLAA = + [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; - DEBUG(verifyFunction(*L->getHeader()->getParent())); - return true; + return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, + GetLAA); } void getAnalysisUsage(AnalysisUsage &AU) const override { @@ -1922,15 +1837,13 @@ struct LoopVectorize : public FunctionPass { AU.addRequired<ScalarEvolutionWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); AU.addRequired<AAResultsWrapperPass>(); - AU.addRequired<LoopAccessAnalysis>(); - AU.addRequired<DemandedBits>(); + AU.addRequired<LoopAccessLegacyAnalysis>(); + AU.addRequired<DemandedBitsWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<BasicAAWrapperPass>(); - AU.addPreserved<AAResultsWrapperPass>(); AU.addPreserved<GlobalsAAWrapperPass>(); } - }; } // end anonymous namespace @@ -1943,9 +1856,7 @@ struct LoopVectorize : public FunctionPass { Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { // We need to place the broadcast of invariant variables outside the loop. Instruction *Instr = dyn_cast<Instruction>(V); - bool NewInstr = - (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(), - Instr->getParent()) != LoopVectorBody.end()); + bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; // Place the code for broadcasting invariant variables in the new preheader. @@ -1959,6 +1870,111 @@ Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { return Shuf; } +void InnerLoopVectorizer::createVectorIntInductionPHI( + const InductionDescriptor &II, VectorParts &Entry, IntegerType *TruncType) { + Value *Start = II.getStartValue(); + ConstantInt *Step = II.getConstIntStepValue(); + assert(Step && "Can not widen an IV with a non-constant step"); + + // Construct the initial value of the vector IV in the vector loop preheader + auto CurrIP = Builder.saveIP(); + Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); + if (TruncType) { + Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); + Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); + } + Value *SplatStart = Builder.CreateVectorSplat(VF, Start); + Value *SteppedStart = getStepVector(SplatStart, 0, Step); + Builder.restoreIP(CurrIP); + + Value *SplatVF = + ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), + VF * Step->getSExtValue())); + // We may need to add the step a number of times, depending on the unroll + // factor. The last of those goes into the PHI. + PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", + &*LoopVectorBody->getFirstInsertionPt()); + Value *LastInduction = VecInd; + for (unsigned Part = 0; Part < UF; ++Part) { + Entry[Part] = LastInduction; + LastInduction = Builder.CreateAdd(LastInduction, SplatVF, "step.add"); + } + + VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); + VecInd->addIncoming(LastInduction, LoopVectorBody); +} + +void InnerLoopVectorizer::widenIntInduction(PHINode *IV, VectorParts &Entry, + TruncInst *Trunc) { + + auto II = Legal->getInductionVars()->find(IV); + assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); + + auto ID = II->second; + assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); + + // If a truncate instruction was provided, get the smaller type. + auto *TruncType = Trunc ? 
cast<IntegerType>(Trunc->getType()) : nullptr; + + // The step of the induction. + Value *Step = nullptr; + + // If the induction variable has a constant integer step value, go ahead and + // get it now. + if (ID.getConstIntStepValue()) + Step = ID.getConstIntStepValue(); + + // Try to create a new independent vector induction variable. If we can't + // create the phi node, we will splat the scalar induction variable in each + // loop iteration. + if (VF > 1 && IV->getType() == Induction->getType() && Step && + !ValuesNotWidened->count(IV)) + return createVectorIntInductionPHI(ID, Entry, TruncType); + + // The scalar value to broadcast. This will be derived from the canonical + // induction variable. + Value *ScalarIV = nullptr; + + // Define the scalar induction variable and step values. If we were given a + // truncation type, truncate the canonical induction variable and constant + // step. Otherwise, derive these values from the induction descriptor. + if (TruncType) { + assert(Step && "Truncation requires constant integer step"); + auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); + ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); + Step = ConstantInt::getSigned(TruncType, StepInt); + } else { + ScalarIV = Induction; + auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); + if (IV != OldInduction) { + ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); + ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); + ScalarIV->setName("offset.idx"); + } + if (!Step) { + SCEVExpander Exp(*PSE.getSE(), DL, "induction"); + Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), + &*Builder.GetInsertPoint()); + } + } + + // Splat the scalar induction variable, and build the necessary step vectors. + Value *Broadcasted = getBroadcastInstrs(ScalarIV); + for (unsigned Part = 0; Part < UF; ++Part) + Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); + + // If an induction variable is only used for counting loop iterations or + // calculating addresses, it doesn't need to be widened. Create scalar steps + // that can be used by instructions we will later scalarize. Note that the + // addition of the scalar steps will not increase the number of instructions + // in the loop in the common case prior to InstCombine. We will be trading + // one vector extract for each scalar step. + if (VF > 1 && ValuesNotWidened->count(IV)) { + auto *EntryVal = Trunc ? cast<Value>(Trunc) : IV; + buildScalarSteps(ScalarIV, Step, EntryVal); + } +} + Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step) { assert(Val->getType()->isVectorTy() && "Must be a vector"); @@ -1970,7 +1986,7 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Type *ITy = Val->getType()->getScalarType(); VectorType *Ty = cast<VectorType>(Val->getType()); int VLen = Ty->getNumElements(); - SmallVector<Constant*, 8> Indices; + SmallVector<Constant *, 8> Indices; // Create a vector of consecutive numbers from zero to VF. for (int i = 0; i < VLen; ++i) @@ -1987,6 +2003,27 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, return Builder.CreateAdd(Val, Step, "induction"); } +void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, + Value *EntryVal) { + + // We shouldn't have to build scalar steps if we aren't vectorizing. + assert(VF > 1 && "VF should be greater than one"); + + // Get the value type and ensure it and the step have the same integer type. 
+ Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); + assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && + "Val and Step should have the same integer type"); + + // Compute the scalar steps and save the results in ScalarIVMap. + for (unsigned Part = 0; Part < UF; ++Part) + for (unsigned I = 0; I < VF; ++I) { + auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + I); + auto *Mul = Builder.CreateMul(StartIdx, Step); + auto *Add = Builder.CreateAdd(ScalarIV, Mul); + ScalarIVMap[EntryVal].push_back(Add); + } +} + int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); auto *SE = PSE.getSE(); @@ -1994,7 +2031,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { if (Ptr->getType()->getPointerElementType()->isAggregateType()) return 0; - // If this value is a pointer induction variable we know it is consecutive. + // If this value is a pointer induction variable, we know it is consecutive. PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); if (Phi && Inductions.count(Phi)) { InductionDescriptor II = Inductions[Phi]; @@ -2008,7 +2045,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { unsigned NumOperands = Gep->getNumOperands(); Value *GpPtr = Gep->getPointerOperand(); // If this GEP value is a consecutive pointer induction variable and all of - // the indices are constant then we know it is consecutive. We can + // the indices are constant, then we know it is consecutive. Phi = dyn_cast<PHINode>(GpPtr); if (Phi && Inductions.count(Phi)) { @@ -2038,7 +2075,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { // We can emit wide load/stores only if the last non-zero index is the // induction variable. const SCEV *Last = nullptr; - if (!Strides.count(Gep)) + if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); else { // Because of the multiplication by a stride we can have a s/zext cast. @@ -2050,7 +2087,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { // %idxprom = zext i32 %mul to i64 << Safe cast. // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom // - Last = replaceSymbolicStrideSCEV(PSE, Strides, + Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), Gep->getOperand(InductionOperand), Gep); if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) Last = @@ -2076,7 +2113,7 @@ bool LoopVectorizationLegality::isUniform(Value *V) { return LAI->isUniform(V); } -InnerLoopVectorizer::VectorParts& +InnerLoopVectorizer::VectorParts & InnerLoopVectorizer::getVectorValue(Value *V) { assert(V != Induction && "The new induction variable should not be used."); assert(!V->getType()->isVectorTy() && "Can't widen a vector"); @@ -2097,7 +2134,7 @@ InnerLoopVectorizer::getVectorValue(Value *V) { Value *InnerLoopVectorizer::reverseVector(Value *Vec) { assert(Vec->getType()->isVectorTy() && "Invalid type"); - SmallVector<Constant*, 8> ShuffleMask; + SmallVector<Constant *, 8> ShuffleMask; for (unsigned i = 0; i < VF; ++i) ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); @@ -2308,7 +2345,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { Group->isReverse() ? 
reverseVector(StridedVec) : StridedVec; } - propagateMetadata(NewLoadInstr, Instr); + addMetadata(NewLoadInstr, Instr); } return; } @@ -2326,7 +2363,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { assert(Member && "Fail to get a member from an interleaved store group"); Value *StoredVec = - getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part]; + getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part]; if (Group->isReverse()) StoredVec = reverseVector(StoredVec); @@ -2347,7 +2384,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { Instruction *NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); - propagateMetadata(NewStoreInstr, Instr); + addMetadata(NewStoreInstr, Instr); } } @@ -2372,8 +2409,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { if (!Alignment) Alignment = DL.getABITypeAlignment(ScalarDataTy); unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); - unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); - unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; + uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); + uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; if (SI && Legal->blockNeedsPredication(SI->getParent()) && !Legal->isMaskRequired(SI)) @@ -2382,69 +2419,115 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { if (ScalarAllocatedSize != VectorElementSize) return scalarizeInstruction(Instr); - // If the pointer is loop invariant or if it is non-consecutive, - // scalarize the load. + // If the pointer is loop invariant scalarize the load. + if (LI && Legal->isUniform(Ptr)) + return scalarizeInstruction(Instr); + + // If the pointer is non-consecutive and gather/scatter is not supported + // scalarize the instruction. int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); bool Reverse = ConsecutiveStride < 0; - bool UniformLoad = LI && Legal->isUniform(Ptr); - if (!ConsecutiveStride || UniformLoad) + bool CreateGatherScatter = + !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || + (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); + + if (!ConsecutiveStride && !CreateGatherScatter) return scalarizeInstruction(Instr); Constant *Zero = Builder.getInt32(0); VectorParts &Entry = WidenMap.get(Instr); + VectorParts VectorGep; // Handle consecutive loads/stores. GetElementPtrInst *Gep = getGEPInstruction(Ptr); - if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { - setDebugLocFromInst(Builder, Gep); - Value *PtrOperand = Gep->getPointerOperand(); - Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; - FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); - - // Create the new GEP with the new induction variable. - GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); - Gep2->setOperand(0, FirstBasePtr); - Gep2->setName("gep.indvar.base"); - Ptr = Builder.Insert(Gep2); - } else if (Gep) { - setDebugLocFromInst(Builder, Gep); - assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), - OrigLoop) && - "Base ptr must be invariant"); - - // The last index does not have to be the induction. It can be - // consecutive and be a function of the index. For example A[I+1]; - unsigned NumOperands = Gep->getNumOperands(); - unsigned InductionOperand = getGEPInductionOperand(Gep); - // Create the new GEP with the new induction variable. 
- GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
-
- for (unsigned i = 0; i < NumOperands; ++i) {
- Value *GepOperand = Gep->getOperand(i);
- Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);
-
- // Update last index or loop invariant instruction anchored in loop.
- if (i == InductionOperand ||
- (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
- assert((i == InductionOperand ||
- PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
- OrigLoop)) &&
- "Must be last index or loop invariant");
-
- VectorParts &GEPParts = getVectorValue(GepOperand);
- Value *Index = GEPParts[0];
- Index = Builder.CreateExtractElement(Index, Zero);
- Gep2->setOperand(i, Index);
- Gep2->setName("gep.indvar.idx");
+ if (ConsecutiveStride) {
+ if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
+ setDebugLocFromInst(Builder, Gep);
+ Value *PtrOperand = Gep->getPointerOperand();
+ Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
+ FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);
+
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+ Gep2->setOperand(0, FirstBasePtr);
+ Gep2->setName("gep.indvar.base");
+ Ptr = Builder.Insert(Gep2);
+ } else if (Gep) {
+ setDebugLocFromInst(Builder, Gep);
+ assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
+ OrigLoop) &&
+ "Base ptr must be invariant");
+ // The last index does not have to be the induction. It can be
+ // consecutive and be a function of the index. For example A[I+1];
+ unsigned NumOperands = Gep->getNumOperands();
+ unsigned InductionOperand = getGEPInductionOperand(Gep);
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ Value *GepOperand = Gep->getOperand(i);
+ Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);
+
+ // Update last index or loop invariant instruction anchored in loop.
+ if (i == InductionOperand ||
+ (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
+ assert((i == InductionOperand ||
+ PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
+ OrigLoop)) &&
+ "Must be last index or loop invariant");
+
+ VectorParts &GEPParts = getVectorValue(GepOperand);
+
+ // If GepOperand is an induction variable, and there's a scalarized
+ // version of it available, use it. Otherwise, we will need to create
+ // an extractelement instruction.
+ Value *Index = ScalarIVMap.count(GepOperand)
+ ? ScalarIVMap[GepOperand][0]
+ : Builder.CreateExtractElement(GEPParts[0], Zero);
+
+ Gep2->setOperand(i, Index);
+ Gep2->setName("gep.indvar.idx");
+ }
 }
- Ptr = Builder.Insert(Gep2);
+ Ptr = Builder.Insert(Gep2);
+ } else { // No GEP
+ // Use the induction element ptr.
+ assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
+ setDebugLocFromInst(Builder, Ptr);
+ VectorParts &PtrVal = getVectorValue(Ptr);
+ Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
+ }
 } else {
- // Use the induction element ptr.
- assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
- setDebugLocFromInst(Builder, Ptr);
- VectorParts &PtrVal = getVectorValue(Ptr);
- Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
+ // At this point we should have a vector version of the GEP for Gather or
+ // Scatter.
+ assert(CreateGatherScatter && "The instruction should be scalarized");
+ if (Gep) {
+ // Vectorizing GEP, across UF parts.
We want to get a vector value for base + // and each index that's defined inside the loop, even if it is + // loop-invariant but wasn't hoisted out. Otherwise we want to keep them + // scalar. + SmallVector<VectorParts, 4> OpsV; + for (Value *Op : Gep->operands()) { + Instruction *SrcInst = dyn_cast<Instruction>(Op); + if (SrcInst && OrigLoop->contains(SrcInst)) + OpsV.push_back(getVectorValue(Op)); + else + OpsV.push_back(VectorParts(UF, Op)); + } + for (unsigned Part = 0; Part < UF; ++Part) { + SmallVector<Value *, 4> Ops; + Value *GEPBasePtr = OpsV[0][Part]; + for (unsigned i = 1; i < Gep->getNumOperands(); i++) + Ops.push_back(OpsV[i][Part]); + Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); + cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); + assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); + + NewGep = + Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); + VectorGep.push_back(NewGep); + } + } else + VectorGep = getVectorValue(Ptr); } VectorParts Mask = createBlockInMask(Instr->getParent()); @@ -2458,62 +2541,78 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { VectorParts StoredVal = getVectorValue(SI->getValueOperand()); for (unsigned Part = 0; Part < UF; ++Part) { + Instruction *NewSI = nullptr; + if (CreateGatherScatter) { + Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; + NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], + Alignment, MaskPart); + } else { + // Calculate the pointer for the specific unroll-part. + Value *PartPtr = + Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); + + if (Reverse) { + // If we store to reverse consecutive memory locations, then we need + // to reverse the order of elements in the stored value. + StoredVal[Part] = reverseVector(StoredVal[Part]); + // If the address is consecutive but reversed, then the + // wide store needs to start at the last vector element. + PartPtr = + Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); + PartPtr = + Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); + Mask[Part] = reverseVector(Mask[Part]); + } + + Value *VecPtr = + Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); + + if (Legal->isMaskRequired(SI)) + NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, + Mask[Part]); + else + NewSI = + Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); + } + addMetadata(NewSI, SI); + } + return; + } + + // Handle loads. + assert(LI && "Must have a load instruction"); + setDebugLocFromInst(Builder, LI); + for (unsigned Part = 0; Part < UF; ++Part) { + Instruction *NewLI; + if (CreateGatherScatter) { + Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; + NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, + 0, "wide.masked.gather"); + Entry[Part] = NewLI; + } else { // Calculate the pointer for the specific unroll-part. Value *PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); if (Reverse) { - // If we store to reverse consecutive memory locations, then we need - // to reverse the order of elements in the stored value. - StoredVal[Part] = reverseVector(StoredVal[Part]); // If the address is consecutive but reversed, then the - // wide store needs to start at the last vector element. + // wide load needs to start at the last vector element. 
PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); Mask[Part] = reverseVector(Mask[Part]); } - Value *VecPtr = Builder.CreateBitCast(PartPtr, - DataTy->getPointerTo(AddressSpace)); - - Instruction *NewSI; - if (Legal->isMaskRequired(SI)) - NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, - Mask[Part]); - else - NewSI = Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); - propagateMetadata(NewSI, SI); - } - return; - } - - // Handle loads. - assert(LI && "Must have a load instruction"); - setDebugLocFromInst(Builder, LI); - for (unsigned Part = 0; Part < UF; ++Part) { - // Calculate the pointer for the specific unroll-part. - Value *PartPtr = - Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); - - if (Reverse) { - // If the address is consecutive but reversed, then the - // wide load needs to start at the last vector element. - PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); - PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); - Mask[Part] = reverseVector(Mask[Part]); + Value *VecPtr = + Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); + if (Legal->isMaskRequired(LI)) + NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], + UndefValue::get(DataTy), + "wide.masked.load"); + else + NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); + Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; } - - Instruction* NewLI; - Value *VecPtr = Builder.CreateBitCast(PartPtr, - DataTy->getPointerTo(AddressSpace)); - if (Legal->isMaskRequired(LI)) - NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], - UndefValue::get(DataTy), - "wide.masked.load"); - else - NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); - propagateMetadata(NewLI, LI); - Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; + addMetadata(NewLI, LI); } } @@ -2526,9 +2625,7 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, setDebugLocFromInst(Builder, Instr); // Find all of the vectorized parameters. - for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { - Value *SrcOp = Instr->getOperand(op); - + for (Value *SrcOp : Instr->operands()) { // If we are accessing the old induction variable, use the new one. if (SrcOp == OldInduction) { Params.push_back(getVectorValue(SrcOp)); @@ -2536,7 +2633,7 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, } // Try using previously calculated values. - Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); + auto *SrcInst = dyn_cast<Instruction>(SrcOp); // If the src is an instruction that appeared earlier in the basic block, // then it should already be vectorized. @@ -2558,8 +2655,9 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, // Does this instruction return a value ? bool IsVoidRetTy = Instr->getType()->isVoidTy(); - Value *UndefVec = IsVoidRetTy ? nullptr : - UndefValue::get(VectorType::get(Instr->getType(), VF)); + Value *UndefVec = + IsVoidRetTy ? nullptr + : UndefValue::get(VectorType::get(Instr->getType(), VF)); // Create a new entry in the WidenMap and initialize it to Undef or Null. VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); @@ -2589,16 +2687,28 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, Cloned->setName(Instr->getName() + ".cloned"); // Replace the operands of the cloned instructions with extracted scalars. 
for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
- Value *Op = Params[op][Part];
- // Param is a vector. Need to extract the right lane.
- if (Op->getType()->isVectorTy())
- Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
- Cloned->setOperand(op, Op);
+
+ // If the operand is an induction variable, and there's a scalarized
+ // version of it available, use it. Otherwise, we will need to create
+ // an extractelement instruction if vectorizing.
+ auto *NewOp = Params[op][Part];
+ auto *ScalarOp = Instr->getOperand(op);
+ if (ScalarIVMap.count(ScalarOp))
+ NewOp = ScalarIVMap[ScalarOp][VF * Part + Width];
+ else if (NewOp->getType()->isVectorTy())
+ NewOp = Builder.CreateExtractElement(NewOp, Builder.getInt32(Width));
+ Cloned->setOperand(op, NewOp);
 }
+ addNewMetadata(Cloned, Instr);

 // Place the cloned scalar in the new loop.
 Builder.Insert(Cloned);

+ // If we just cloned a new assumption, add it to the assumption cache.
+ if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
+ if (II->getIntrinsicID() == Intrinsic::assume)
+ AC->registerAssumption(II);
+
 // If the original scalar returns a value, we need to place it in a vector
 // so that future users will be able to use it.
 if (!IsVoidRetTy)
@@ -2606,8 +2716,8 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
 Builder.getInt32(Width));
 // End if-block.
 if (IfPredicateStore)
- PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
- Cmp));
+ PredicatedStores.push_back(
+ std::make_pair(cast<StoreInst>(Cloned), Cmp));
 }
 }
}
@@ -2627,7 +2737,7 @@ PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

 Builder.SetInsertPoint(Latch->getTerminator());
-
+
 // Create i+1 and fill the PHINode.
 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
 Induction->addIncoming(Start, L->getLoopPreheader());
@@ -2635,7 +2745,7 @@ PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
 // Create the compare.
 Value *ICmp = Builder.CreateICmpEQ(Next, End);
 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
-
+
 // Now we have two terminators. Remove the old one from the block.
 Latch->getTerminator()->eraseFromParent();

@@ -2649,12 +2759,12 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
 // Find the loop boundaries.
 ScalarEvolution *SE = PSE.getSE();
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(OrigLoop);
+ const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
 "Invalid loop count");

 Type *IdxTy = Legal->getWidestInductionType();
-
+
 // The exit count might have the type of i64 while the phi is i32. This can
 // happen if we have an induction variable that is sign extended before the
 // compare. The only way that we get a backedge taken count is that the
@@ -2664,7 +2774,7 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
 IdxTy->getPrimitiveSizeInBits())
 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
-
+
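// [Illustrative aside, not part of this patch] The trip-count arithmetic
// assembled around here, in plain scalar form: with Step = VF * UF the
// remainder becomes the scalar epilogue's iteration count, and a zero
// remainder is bumped to Step when an interleave group requires a scalar
// epilogue (see getOrCreateVectorTripCount below). The helper name is
// hypothetical.
static uint64_t vectorTripCount(uint64_t TC, uint64_t Step,
                                bool RequiresScalarEpilogue) {
  uint64_t R = TC % Step;               // n.mod.vf
  if (RequiresScalarEpilogue && R == 0) // guarantee >= 1 scalar iteration
    R = Step;
  return TC - R;                        // n.vec
}

 // Get the total trip count from the count by adding 1.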
const SCEV *ExitCount = SE->getAddExpr( BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); @@ -2681,9 +2791,8 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { if (TripCount->getType()->isPointerTy()) TripCount = - CastInst::CreatePointerCast(TripCount, IdxTy, - "exitcount.ptrcnt.to.int", - L->getLoopPreheader()->getTerminator()); + CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", + L->getLoopPreheader()->getTerminator()); return TripCount; } @@ -2691,16 +2800,30 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { if (VectorTripCount) return VectorTripCount; - + Value *TC = getOrCreateTripCount(L); IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); - - // Now we need to generate the expression for N - (N % VF), which is - // the part that the vectorized body will execute. - // The loop step is equal to the vectorization factor (num of SIMD elements) - // times the unroll factor (num of SIMD instructions). + + // Now we need to generate the expression for the part of the loop that the + // vectorized body will execute. This is equal to N - (N % Step) if scalar + // iterations are not required for correctness, or N - Step, otherwise. Step + // is equal to the vectorization factor (number of SIMD elements) times the + // unroll factor (number of SIMD instructions). Constant *Step = ConstantInt::get(TC->getType(), VF * UF); Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); + + // If there is a non-reversed interleaved group that may speculatively access + // memory out-of-bounds, we need to ensure that there will be at least one + // iteration of the scalar epilogue loop. Thus, if the step evenly divides + // the trip count, we set the remainder to be equal to the step. If the step + // does not evenly divide the trip count, no adjustment is necessary since + // there will already be scalar iterations. Note that the minimum iterations + // check ensures that N >= Step. + if (VF > 1 && Legal->requiresScalarEpilogue()) { + auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); + R = Builder.CreateSelect(IsZero, Step, R); + } + VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); return VectorTripCount; @@ -2714,13 +2837,15 @@ void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, // Generate code to check that the loop's trip count that we computed by // adding one to the backedge-taken count will not overflow. - Value *CheckMinIters = - Builder.CreateICmpULT(Count, - ConstantInt::get(Count->getType(), VF * UF), - "min.iters.check"); - - BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), - "min.iters.checked"); + Value *CheckMinIters = Builder.CreateICmpULT( + Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); + + BasicBlock *NewBB = + BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); + // Update dominator tree immediately if the generated block is a + // LoopBypassBlock because SCEV expansions to generate loop bypass + // checks may query it before the current function is finished. + DT->addNewBlock(NewBB, BB); if (L->getParentLoop()) L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); ReplaceInstWithInst(BB->getTerminator(), @@ -2733,7 +2858,7 @@ void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, Value *TC = getOrCreateVectorTripCount(L); BasicBlock *BB = L->getLoopPreheader(); IRBuilder<> Builder(BB->getTerminator()); - + // Now, compare the new count to zero. 
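
[Editor's illustration, not part of the patch. A scalar model of the trip-count split computed above, including the scalar-epilogue adjustment; the function name and all inputs are hypothetical.]

    #include <cassert>

    unsigned vectorTripCount(unsigned N, unsigned VF, unsigned UF,
                             bool RequiresScalarEpilogue) {
      unsigned Step = VF * UF;
      assert(N >= Step && "guaranteed by the minimum iterations check");
      unsigned R = N % Step; // n.mod.vf
      // Force at least one scalar iteration when an interleaved group could
      // otherwise speculatively access memory out-of-bounds.
      if (VF > 1 && RequiresScalarEpilogue && R == 0)
        R = Step;
      return N - R; // n.vec
    }
    // e.g. N = 20, VF = 4, UF = 2: 16 vector iterations, 4 scalar; with an
    // epilogue required and N = 16: 8 vector iterations, 8 scalar.
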
@@ -2714,13 +2837,15 @@ void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
 
   // Generate code to check that the loop's trip count that we computed by
   // adding one to the backedge-taken count will not overflow.
-  Value *CheckMinIters =
-    Builder.CreateICmpULT(Count,
-                          ConstantInt::get(Count->getType(), VF * UF),
-                          "min.iters.check");
-
-  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
-                                          "min.iters.checked");
+  Value *CheckMinIters = Builder.CreateICmpULT(
+      Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
+
+  BasicBlock *NewBB =
+      BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
+  // Update dominator tree immediately if the generated block is a
+  // LoopBypassBlock because SCEV expansions to generate loop bypass
+  // checks may query it before the current function is finished.
+  DT->addNewBlock(NewBB, BB);
   if (L->getParentLoop())
     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
   ReplaceInstWithInst(BB->getTerminator(),
@@ -2733,7 +2858,7 @@ void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
   Value *TC = getOrCreateVectorTripCount(L);
   BasicBlock *BB = L->getLoopPreheader();
   IRBuilder<> Builder(BB->getTerminator());
-  
+
   // Now, compare the new count to zero. If it is zero, skip the vector loop
   // and jump to the scalar loop.
   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                     "cmp.zero");
 
   // Generate code to check that the loop's trip count that we computed by
   // adding one to the backedge-taken count will not overflow.
-  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
-                                          "vector.ph");
+  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+  // Update dominator tree immediately if the generated block is a
+  // LoopBypassBlock because SCEV expansions to generate loop bypass
+  // checks may query it before the current function is finished.
+  DT->addNewBlock(NewBB, BB);
   if (L->getParentLoop())
     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
   ReplaceInstWithInst(BB->getTerminator(),
@@ -2768,6 +2896,10 @@ void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
   // Create a new block containing the stride check.
   BB->setName("vector.scevcheck");
   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+  // Update dominator tree immediately if the generated block is a
+  // LoopBypassBlock because SCEV expansions to generate loop bypass
+  // checks may query it before the current function is finished.
+  DT->addNewBlock(NewBB, BB);
   if (L->getParentLoop())
     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
   ReplaceInstWithInst(BB->getTerminator(),
@@ -2776,8 +2908,7 @@ void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
   AddedSafetyChecks = true;
 }
 
-void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
-                                               BasicBlock *Bypass) {
+void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
   BasicBlock *BB = L->getLoopPreheader();
 
   // Generate the code that checks in runtime if arrays overlap. We put the
@@ -2793,14 +2924,23 @@ void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
   // Create a new block containing the memory check.
   BB->setName("vector.memcheck");
   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
+  // Update dominator tree immediately if the generated block is a
+  // LoopBypassBlock because SCEV expansions to generate loop bypass
+  // checks may query it before the current function is finished.
+  DT->addNewBlock(NewBB, BB);
   if (L->getParentLoop())
     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
   ReplaceInstWithInst(BB->getTerminator(),
                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
   LoopBypassBlocks.push_back(BB);
   AddedSafetyChecks = true;
-}
+  // We currently don't use LoopVersioning for the actual loop cloning but we
+  // still use it to add the noalias metadata.
+  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
+                                           PSE.getSE());
+  LVer->prepareNoAliasMetadata();
+}
 
 void InnerLoopVectorizer::createEmptyLoop() {
   /*
@@ -2859,12 +2999,12 @@ void InnerLoopVectorizer::createEmptyLoop() {
   BasicBlock *VecBody =
       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
   BasicBlock *MiddleBlock =
-  VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
+      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
   BasicBlock *ScalarPH =
-  MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
+      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
 
   // Create and register the new vector loop.
-  Loop* Lp = new Loop();
+  Loop *Lp = new Loop();
   Loop *ParentLoop = OrigLoop->getParentLoop();
 
   // Insert the new loop into the loop nest and register the new basic blocks
@@ -2899,15 +3039,15 @@ void InnerLoopVectorizer::createEmptyLoop() {
   // checks into a separate block to make the more common case of few elements
   // faster.
   emitMemRuntimeChecks(Lp, ScalarPH);
-  
+
   // Generate the induction variable.
   // The loop step is equal to the vectorization factor (num of SIMD elements)
   // times the unroll factor (num of SIMD instructions).
   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
   Induction =
-    createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
-                            getDebugLocFromInstOrOperands(OldInduction));
+      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
+                              getDebugLocFromInstOrOperands(OldInduction));
 
   // We are going to resume the execution of the scalar loop.
   // Go over all of the induction variables that we found and fix the
@@ -2920,16 +3060,14 @@ void InnerLoopVectorizer::createEmptyLoop() {
   // This variable saves the new starting index for the scalar loop. It is used
   // to test if there are any tail iterations left once the vector loop has
   // completed.
-  LoopVectorizationLegality::InductionList::iterator I, E;
   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
-  for (I = List->begin(), E = List->end(); I != E; ++I) {
-    PHINode *OrigPhi = I->first;
-    InductionDescriptor II = I->second;
+  for (auto &InductionEntry : *List) {
+    PHINode *OrigPhi = InductionEntry.first;
+    InductionDescriptor II = InductionEntry.second;
 
     // Create phi nodes to merge from the backedge-taken check block.
-    PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3,
-                                           "bc.resume.val",
-                                           ScalarPH->getTerminator());
+    PHINode *BCResumeVal = PHINode::Create(
+        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
     Value *EndValue;
     if (OrigPhi == OldInduction) {
       // We know what the end value is.
@@ -2937,9 +3075,9 @@ void InnerLoopVectorizer::createEmptyLoop() {
     } else {
       IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
       Value *CRD = B.CreateSExtOrTrunc(CountRoundDown,
-                                       II.getStepValue()->getType(),
-                                       "cast.crd");
-      EndValue = II.transform(B, CRD);
+                                       II.getStep()->getType(), "cast.crd");
+      const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
+      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
       EndValue->setName("ind.end");
     }
 
     // The new PHI merges the original incoming value, in case of a bypass,
     // or the value at the end of the vectorized loop.
     BCResumeVal->addIncoming(EndValue, MiddleBlock);
 
+    // Fix up external users of the induction variable.
+    fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);
+
     // Fix the scalar body counter (PHI node).
     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
 
     // The old induction's phi node in the scalar body needs the truncated
     // value.
-    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
-      BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
+    for (BasicBlock *BB : LoopBypassBlocks)
+      BCResumeVal->addIncoming(II.getStartValue(), BB);
     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
   }
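
[Editor's illustration, not part of the patch. A sketch of the resume value ("ind.end") computed above for a non-canonical induction, assuming the induction can be written as Start + k * StepVal; all names are illustrative.]

    // Mirrors II.transform(B, CRD, ...) after the vector loop has executed
    // CountRoundDown scalar iterations.
    long resumeValue(long Start, long StepVal, long CountRoundDown) {
      return Start + CountRoundDown * StepVal; // ind.end
    }
    // The scalar loop's phi (bc.resume.val) then starts at ind.end when
    // entered from the middle block, or at Start when the vector loop was
    // bypassed.
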
 
   // Add a check in the middle block to see if we have completed
   // all of the iterations in the first vector loop.
   // If (N - N%VF) == N, then we *don't* need to run the remainder.
-  Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
-                                CountRoundDown, "cmp.n",
-                                MiddleBlock->getTerminator());
+  Value *CmpN =
+      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
+                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
   ReplaceInstWithInst(MiddleBlock->getTerminator(),
                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
@@ -2974,13 +3115,79 @@ void InnerLoopVectorizer::createEmptyLoop() {
   LoopScalarPreHeader = ScalarPH;
   LoopMiddleBlock = MiddleBlock;
   LoopExitBlock = ExitBlock;
-  LoopVectorBody.push_back(VecBody);
+  LoopVectorBody = VecBody;
   LoopScalarBody = OldBasicBlock;
 
+  // Keep all loop hints from the original loop on the vector loop (we'll
+  // replace the vectorizer-specific hints below).
+  if (MDNode *LID = OrigLoop->getLoopID())
+    Lp->setLoopID(LID);
+
   LoopVectorizeHints Hints(Lp, true);
   Hints.setAlreadyVectorized();
 }
 
+// Fix up external users of the induction variable. At this point, we are
+// in LCSSA form, with all external PHIs that use the IV having one input value,
+// coming from the remainder loop. We need those PHIs to also have a correct
+// value for the IV when arriving directly from the middle block.
+void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
+                                       const InductionDescriptor &II,
+                                       Value *CountRoundDown, Value *EndValue,
+                                       BasicBlock *MiddleBlock) {
+  // There are two kinds of external IV usages - those that use the value
+  // computed in the last iteration (the PHI) and those that use the penultimate
+  // value (the value that feeds into the phi from the loop latch).
+  // We allow both, but they, obviously, have different values.
+
+  assert(OrigLoop->getExitBlock() && "Expected a single exit block");
+
+  DenseMap<Value *, Value *> MissingVals;
+
+  // An external user of the last iteration's value should see the value that
+  // the remainder loop uses to initialize its own IV.
+  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
+  for (User *U : PostInc->users()) {
+    Instruction *UI = cast<Instruction>(U);
+    if (!OrigLoop->contains(UI)) {
+      assert(isa<PHINode>(UI) && "Expected LCSSA form");
+      MissingVals[UI] = EndValue;
+    }
+  }
+
+  // An external user of the penultimate value needs to see EndValue - Step.
+  // The simplest way to get this is to recompute it from the constituent SCEVs,
+  // that is Start + (Step * (CRD - 1)).
+  for (User *U : OrigPhi->users()) {
+    auto *UI = cast<Instruction>(U);
+    if (!OrigLoop->contains(UI)) {
+      const DataLayout &DL =
+          OrigLoop->getHeader()->getModule()->getDataLayout();
+      assert(isa<PHINode>(UI) && "Expected LCSSA form");
+
+      IRBuilder<> B(MiddleBlock->getTerminator());
+      Value *CountMinusOne = B.CreateSub(
+          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
+      Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
+                                       "cast.cmo");
+      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
+      Escape->setName("ind.escape");
+      MissingVals[UI] = Escape;
+    }
+  }
+
+  for (auto &I : MissingVals) {
+    PHINode *PHI = cast<PHINode>(I.first);
+    // One corner case we have to handle is two IVs "chasing" each other,
+    // that is %IV2 = phi [...], [ %IV1, %latch ]
+    // In this case, if IV1 has an external use, we need to avoid adding both
+    // "last value of IV1" and "penultimate value of IV2". So, verify that we
+    // don't already have an incoming value for the middle block.
+    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
+      PHI->addIncoming(I.second, MiddleBlock);
+  }
+}
+
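
[Editor's illustration, not part of the patch. The two escape values fixupIVUsers supplies, modeled arithmetically for an induction Start + k * Step and vector trip count CRD; the struct and function are hypothetical.]

    struct EscapeValues {
      long LastValue;        // seen by users of the post-increment value
      long PenultimateValue; // seen by users of the phi itself (ind.escape)
    };

    EscapeValues escapes(long Start, long Step, long CRD) {
      return {Start + CRD * Step,        // initializes the remainder's IV
              Start + (CRD - 1) * Step}; // Start + (Step * (CRD - 1)), as above
    }
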
 namespace {
 struct CSEDenseMapInfo {
   static bool canHandle(Instruction *I) {
@@ -3007,48 +3214,31 @@ struct CSEDenseMapInfo {
 };
 }
 
-/// \brief Check whether this block is a predicated block.
-/// Due to if predication of stores we might create a sequence of "if(pred) a[i]
-/// = ...;  " blocks. We start with one vectorized basic block. For every
-/// conditional block we split this vectorized block. Therefore, every second
-/// block will be a predicated one.
-static bool isPredicatedBlock(unsigned BlockNum) {
-  return BlockNum % 2;
-}
-
 ///\brief Perform cse of induction variable instructions.
-static void cse(SmallVector<BasicBlock *, 4> &BBs) {
+static void cse(BasicBlock *BB) {
   // Perform simple cse.
   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
-  for (unsigned i = 0, e = BBs.size(); i != e; ++i) {
-    BasicBlock *BB = BBs[i];
-    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
-      Instruction *In = &*I++;
-
-      if (!CSEDenseMapInfo::canHandle(In))
-        continue;
+  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
+    Instruction *In = &*I++;
 
-      // Check if we can replace this instruction with any of the
-      // visited instructions.
-      if (Instruction *V = CSEMap.lookup(In)) {
-        In->replaceAllUsesWith(V);
-        In->eraseFromParent();
-        continue;
-      }
-      // Ignore instructions in conditional blocks. We create "if (pred) a[i] =
-      // ...;" blocks for predicated stores. Every second block is a predicated
-      // block.
-      if (isPredicatedBlock(i))
-        continue;
+    if (!CSEDenseMapInfo::canHandle(In))
+      continue;
 
-      CSEMap[In] = In;
+    // Check if we can replace this instruction with any of the
+    // visited instructions.
+    if (Instruction *V = CSEMap.lookup(In)) {
+      In->replaceAllUsesWith(V);
+      In->eraseFromParent();
+      continue;
     }
+
+    CSEMap[In] = In;
   }
 }
 
 /// \brief Adds a 'fast' flag to floating point operations.
 static Value *addFastMathFlag(Value *V) {
-  if (isa<FPMathOperator>(V)){
+  if (isa<FPMathOperator>(V)) {
     FastMathFlags Flags;
     Flags.setUnsafeAlgebra();
     cast<Instruction>(V)->setFastMathFlags(Flags);
@@ -3066,11 +3256,11 @@ static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
   assert(Ty->isVectorTy() && "Can only scalarize vectors");
   unsigned Cost = 0;
 
-  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+  for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
     if (Insert)
-      Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
     if (Extract)
-      Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+      Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
   }
 
   return Cost;
@@ -3101,15 +3291,15 @@ static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
 
   // Compute corresponding vector type for return value and arguments.
   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
-  for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i)
-    Tys.push_back(ToVectorTy(ScalarTys[i], VF));
+  for (Type *ScalarTy : ScalarTys)
+    Tys.push_back(ToVectorTy(ScalarTy, VF));
 
   // Compute costs of unpacking argument values for the scalar calls and
   // packing the return values to a vector.
   unsigned ScalarizationCost =
       getScalarizationOverhead(RetTy, true, false, TTI);
-  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i)
-    ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI);
+  for (Type *Ty : Tys)
+    ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI);
 
   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
 
@@ -3134,25 +3324,29 @@ static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                        const TargetTransformInfo &TTI,
                                        const TargetLibraryInfo *TLI) {
-  Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
   assert(ID && "Expected intrinsic call!");
 
   Type *RetTy = ToVectorTy(CI->getType(), VF);
   SmallVector<Type *, 4> Tys;
-  for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
-    Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
+  for (Value *ArgOperand : CI->arg_operands())
+    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
 
-  return TTI.getIntrinsicInstrCost(ID, RetTy, Tys);
+  FastMathFlags FMF;
+  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
+    FMF = FPMO->getFastMathFlags();
+
+  return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
 }
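
[Editor's illustration, not part of the patch. The scalarized-call cost computed by getVectorCallCost above, as plain arithmetic; per-element costs and the helper name are made up for the example.]

    unsigned scalarizedCallCost(unsigned ScalarCallCost, unsigned VF,
                                unsigned InsertCost, unsigned ExtractCost,
                                unsigned NumArgs) {
      // One insert per returned lane, one extract per argument lane.
      unsigned Scalarization = VF * InsertCost + NumArgs * VF * ExtractCost;
      return ScalarCallCost * VF + Scalarization;
    }
    // A vector library call or vector intrinsic is preferred only when its
    // own cost does not exceed this value.
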
 
 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
-  IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
-  IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
+  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
+  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
 }
 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
-  IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
-  IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
+  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
+  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
 }
 
@@ -3161,21 +3355,22 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
   // truncated version of `I` and reextend its result. InstCombine runs
   // later and will remove any ext/trunc pairs.
   //
-  for (auto &KV : MinBWs) {
+  SmallPtrSet<Value *, 4> Erased;
+  for (const auto &KV : *MinBWs) {
     VectorParts &Parts = WidenMap.get(KV.first);
     for (Value *&I : Parts) {
-      if (I->use_empty())
+      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
         continue;
       Type *OriginalTy = I->getType();
-      Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(),
-                                                 KV.second);
+      Type *ScalarTruncatedTy =
+          IntegerType::get(OriginalTy->getContext(), KV.second);
       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
                                           OriginalTy->getVectorNumElements());
       if (TruncatedTy == OriginalTy)
         continue;
 
       IRBuilder<> B(cast<Instruction>(I));
-      auto ShrinkOperand = [&](Value *V) -> Value* {
+      auto ShrinkOperand = [&](Value *V) -> Value * {
         if (auto *ZI = dyn_cast<ZExtInst>(V))
           if (ZI->getSrcTy() == TruncatedTy)
             return ZI->getOperand(0);
@@ -3185,50 +3380,59 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
 
       // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
-      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
-        NewI = B.CreateBinOp(BO->getOpcode(),
-                             ShrinkOperand(BO->getOperand(0)),
+      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
+        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                              ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
-      } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
-        NewI = B.CreateICmp(CI->getPredicate(),
-                            ShrinkOperand(CI->getOperand(0)),
-                            ShrinkOperand(CI->getOperand(1)));
-      } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
+        NewI =
+            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
+                         ShrinkOperand(CI->getOperand(1)));
+      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
-      } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
+      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
-        default: llvm_unreachable("Unhandled cast!");
+        default:
+          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
-          NewI = B.CreateSExtOrTrunc(CI->getOperand(0),
-                                     smallestIntegerVectorType(OriginalTy,
-                                                               TruncatedTy));
+          NewI = B.CreateSExtOrTrunc(
+              CI->getOperand(0),
+              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
-          NewI = B.CreateZExtOrTrunc(CI->getOperand(0),
-                                     smallestIntegerVectorType(OriginalTy,
-                                                               TruncatedTy));
+          NewI = B.CreateZExtOrTrunc(
+              CI->getOperand(0),
+              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
-      } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
+      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
-        auto *O0 =
-            B.CreateZExtOrTrunc(SI->getOperand(0),
-                                VectorType::get(ScalarTruncatedTy, Elements0));
+        auto *O0 = B.CreateZExtOrTrunc(
+            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
-        auto *O1 =
-            B.CreateZExtOrTrunc(SI->getOperand(1),
-                                VectorType::get(ScalarTruncatedTy, Elements1));
+        auto *O1 = B.CreateZExtOrTrunc(
+            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
 
        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
+      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
+        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
+        auto *O0 = B.CreateZExtOrTrunc(
+            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
+        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
+        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
+      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
+        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
+        auto *O0 = B.CreateZExtOrTrunc(
+            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
+        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }
@@ -3238,12 +3442,13 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
+      Erased.insert(I);
      I = Res;
    }
  }
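
[Editor's illustration, not part of the patch. A scalar analogue of the shrink-then-reextend rewrite performed above, assuming an i32 add whose operands are known to fit in i8; the IR in the comments is shorthand.]

    // Before:  %a32 = zext i8 %a to i32        ; ShrinkOperand peels this zext
    //          %add = add i32 %a32, %b32
    // After:   %add.small = add i8 %a, %b      ; the recreated narrow op
    //          %res = zext i8 %add.small to i32 ; reextended; InstCombine
    //                                            ; later removes ext/trunc pairs
    unsigned char shrunkAdd(unsigned char A, unsigned char B) {
      return static_cast<unsigned char>(A + B); // the narrow operation
    }
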
 
   // We'll have created a bunch of ZExts that are now parentless. Clean up.
-  for (auto &KV : MinBWs) {
+  for (const auto &KV : *MinBWs) {
     VectorParts &Parts = WidenMap.get(KV.first);
     for (Value *&I : Parts) {
       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
@@ -3266,15 +3471,14 @@ void InnerLoopVectorizer::vectorizeLoop() {
   //===------------------------------------------------===//
 
   Constant *Zero = Builder.getInt32(0);
 
-  // In order to support reduction variables we need to be able to vectorize
-  // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
-  // stages. First, we create a new vector PHI node with no incoming edges.
-  // We use this value when we vectorize all of the instructions that use the
-  // PHI. Next, after all of the instructions in the block are complete we
-  // add the new incoming edges to the PHI. At this point all of the
-  // instructions in the basic block are vectorized, so we can use them to
-  // construct the PHI.
-  PhiVector RdxPHIsToFix;
+  // In order to support recurrences we need to be able to vectorize Phi nodes.
+  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
+  // we create a new vector PHI node with no incoming edges. We use this value
+  // when we vectorize all of the instructions that use the PHI. Next, after
+  // all of the instructions in the block are complete we add the new incoming
+  // edges to the PHI. At this point all of the instructions in the basic block
+  // are vectorized, so we can use them to construct the PHI.
+  PhiVector PHIsToFix;
 
   // Scan the loop in a topological order to ensure that defs are vectorized
   // before users.
@@ -3282,33 +3486,32 @@ void InnerLoopVectorizer::vectorizeLoop() {
   DFS.perform(LI);
 
   // Vectorize all of the blocks in the original loop.
-  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
-       be = DFS.endRPO(); bb != be; ++bb)
-    vectorizeBlockInLoop(*bb, &RdxPHIsToFix);
+  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
+    vectorizeBlockInLoop(BB, &PHIsToFix);
 
   // Insert truncates and extends for any truncated instructions as hints to
   // InstCombine.
   if (VF > 1)
     truncateToMinimalBitwidths();
-  
-  // At this point every instruction in the original loop is widened to
-  // a vector form. We are almost done. Now, we need to fix the PHI nodes
-  // that we vectorized. The PHI nodes are currently empty because we did
-  // not want to introduce cycles. Notice that the remaining PHI nodes
-  // that we need to fix are reduction variables.
-
-  // Create the 'reduced' values for each of the induction vars.
-  // The reduced values are the vector values that we scalarize and combine
-  // after the loop is finished.
-  for (PhiVector::iterator it = RdxPHIsToFix.begin(), e = RdxPHIsToFix.end();
-       it != e; ++it) {
-    PHINode *RdxPhi = *it;
-    assert(RdxPhi && "Unable to recover vectorized PHI");
-
-    // Find the reduction variable descriptor.
-    assert(Legal->isReductionVariable(RdxPhi) &&
+
+  // At this point every instruction in the original loop is widened to a
+  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
+  // nodes are currently empty because we did not want to introduce cycles.
+  // This is the second stage of vectorizing recurrences.
+  for (PHINode *Phi : PHIsToFix) {
+    assert(Phi && "Unable to recover vectorized PHI");
+
+    // Handle first-order recurrences that need to be fixed.
+    if (Legal->isFirstOrderRecurrence(Phi)) {
+      fixFirstOrderRecurrence(Phi);
+      continue;
+    }
+
+    // If the phi node is not a first-order recurrence, it must be a reduction.
+    // Get its reduction variable descriptor.
+    assert(Legal->isReductionVariable(Phi) &&
            "Unable to find the reduction variable");
-    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];
+    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
 
     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
@@ -3363,18 +3566,18 @@ void InnerLoopVectorizer::vectorizeLoop() {
 
     // Reductions do not have to start at zero. They can start with
     // any loop invariant values.
-    VectorParts &VecRdxPhi = WidenMap.get(RdxPhi);
+    VectorParts &VecRdxPhi = WidenMap.get(Phi);
     BasicBlock *Latch = OrigLoop->getLoopLatch();
-    Value *LoopVal = RdxPhi->getIncomingValueForBlock(Latch);
+    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
     VectorParts &Val = getVectorValue(LoopVal);
     for (unsigned part = 0; part < UF; ++part) {
       // Make sure to add the reduction start value only to the
       // first unroll part.
       Value *StartVal = (part == 0) ? VectorStart : Identity;
-      cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal,
-                                                  LoopVectorPreHeader);
-      cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
-                                                  LoopVectorBody.back());
+      cast<PHINode>(VecRdxPhi[part])
+          ->addIncoming(StartVal, LoopVectorPreHeader);
+      cast<PHINode>(VecRdxPhi[part])
+          ->addIncoming(Val[part], LoopVectorBody);
     }
 
     // Before each round, move the insertion point right between
@@ -3389,9 +3592,9 @@ void InnerLoopVectorizer::vectorizeLoop() {
     // If the vector reduction can be performed in a smaller type, we truncate
     // then extend the loop exit value to enable InstCombine to evaluate the
     // entire expression in the smaller type.
-    if (VF > 1 && RdxPhi->getType() != RdxDesc.getRecurrenceType()) {
+    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
-      Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
+      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
       for (unsigned part = 0; part < UF; ++part) {
         Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
         Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
@@ -3432,21 +3635,19 @@ void InnerLoopVectorizer::vectorizeLoop() {
     assert(isPowerOf2_32(VF) &&
            "Reduction emission only supported for pow2 vectors!");
     Value *TmpVec = ReducedPartRdx;
-    SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
+    SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
     for (unsigned i = VF; i != 1; i >>= 1) {
       // Move the upper half of the vector to the lower half.
-      for (unsigned j = 0; j != i/2; ++j)
-        ShuffleMask[j] = Builder.getInt32(i/2 + j);
+      for (unsigned j = 0; j != i / 2; ++j)
+        ShuffleMask[j] = Builder.getInt32(i / 2 + j);
 
       // Fill the rest of the mask with undef.
-      std::fill(&ShuffleMask[i/2], ShuffleMask.end(),
+      std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
                 UndefValue::get(Builder.getInt32Ty()));
 
-      Value *Shuf =
-        Builder.CreateShuffleVector(TmpVec,
-                                    UndefValue::get(TmpVec->getType()),
-                                    ConstantVector::get(ShuffleMask),
-                                    "rdx.shuf");
+      Value *Shuf = Builder.CreateShuffleVector(
+          TmpVec, UndefValue::get(TmpVec->getType()),
+          ConstantVector::get(ShuffleMask), "rdx.shuf");
 
       if (Op != Instruction::ICmp && Op != Instruction::FCmp)
         // Floating point operations had to be 'fast' to enable the reduction.
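
[Editor's illustration, not part of the patch. The log2(VF) shuffle reduction built above, modeled on a plain array for VF = 4 with integer add; the function is hypothetical.]

    #include <array>

    int horizontalAdd(std::array<int, 4> V) {
      for (unsigned Width = 4; Width != 1; Width >>= 1)
        for (unsigned J = 0; J != Width / 2; ++J)
          V[J] += V[Width / 2 + J]; // rdx.shuf moves the upper half down
      return V[0];                  // the result lands in the first element
    }
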
@@ -3458,21 +3659,21 @@ void InnerLoopVectorizer::vectorizeLoop() {
     }
 
     // The result is in the first element of the vector.
-    ReducedPartRdx = Builder.CreateExtractElement(TmpVec,
-                                                  Builder.getInt32(0));
+    ReducedPartRdx =
+        Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
 
     // If the reduction can be performed in a smaller type, we need to extend
     // the reduction to the wider type before we branch to the original loop.
-    if (RdxPhi->getType() != RdxDesc.getRecurrenceType())
+    if (Phi->getType() != RdxDesc.getRecurrenceType())
       ReducedPartRdx =
         RdxDesc.isSigned()
-          ? Builder.CreateSExt(ReducedPartRdx, RdxPhi->getType())
-          : Builder.CreateZExt(ReducedPartRdx, RdxPhi->getType());
+          ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
+          : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
   }
 
   // Create a phi node that merges control-flow from the backedge-taken check
   // block and the middle block.
-  PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx",
+  PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                         LoopScalarPreHeader->getTerminator());
   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
@@ -3483,9 +3684,11 @@ void InnerLoopVectorizer::vectorizeLoop() {
   // We know that the loop is in LCSSA form. We need to update the
   // PHI nodes in the exit blocks.
   for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
-       LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
+                            LEE = LoopExitBlock->end();
+       LEI != LEE; ++LEI) {
     PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
-    if (!LCSSAPhi) break;
+    if (!LCSSAPhi)
+      break;
 
     // All PHINodes need to have a single entry edge, or two if
     // we already fixed them.
@@ -3498,30 +3701,30 @@ void InnerLoopVectorizer::vectorizeLoop() {
       LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
       break;
     }
-  }// end of the LCSSA phi scan.
+  } // end of the LCSSA phi scan.
 
   // Fix the scalar loop reduction variable with the incoming reduction sum
   // from the vector body and from the backedge value.
   int IncomingEdgeBlockIdx =
-    (RdxPhi)->getBasicBlockIndex(OrigLoop->getLoopLatch());
+      Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
   // Pick the other block.
   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
-  (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
-  (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
-  }// end of for each redux variable.
+  Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
+  Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
+  } // end of for each Phi in PHIsToFix.
 
   fixLCSSAPHIs();
 
   // Make sure DomTree is updated.
   updateAnalysis();
-  
+
   // Predicate any stores.
   for (auto KV : PredicatedStores) {
     BasicBlock::iterator I(KV.first);
     auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
     auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
-                                        /*BranchWeights=*/nullptr, DT);
+                                        /*BranchWeights=*/nullptr, DT, LI);
     I->moveBefore(T);
     I->getParent()->setName("pred.store.if");
     BB->setName("pred.store.continue");
@@ -3531,11 +3734,162 @@ void InnerLoopVectorizer::vectorizeLoop() {
   cse(LoopVectorBody);
 }
 
+void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
+
+  // This is the second phase of vectorizing first-order recurrences. An
+  // overview of the transformation is described below. Suppose we have the
+  // following loop.
+  //
+  //   for (int i = 0; i < n; ++i)
+  //     b[i] = a[i] - a[i - 1];
+  //
+  // There is a first-order recurrence on "a". For this loop, the shorthand
+  // scalar IR looks like:
+  //
+  //   scalar.ph:
+  //     s_init = a[-1]
+  //     br scalar.body
+  //
+  //   scalar.body:
+  //     i = phi [0, scalar.ph], [i+1, scalar.body]
+  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
+  //     s2 = a[i]
+  //     b[i] = s2 - s1
+  //     br cond, scalar.body, ...
+  //
+  // In this example, s1 is a recurrence because its value depends on the
+  // previous iteration. In the first phase of vectorization, we created a
+  // temporary value for s1. We now complete the vectorization and produce the
+  // shorthand vector IR shown below (for VF = 4, UF = 1).
+  //
+  //   vector.ph:
+  //     v_init = vector(..., ..., ..., a[-1])
+  //     br vector.body
+  //
+  //   vector.body
+  //     i = phi [0, vector.ph], [i+4, vector.body]
+  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
+  //     v2 = a[i, i+1, i+2, i+3];
+  //     v3 = vector(v1(3), v2(0, 1, 2))
+  //     b[i, i+1, i+2, i+3] = v2 - v3
+  //     br cond, vector.body, middle.block
+  //
+  //   middle.block:
+  //     x = v2(3)
+  //     br scalar.ph
+  //
+  //   scalar.ph:
+  //     s_init = phi [x, middle.block], [a[-1], otherwise]
+  //     br scalar.body
+  //
+  // After execution completes the vector loop, we extract the next value of
+  // the recurrence (x) to use as the initial value in the scalar loop.
+
+  // Get the original loop preheader and single loop latch.
+  auto *Preheader = OrigLoop->getLoopPreheader();
+  auto *Latch = OrigLoop->getLoopLatch();
+
+  // Get the initial and previous values of the scalar recurrence.
+  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
+  auto *Previous = Phi->getIncomingValueForBlock(Latch);
+
+  // Create a vector from the initial value.
+  auto *VectorInit = ScalarInit;
+  if (VF > 1) {
+    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
+    VectorInit = Builder.CreateInsertElement(
+        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
+        Builder.getInt32(VF - 1), "vector.recur.init");
+  }
+
+  // We constructed a temporary phi node in the first phase of vectorization.
+  // This phi node will eventually be deleted.
+  auto &PhiParts = getVectorValue(Phi);
+  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
+
+  // Create a phi node for the new recurrence. The current value will either be
+  // the initial value inserted into a vector or loop-varying vector value.
+  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
+  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
+
+  // Get the vectorized previous value. We ensured the previous value was an
+  // instruction when detecting the recurrence.
+  auto &PreviousParts = getVectorValue(Previous);
+
+  // Set the insertion point to be after this instruction. We ensured the
+  // previous value dominated all uses of the phi when detecting the
+  // recurrence.
+  Builder.SetInsertPoint(
+      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
+
+  // We will construct a vector for the recurrence by combining the values for
+  // the current and previous iterations. This is the required shuffle mask.
+  SmallVector<Constant *, 8> ShuffleMask(VF);
+  ShuffleMask[0] = Builder.getInt32(VF - 1);
+  for (unsigned I = 1; I < VF; ++I)
+    ShuffleMask[I] = Builder.getInt32(I + VF - 1);
+
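
[Editor's illustration, not part of the patch. The recurrence shuffle mask built above, computed standalone; for VF = 4 it yields <3, 4, 5, 6>, i.e. the last lane of the previous vector followed by lanes 0..2 of the current one.]

    #include <vector>

    std::vector<int> recurrenceMask(int VF) {
      std::vector<int> Mask(VF);
      Mask[0] = VF - 1;       // lane VF-1 of the incoming (previous) vector
      for (int I = 1; I < VF; ++I)
        Mask[I] = I + VF - 1; // lanes 0..VF-2 of the current vector
      return Mask;            // VF = 4: <3, 4, 5, 6>
    }
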
+  // The vector from which to take the initial value for the current iteration
+  // (actual or unrolled). Initially, this is the vector phi node.
+  Value *Incoming = VecPhi;
+
+  // Shuffle the current and previous vector and update the vector parts.
+  for (unsigned Part = 0; Part < UF; ++Part) {
+    auto *Shuffle =
+        VF > 1
+            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
+                                          ConstantVector::get(ShuffleMask))
+            : Incoming;
+    PhiParts[Part]->replaceAllUsesWith(Shuffle);
+    cast<Instruction>(PhiParts[Part])->eraseFromParent();
+    PhiParts[Part] = Shuffle;
+    Incoming = PreviousParts[Part];
+  }
+
+  // Fix the latch value of the new recurrence in the vector loop.
+  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
+
+  // Extract the last vector element in the middle block. This will be the
+  // initial value for the recurrence when jumping to the scalar loop.
+  auto *Extract = Incoming;
+  if (VF > 1) {
+    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
+    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
+                                           "vector.recur.extract");
+  }
+
+  // Fix the initial value of the original recurrence in the scalar loop.
+  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
+  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
+  for (auto *BB : predecessors(LoopScalarPreHeader)) {
+    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
+    Start->addIncoming(Incoming, BB);
+  }
+
+  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
+  Phi->setName("scalar.recur");
+
+  // Finally, fix users of the recurrence outside the loop. The users will need
+  // either the last value of the scalar recurrence or the last value of the
+  // vector recurrence we extracted in the middle block. Since the loop is in
+  // LCSSA form, we just need to find the phi node for the original scalar
+  // recurrence in the exit block, and then add an edge for the middle block.
+  for (auto &I : *LoopExitBlock) {
+    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
+    if (!LCSSAPhi)
+      break;
+    if (LCSSAPhi->getIncomingValue(0) == Phi) {
+      LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
+      break;
+    }
+  }
+}
+
 void InnerLoopVectorizer::fixLCSSAPHIs() {
-  for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
-       LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
-    PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
-    if (!LCSSAPhi) break;
+  for (Instruction &LEI : *LoopExitBlock) {
+    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
+    if (!LCSSAPhi)
+      break;
     if (LCSSAPhi->getNumIncomingValues() == 1)
       LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
                             LoopMiddleBlock);
@@ -3548,7 +3902,7 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
          "Invalid edge");
 
   // Look for cached value.
-  std::pair<BasicBlock*, BasicBlock*> Edge(Src, Dst);
+  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
   EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
   if (ECEntryIt != MaskCache.end())
     return ECEntryIt->second;
@@ -3604,15 +3958,15 @@ InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
 
 void InnerLoopVectorizer::widenPHIInstruction(
     Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
     unsigned VF, PhiVector *PV) {
-  PHINode* P = cast<PHINode>(PN);
-  // Handle reduction variables:
-  if (Legal->isReductionVariable(P)) {
+  PHINode *P = cast<PHINode>(PN);
+  // Handle recurrences.
+  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
     for (unsigned part = 0; part < UF; ++part) {
       // This is phase one of vectorizing PHIs.
-      Type *VecTy = (VF == 1) ? PN->getType() :
-        VectorType::get(PN->getType(), VF);
+      Type *VecTy =
+          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
       Entry[part] = PHINode::Create(
-          VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt());
+          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
     }
     PV->push_back(P);
     return;
@@ -3635,21 +3989,20 @@ void InnerLoopVectorizer::widenPHIInstruction(
   //      SELECT(Mask2, In2,
   //      ( ...)))
   for (unsigned In = 0; In < NumIncoming; In++) {
-    VectorParts Cond = createEdgeMask(P->getIncomingBlock(In),
-                                      P->getParent());
+    VectorParts Cond =
+        createEdgeMask(P->getIncomingBlock(In), P->getParent());
     VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
 
     for (unsigned part = 0; part < UF; ++part) {
       // We might have single edge PHIs (blocks) - use an identity
       // 'select' for the first PHI operand.
       if (In == 0)
-        Entry[part] = Builder.CreateSelect(Cond[part], In0[part],
-                                           In0[part]);
+        Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
       else
         // Select between the current value and the previous incoming edge
        // based on the incoming mask.
-        Entry[part] = Builder.CreateSelect(Cond[part], In0[part],
-                                           Entry[part], "predphi");
+        Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
+                                           "predphi");
    }
  }
  return;
@@ -3657,85 +4010,68 @@ void InnerLoopVectorizer::widenPHIInstruction(
 
   // This PHINode must be an induction variable.
   // Make sure that we know about it.
-  assert(Legal->getInductionVars()->count(P) &&
-         "Not an induction variable");
+  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
 
   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
+  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
 
   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
   // which can be found from the original scalar operations.
   switch (II.getKind()) {
-    case InductionDescriptor::IK_NoInduction:
-      llvm_unreachable("Unknown induction");
-    case InductionDescriptor::IK_IntInduction: {
-      assert(P->getType() == II.getStartValue()->getType() &&
-             "Types must match");
-      // Handle other induction variables that are now based on the
-      // canonical one.
-      Value *V = Induction;
-      if (P != OldInduction) {
-        V = Builder.CreateSExtOrTrunc(Induction, P->getType());
-        V = II.transform(Builder, V);
-        V->setName("offset.idx");
+  case InductionDescriptor::IK_NoInduction:
+    llvm_unreachable("Unknown induction");
+  case InductionDescriptor::IK_IntInduction:
+    return widenIntInduction(P, Entry);
+  case InductionDescriptor::IK_PtrInduction:
+    // Handle the pointer induction variable case.
+    assert(P->getType()->isPointerTy() && "Unexpected type.");
+    // This is the normalized GEP that starts counting at zero.
+    Value *PtrInd = Induction;
+    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
+    // This is the vector of results. Notice that we don't generate
+    // vector geps because scalar geps result in better code.
+    for (unsigned part = 0; part < UF; ++part) {
+      if (VF == 1) {
+        int EltIndex = part;
+        Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
+        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
+        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
+        SclrGep->setName("next.gep");
+        Entry[part] = SclrGep;
+        continue;
       }
-      Value *Broadcasted = getBroadcastInstrs(V);
-      // After broadcasting the induction variable we need to make the vector
-      // consecutive by adding 0, 1, 2, etc.
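
[Editor's illustration, not part of the patch. The "broadcast + step" expansion (getStepVector) referred to by the deleted comment above, sketched with a plain array for VF = 4 and a constant step; all names are illustrative.]

    #include <array>

    std::array<long, 4> stepVector(long Scalar, long Step, unsigned StartIdx) {
      std::array<long, 4> V;
      for (unsigned I = 0; I < 4; ++I)
        V[I] = Scalar + (StartIdx + I) * Step; // lane i of part p uses p*VF + i
      return V;
    }
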
-      for (unsigned part = 0; part < UF; ++part)
-        Entry[part] = getStepVector(Broadcasted, VF * part, II.getStepValue());
-      return;
-    }
-    case InductionDescriptor::IK_PtrInduction:
-      // Handle the pointer induction variable case.
-      assert(P->getType()->isPointerTy() && "Unexpected type.");
-      // This is the normalized GEP that starts counting at zero.
-      Value *PtrInd = Induction;
-      PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStepValue()->getType());
-      // This is the vector of results. Notice that we don't generate
-      // vector geps because scalar geps result in better code.
-      for (unsigned part = 0; part < UF; ++part) {
-        if (VF == 1) {
-          int EltIndex = part;
-          Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
-          Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
-          Value *SclrGep = II.transform(Builder, GlobalIdx);
-          SclrGep->setName("next.gep");
-          Entry[part] = SclrGep;
-          continue;
-        }
-        Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
-        for (unsigned int i = 0; i < VF; ++i) {
-          int EltIndex = i + part * VF;
-          Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
-          Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
-          Value *SclrGep = II.transform(Builder, GlobalIdx);
-          SclrGep->setName("next.gep");
-          VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
-                                               Builder.getInt32(i),
-                                               "insert.gep");
-        }
-        Entry[part] = VecVal;
+      Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
+      for (unsigned int i = 0; i < VF; ++i) {
+        int EltIndex = i + part * VF;
+        Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
+        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
+        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
+        SclrGep->setName("next.gep");
+        VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
+                                             Builder.getInt32(i), "insert.gep");
       }
-      return;
+      Entry[part] = VecVal;
+    }
+    return;
   }
 }
 
 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
   // For each instruction in the old loop.
-  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
-    VectorParts &Entry = WidenMap.get(&*it);
+  for (Instruction &I : *BB) {
+    VectorParts &Entry = WidenMap.get(&I);
 
-    switch (it->getOpcode()) {
+    switch (I.getOpcode()) {
     case Instruction::Br:
       // Nothing to do for PHIs and BR, since we already took care of the
       // loop control flow instructions.
       continue;
     case Instruction::PHI: {
       // Vectorize PHINodes.
-      widenPHIInstruction(&*it, Entry, UF, VF, PV);
+      widenPHIInstruction(&I, Entry, UF, VF, PV);
       continue;
-    }// End of PHI.
+    } // End of PHI.
 
     case Instruction::Add:
     case Instruction::FAdd:
@@ -3756,10 +4092,10 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
     case Instruction::Or:
     case Instruction::Xor: {
       // Just widen binops.
-      BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it);
+      auto *BinOp = cast<BinaryOperator>(&I);
       setDebugLocFromInst(Builder, BinOp);
-      VectorParts &A = getVectorValue(it->getOperand(0));
-      VectorParts &B = getVectorValue(it->getOperand(1));
+      VectorParts &A = getVectorValue(BinOp->getOperand(0));
+      VectorParts &B = getVectorValue(BinOp->getOperand(1));
 
       // Use this vector value for all users of the original instruction.
       for (unsigned Part = 0; Part < UF; ++Part) {
@@ -3771,7 +4107,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
         Entry[Part] = V;
       }
 
-      propagateMetadata(Entry, &*it);
+      addMetadata(Entry, BinOp);
       break;
     }
     case Instruction::Select: {
@@ -3780,58 +4116,58 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
      // Widen selects.
      // If the selector is loop invariant we can create a select
      // instruction with a scalar condition. Otherwise, use vector-select.
      auto *SE = PSE.getSE();
      bool InvariantCond =
-          SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop);
-      setDebugLocFromInst(Builder, &*it);
+          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
+      setDebugLocFromInst(Builder, &I);
 
      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
-      VectorParts &Cond = getVectorValue(it->getOperand(0));
-      VectorParts &Op0 = getVectorValue(it->getOperand(1));
-      VectorParts &Op1 = getVectorValue(it->getOperand(2));
-
-      Value *ScalarCond = (VF == 1) ? Cond[0] :
-        Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));
+      VectorParts &Cond = getVectorValue(I.getOperand(0));
+      VectorParts &Op0 = getVectorValue(I.getOperand(1));
+      VectorParts &Op1 = getVectorValue(I.getOperand(2));
+
+      Value *ScalarCond =
+          (VF == 1)
+              ? Cond[0]
+              : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));
 
      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
-          InvariantCond ? ScalarCond : Cond[Part],
-          Op0[Part],
-          Op1[Part]);
+            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }
 
-      propagateMetadata(Entry, &*it);
+      addMetadata(Entry, &I);
      break;
    }
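
[Editor's illustration, not part of the patch. The two select forms generated above, modeled on arrays: an invariant condition is read once from lane 0, a varying one is applied per lane; the function is hypothetical.]

    #include <cstddef>

    void widenSelect(bool InvariantCond, const bool *Cond, const int *A,
                     const int *B, int *Out, std::size_t VF) {
      if (InvariantCond) {
        bool C = Cond[0]; // extract lane 0; all lanes hold the same value
        for (std::size_t I = 0; I < VF; ++I)
          Out[I] = C ? A[I] : B[I];
      } else {
        for (std::size_t I = 0; I < VF; ++I)
          Out[I] = Cond[I] ? A[I] : B[I]; // per-lane vector select
      }
    }
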
     case Instruction::ICmp:
     case Instruction::FCmp: {
       // Widen compares. Generate vector compares.
-      bool FCmp = (it->getOpcode() == Instruction::FCmp);
-      CmpInst *Cmp = dyn_cast<CmpInst>(it);
-      setDebugLocFromInst(Builder, &*it);
-      VectorParts &A = getVectorValue(it->getOperand(0));
-      VectorParts &B = getVectorValue(it->getOperand(1));
+      bool FCmp = (I.getOpcode() == Instruction::FCmp);
+      auto *Cmp = dyn_cast<CmpInst>(&I);
+      setDebugLocFromInst(Builder, Cmp);
+      VectorParts &A = getVectorValue(Cmp->getOperand(0));
+      VectorParts &B = getVectorValue(Cmp->getOperand(1));
       for (unsigned Part = 0; Part < UF; ++Part) {
         Value *C = nullptr;
         if (FCmp) {
           C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
-          cast<FCmpInst>(C)->copyFastMathFlags(&*it);
+          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
         } else {
           C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
         }
         Entry[Part] = C;
       }
 
-      propagateMetadata(Entry, &*it);
+      addMetadata(Entry, &I);
       break;
     }
 
     case Instruction::Store:
     case Instruction::Load:
-      vectorizeMemoryInstruction(&*it);
-      break;
+      vectorizeMemoryInstruction(&I);
+      break;
     case Instruction::ZExt:
     case Instruction::SExt:
     case Instruction::FPToUI:
@@ -3844,58 +4180,52 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
     case Instruction::Trunc:
     case Instruction::FPTrunc:
     case Instruction::BitCast: {
-      CastInst *CI = dyn_cast<CastInst>(it);
-      setDebugLocFromInst(Builder, &*it);
-      /// Optimize the special case where the source is the induction
-      /// variable. Notice that we can only optimize the 'trunc' case
-      /// because: a. FP conversions lose precision, b. sext/zext may wrap,
-      /// c. other casts depend on pointer size.
-      if (CI->getOperand(0) == OldInduction &&
-          it->getOpcode() == Instruction::Trunc) {
-        Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction,
-                                               CI->getType());
-        Value *Broadcasted = getBroadcastInstrs(ScalarCast);
-        InductionDescriptor II =
-            Legal->getInductionVars()->lookup(OldInduction);
-        Constant *Step = ConstantInt::getSigned(
-            CI->getType(), II.getStepValue()->getSExtValue());
-        for (unsigned Part = 0; Part < UF; ++Part)
-          Entry[Part] = getStepVector(Broadcasted, VF * Part, Step);
-        propagateMetadata(Entry, &*it);
+      auto *CI = dyn_cast<CastInst>(&I);
+      setDebugLocFromInst(Builder, CI);
+
+      // Optimize the special case where the source is a constant integer
+      // induction variable. Notice that we can only optimize the 'trunc' case
+      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
+      // (c) other casts depend on pointer size.
+      auto ID = Legal->getInductionVars()->lookup(OldInduction);
+      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
+          ID.getConstIntStepValue()) {
+        widenIntInduction(OldInduction, Entry, cast<TruncInst>(CI));
+        addMetadata(Entry, &I);
         break;
       }
+
       /// Vectorize casts.
-      Type *DestTy = (VF == 1) ? CI->getType() :
-                                 VectorType::get(CI->getType(), VF);
+      Type *DestTy =
+          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
 
-      VectorParts &A = getVectorValue(it->getOperand(0));
+      VectorParts &A = getVectorValue(CI->getOperand(0));
       for (unsigned Part = 0; Part < UF; ++Part)
         Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
-      propagateMetadata(Entry, &*it);
+      addMetadata(Entry, &I);
       break;
     }
 
     case Instruction::Call: {
       // Ignore dbg intrinsics.
-      if (isa<DbgInfoIntrinsic>(it))
+      if (isa<DbgInfoIntrinsic>(I))
         break;
-      setDebugLocFromInst(Builder, &*it);
+      setDebugLocFromInst(Builder, &I);
 
       Module *M = BB->getParent()->getParent();
-      CallInst *CI = cast<CallInst>(it);
+      auto *CI = cast<CallInst>(&I);
 
       StringRef FnName = CI->getCalledFunction()->getName();
       Function *F = CI->getCalledFunction();
       Type *RetTy = ToVectorTy(CI->getType(), VF);
       SmallVector<Type *, 4> Tys;
-      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
-        Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
-
-      Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
-      if (ID &&
-          (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
-           ID == Intrinsic::lifetime_start)) {
-        scalarizeInstruction(&*it);
+      for (Value *ArgOperand : CI->arg_operands())
+        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
+
+      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
+      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
+                 ID == Intrinsic::lifetime_start)) {
+        scalarizeInstruction(&I);
         break;
       }
       // The flag shows whether we use Intrinsic or a usual Call for vectorized
@@ -3906,7 +4236,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
       bool UseVectorIntrinsic =
           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
       if (!UseVectorIntrinsic && NeedToScalarize) {
-        scalarizeInstruction(&*it);
+        scalarizeInstruction(&I);
         break;
       }
 
@@ -3944,19 +4274,27 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
           }
         }
         assert(VectorF && "Can't create vector function.");
-        Entry[Part] = Builder.CreateCall(VectorF, Args);
+
+        SmallVector<OperandBundleDef, 1> OpBundles;
+        CI->getOperandBundlesAsDefs(OpBundles);
+        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
+
+        if (isa<FPMathOperator>(V))
+          V->copyFastMathFlags(CI);
+
+        Entry[Part] = V;
       }
 
-      propagateMetadata(Entry, &*it);
+      addMetadata(Entry, &I);
       break;
     }
 
     default:
       // All other instructions are unsupported. Scalarize them.
-      scalarizeInstruction(&*it);
+      scalarizeInstruction(&I);
       break;
-    }// end of switch.
-  }// end of for_each instr.
+    } // end of switch.
+  } // end of for_each instr.
 }
 
 void InnerLoopVectorizer::updateAnalysis() {
@@ -3967,16 +4305,11 @@ void InnerLoopVectorizer::updateAnalysis() {
   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
          "Entry does not dominate exit.");
 
-  for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
-    DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]);
-  DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back());
-
   // We don't predicate stores by this point, so the vector body should be a
   // single loop.
-  assert(LoopVectorBody.size() == 1 && "Expected single block loop!");
-  DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader);
+  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
 
-  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back());
+  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
@@ -3989,12 +4322,12 @@ void InnerLoopVectorizer::updateAnalysis() {
 /// Phi nodes with constant expressions that can trap are not safe to if
 /// convert.
 static bool canIfConvertPHINodes(BasicBlock *BB) {
-  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
-    PHINode *Phi = dyn_cast<PHINode>(I);
+  for (Instruction &I : *BB) {
+    auto *Phi = dyn_cast<PHINode>(&I);
     if (!Phi)
       return true;
-    for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
-      if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
+    for (Value *V : Phi->incoming_values())
+      if (auto *C = dyn_cast<Constant>(V))
         if (C->canTrap())
           return false;
   }
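
[Editor's illustration, not part of the patch. The hazard canIfConvertPHINodes guards against: a PHI input that is a trapping constant expression (e.g. a constant sdiv by zero) must not be speculated. A C analogue of the guarded division:]

    int guardedDiv(int A, int B) {
      return B != 0 ? A / B : 0; // if-converting this must not execute A / B
                                 // unconditionally when B may be zero
    }
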
if (!TheLoop->getLoopPreheader()) { - emitAnalysis( - VectorizationReport() << - "loop control flow is not understood by vectorizer"); + emitAnalysis(VectorizationReport() + << "loop control flow is not understood by vectorizer"); return false; } @@ -4077,17 +4403,15 @@ bool LoopVectorizationLegality::canVectorize() { // We must have a single backedge. if (TheLoop->getNumBackEdges() != 1) { - emitAnalysis( - VectorizationReport() << - "loop control flow is not understood by vectorizer"); + emitAnalysis(VectorizationReport() + << "loop control flow is not understood by vectorizer"); return false; } // We must have a single exiting block. if (!TheLoop->getExitingBlock()) { - emitAnalysis( - VectorizationReport() << - "loop control flow is not understood by vectorizer"); + emitAnalysis(VectorizationReport() + << "loop control flow is not understood by vectorizer"); return false; } @@ -4095,15 +4419,14 @@ bool LoopVectorizationLegality::canVectorize() { // checked at the end of each iteration. With that we can assume that all // instructions in the loop are executed the same number of times. if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { - emitAnalysis( - VectorizationReport() << - "loop control flow is not understood by vectorizer"); + emitAnalysis(VectorizationReport() + << "loop control flow is not understood by vectorizer"); return false; } // We need to have a loop header. - DEBUG(dbgs() << "LV: Found a loop: " << - TheLoop->getHeader()->getName() << '\n'); + DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() + << '\n'); // Check if we can if-convert non-single-bb loops. unsigned NumBlocks = TheLoop->getNumBlocks(); @@ -4113,7 +4436,7 @@ bool LoopVectorizationLegality::canVectorize() { } // ScalarEvolution needs to be able to find the exit count. - const SCEV *ExitCount = PSE.getSE()->getBackedgeTakenCount(TheLoop); + const SCEV *ExitCount = PSE.getBackedgeTakenCount(); if (ExitCount == PSE.getSE()->getCouldNotCompute()) { emitAnalysis(VectorizationReport() << "could not determine number of loop iterations"); @@ -4150,7 +4473,7 @@ bool LoopVectorizationLegality::canVectorize() { // Analyze interleaved memory accesses. if (UseInterleaved) - InterleaveInfo.analyzeInterleaving(Strides); + InterleaveInfo.analyzeInterleaving(*getSymbolicStrides()); unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) @@ -4182,7 +4505,7 @@ static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { return Ty; } -static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { +static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { Ty0 = convertPointerToIntegerType(DL, Ty0); Ty1 = convertPointerToIntegerType(DL, Ty1); if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) @@ -4193,11 +4516,11 @@ static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { /// \brief Check that the instruction has outside loop users and is not an /// identified reduction variable. static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, - SmallPtrSetImpl<Value *> &Reductions) { - // Reduction instructions are allowed to have exit users. All other - // instructions must not have external users. - if (!Reductions.count(Inst)) - //Check that all of the users of the loop are inside the BB. + SmallPtrSetImpl<Value *> &AllowedExit) { + // Reduction and Induction instructions are allowed to have exit users. All + // other instructions must not have external users. 
+ if (!AllowedExit.count(Inst)) + // Check that all of the users of the loop are inside the BB. for (User *U : Inst->users()) { Instruction *UI = cast<Instruction>(U); // This user may be a reduction exit value. @@ -4209,31 +4532,61 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, return false; } +void LoopVectorizationLegality::addInductionPhi( + PHINode *Phi, const InductionDescriptor &ID, + SmallPtrSetImpl<Value *> &AllowedExit) { + Inductions[Phi] = ID; + Type *PhiTy = Phi->getType(); + const DataLayout &DL = Phi->getModule()->getDataLayout(); + + // Get the widest type. + if (!WidestIndTy) + WidestIndTy = convertPointerToIntegerType(DL, PhiTy); + else + WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); + + // Int inductions are special because we only allow one IV. + if (ID.getKind() == InductionDescriptor::IK_IntInduction && + ID.getConstIntStepValue() && + ID.getConstIntStepValue()->isOne() && + isa<Constant>(ID.getStartValue()) && + cast<Constant>(ID.getStartValue())->isNullValue()) { + + // Use the phi node with the widest type as induction. Use the last + // one if there are multiple (no good reason for doing this other + // than it is expedient). We've checked that it begins at zero and + // steps by one, so this is a canonical induction variable. + if (!Induction || PhiTy == WidestIndTy) + Induction = Phi; + } + + // Both the PHI node itself, and the "post-increment" value feeding + // back into the PHI node may have external users. + AllowedExit.insert(Phi); + AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); + + DEBUG(dbgs() << "LV: Found an induction variable.\n"); + return; +} + bool LoopVectorizationLegality::canVectorizeInstrs() { BasicBlock *Header = TheLoop->getHeader(); // Look for the attribute signaling the absence of NaNs. Function &F = *Header->getParent(); - const DataLayout &DL = F.getParent()->getDataLayout(); - if (F.hasFnAttribute("no-nans-fp-math")) - HasFunNoNaNAttr = - F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; + HasFunNoNaNAttr = + F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; // For each block in the loop. - for (Loop::block_iterator bb = TheLoop->block_begin(), - be = TheLoop->block_end(); bb != be; ++bb) { - + for (BasicBlock *BB : TheLoop->blocks()) { // Scan the instructions in the block and look for hazards. - for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e; - ++it) { - - if (PHINode *Phi = dyn_cast<PHINode>(it)) { + for (Instruction &I : *BB) { + if (auto *Phi = dyn_cast<PHINode>(&I)) { Type *PhiTy = Phi->getType(); // Check that this PHI type is allowed. - if (!PhiTy->isIntegerTy() && - !PhiTy->isFloatingPointTy() && + if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() && !PhiTy->isPointerTy()) { - emitAnalysis(VectorizationReport(&*it) + emitAnalysis(VectorizationReport(Phi) << "loop control flow is not understood by vectorizer"); DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); return false; @@ -4242,61 +4595,25 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { // If this PHINode is not in the header block, then we know that we // can convert it to select during if-conversion. No need to check if // the PHIs in this block are induction or reduction variables. - if (*bb != Header) { + if (BB != Header) { // Check that this instruction has no outside users or is an // identified reduction value with an outside user. 
-        if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
+        if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
           continue;
-        emitAnalysis(VectorizationReport(&*it) <<
-                     "value could not be identified as "
-                     "an induction or reduction variable");
+        emitAnalysis(VectorizationReport(Phi)
+                     << "value could not be identified as "
+                        "an induction or reduction variable");
         return false;
       }
 
       // We only allow if-converted PHIs with exactly two incoming values.
       if (Phi->getNumIncomingValues() != 2) {
-        emitAnalysis(VectorizationReport(&*it)
+        emitAnalysis(VectorizationReport(Phi)
                      << "control flow not understood by vectorizer");
         DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
         return false;
       }
 
-      InductionDescriptor ID;
-      if (InductionDescriptor::isInductionPHI(Phi, PSE.getSE(), ID)) {
-        Inductions[Phi] = ID;
-        // Get the widest type.
-        if (!WidestIndTy)
-          WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
-        else
-          WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
-
-        // Int inductions are special because we only allow one IV.
-        if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
-            ID.getStepValue()->isOne() &&
-            isa<Constant>(ID.getStartValue()) &&
-            cast<Constant>(ID.getStartValue())->isNullValue()) {
-          // Use the phi node with the widest type as induction. Use the last
-          // one if there are multiple (no good reason for doing this other
-          // than it is expedient). We've checked that it begins at zero and
-          // steps by one, so this is a canonical induction variable.
-          if (!Induction || PhiTy == WidestIndTy)
-            Induction = Phi;
-        }
-
-        DEBUG(dbgs() << "LV: Found an induction variable.\n");
-
-        // Until we explicitly handle the case of an induction variable with
-        // an outside loop user we have to give up vectorizing this loop.
-        if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
-          emitAnalysis(VectorizationReport(&*it) <<
-                       "use of induction value outside of the "
-                       "loop is not handled by vectorizer");
-          return false;
-        }
-
-        continue;
-      }
-
       RecurrenceDescriptor RedDes;
       if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
         if (RedDes.hasUnsafeAlgebra())
@@ -4306,22 +4623,41 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
         continue;
       }
 
-      emitAnalysis(VectorizationReport(&*it) <<
-                   "value that could not be identified as "
-                   "reduction is used outside the loop");
-      DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
+      InductionDescriptor ID;
+      if (InductionDescriptor::isInductionPHI(Phi, PSE, ID)) {
+        addInductionPhi(Phi, ID, AllowedExit);
+        continue;
+      }
+
+      if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
+        FirstOrderRecurrences.insert(Phi);
+        continue;
+      }
+
+      // As a last resort, coerce the PHI to an AddRec expression
+      // and re-try classifying it as an induction PHI.
+      if (InductionDescriptor::isInductionPHI(Phi, PSE, ID, true)) {
+        addInductionPhi(Phi, ID, AllowedExit);
+        continue;
+      }
+
+      emitAnalysis(VectorizationReport(Phi)
+                   << "value that could not be identified as "
+                      "reduction is used outside the loop");
+      DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
       return false;
-    }// end of PHI handling
+    } // end of PHI handling
 
     // We handle calls that:
     //   * Are debug info intrinsics.
    //   * Have a mapping to an IR intrinsic.
    //   * Have a vector version available.
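      // E.g. (illustrative, assuming the usual intrinsic and TLI handling):
      // a call to @llvm.sqrt.f32 can be widened to @llvm.sqrt.v4f32 at
      // VF = 4, and a libm call such as sinf is acceptable when the target's
      // TLI advertises a vectorized variant; other calls are rejected below.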
-      CallInst *CI = dyn_cast<CallInst>(it);
-      if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI) &&
+      auto *CI = dyn_cast<CallInst>(&I);
+      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
+          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
-        emitAnalysis(VectorizationReport(&*it)
+        emitAnalysis(VectorizationReport(CI)
                      << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
@@ -4329,11 +4665,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
 
      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e. loop invariant)
-      if (CI &&
-          hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) {
+      if (CI && hasVectorInstrinsicScalarOpd(
+                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
-          emitAnalysis(VectorizationReport(&*it)
+          emitAnalysis(VectorizationReport(CI)
                        << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
@@ -4342,40 +4678,44 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
 
      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
-      if ((!VectorType::isValidElementType(it->getType()) &&
-           !it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) {
-        emitAnalysis(VectorizationReport(&*it)
+      if ((!VectorType::isValidElementType(I.getType()) &&
+           !I.getType()->isVoidTy()) ||
+          isa<ExtractElementInst>(I)) {
+        emitAnalysis(VectorizationReport(&I)
                      << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }
 
      // Check that the stored type is vectorizable.
-      if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
+      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
-          emitAnalysis(VectorizationReport(ST) <<
-                       "store instruction cannot be vectorized");
+          emitAnalysis(VectorizationReport(ST)
+                       << "store instruction cannot be vectorized");
          return false;
        }
-        if (EnableMemAccessVersioning)
-          collectStridedAccess(ST);
-      }
-      if (EnableMemAccessVersioning)
-        if (LoadInst *LI = dyn_cast<LoadInst>(it))
-          collectStridedAccess(LI);
+        // FP instructions can allow unsafe algebra, and can thus be vectorized
+        // by non-IEEE-754-compliant SIMD units.
+        // This applies to floating-point math operations and calls, not memory
+        // operations, shuffles, or casts, as they don't change precision or
+        // semantics.
+      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
+                 !I.hasUnsafeAlgebra()) {
+        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
+        Hints->setPotentiallyUnsafe();
+      }
 
      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
-      if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
-        emitAnalysis(VectorizationReport(&*it) <<
-                     "value cannot be used outside the loop");
+      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
+        emitAnalysis(VectorizationReport(&I)
+                     << "value cannot be used outside the loop");
        return false;
      }
 
    } // next instr.
-  }
 
  if (!Induction) {
@@ -4396,64 +4736,90 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
  return true;
}
 
-void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) {
-  Value *Ptr = nullptr;
-  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
-    Ptr = LI->getPointerOperand();
-  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
-    Ptr = SI->getPointerOperand();
-  else
-    return;
-
-  Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
-  if (!Stride)
-    return;
-
-  DEBUG(dbgs() << "LV: Found a strided access that we can version");
-  DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
-  Strides[Ptr] = Stride;
-  StrideSet.insert(Stride);
-}
-
void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect variables that will remain uniform after vectorization.
-  std::vector<Value*> Worklist;
-  BasicBlock *Latch = TheLoop->getLoopLatch();
 
-  // Start with the conditional branch and walk up the block.
-  Worklist.push_back(Latch->getTerminator()->getOperand(0));
+  // If V is not an instruction inside the current loop, it is a Value
+  // outside of the scope in which we are interested.
+  auto isOutOfScope = [&](Value *V) -> bool {
+    Instruction *I = dyn_cast<Instruction>(V);
+    return (!I || !TheLoop->contains(I));
+  };
+
+  SetVector<Instruction *> Worklist;
+  BasicBlock *Latch = TheLoop->getLoopLatch();
+  // Start with the conditional branch.
+  if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
+    Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
+    Worklist.insert(Cmp);
+    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
+  }
 
  // Also add all consecutive pointer values; these values will be uniform
-  // after vectorization (and subsequent cleanup) and, until revectorization is
-  // supported, all dependencies must also be uniform.
-  for (Loop::block_iterator B = TheLoop->block_begin(),
-       BE = TheLoop->block_end(); B != BE; ++B)
-    for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end();
-         I != IE; ++I)
-      if (I->getType()->isPointerTy() && isConsecutivePtr(&*I))
-        Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
-
-  while (!Worklist.empty()) {
-    Instruction *I = dyn_cast<Instruction>(Worklist.back());
-    Worklist.pop_back();
-
-    // Look at instructions inside this loop.
-    // Stop when reaching PHI nodes.
-    // TODO: we need to follow values all over the loop, not only in this block.
-    if (!I || !TheLoop->contains(I) || isa<PHINode>(I))
-      continue;
+  // after vectorization (and subsequent cleanup).
+  for (auto *BB : TheLoop->blocks()) {
+    for (auto &I : *BB) {
+      if (I.getType()->isPointerTy() && isConsecutivePtr(&I)) {
+        Worklist.insert(&I);
+        DEBUG(dbgs() << "LV: Found uniform instruction: " << I << "\n");
+      }
+    }
+  }
 
-    // This is a known uniform.
-    Uniforms.insert(I);
+  // Expand Worklist in topological order: whenever a new instruction
+  // is added, its users should be either already inside Worklist, or
+  // out of scope. This ensures a uniform instruction will only be used
+  // by other uniform instructions or by out-of-scope instructions.
+  unsigned idx = 0;
+  do {
+    Instruction *I = Worklist[idx++];
 
-    // Insert all operands.
-    Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
+    for (auto OV : I->operand_values()) {
+      if (isOutOfScope(OV))
+        continue;
+      auto *OI = cast<Instruction>(OV);
+      if (all_of(OI->users(), [&](User *U) -> bool {
+            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
+          })) {
+        Worklist.insert(OI);
+        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
+      }
+    }
+  } while (idx != Worklist.size());
+
+  // For an instruction to be added into Worklist above, all its users inside
+  // the current loop must already have been added into Worklist. This
+  // condition can never hold for phi instructions, which are always part of a
+  // dependence cycle: every instruction in the cycle depends on the others in
+  // the cycle being added first, so none of them would ever be added.
+  // That is why we process PHIs separately.
+  for (auto &Induction : *getInductionVars()) {
+    auto *PN = Induction.first;
+    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
+    if (all_of(PN->users(),
+               [&](User *U) -> bool {
+                 return U == UpdateV || isOutOfScope(U) ||
+                        Worklist.count(cast<Instruction>(U));
+               }) &&
+        all_of(UpdateV->users(), [&](User *U) -> bool {
+          return U == PN || isOutOfScope(U) ||
+                 Worklist.count(cast<Instruction>(U));
+        })) {
+      Worklist.insert(cast<Instruction>(PN));
+      Worklist.insert(cast<Instruction>(UpdateV));
+      DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
+      DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
+    }
  }
+
+  Uniforms.insert(Worklist.begin(), Worklist.end());
}
 
bool LoopVectorizationLegality::canVectorizeMemory() {
-  LAI = &LAA->getInfo(TheLoop, Strides);
+  LAI = &(*GetLAA)(*TheLoop);
+  InterleaveInfo.setLAI(LAI);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
@@ -4469,13 +4835,13 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
  }
 
  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
-  PSE.addPredicate(LAI->PSE.getUnionPredicate());
+  PSE.addPredicate(LAI->getPSE().getUnionPredicate());
 
  return true;
}
 
bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
-  Value *In0 = const_cast<Value*>(V);
+  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;
@@ -4483,67 +4849,73 @@ bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  return Inductions.count(PN);
}
 
-bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
+bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
+  return FirstOrderRecurrences.count(Phi);
+}
+
+bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}
 
-bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
-                                           SmallPtrSetImpl<Value *> &SafePtrs) {
-
-  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+bool LoopVectorizationLegality::blockCanBePredicated(
+    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
+  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
+
+  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as operand.
- for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end(); - OI != OE; ++OI) { - if (Constant *C = dyn_cast<Constant>(*OI)) + for (Value *Operand : I.operands()) { + if (auto *C = dyn_cast<Constant>(Operand)) if (C->canTrap()) return false; } // We might be able to hoist the load. - if (it->mayReadFromMemory()) { - LoadInst *LI = dyn_cast<LoadInst>(it); + if (I.mayReadFromMemory()) { + auto *LI = dyn_cast<LoadInst>(&I); if (!LI) return false; if (!SafePtrs.count(LI->getPointerOperand())) { - if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand())) { + if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || + isLegalMaskedGather(LI->getType())) { MaskedOp.insert(LI); continue; } + // !llvm.mem.parallel_loop_access implies if-conversion safety. + if (IsAnnotatedParallel) + continue; return false; } } // We don't predicate stores at the moment. - if (it->mayWriteToMemory()) { - StoreInst *SI = dyn_cast<StoreInst>(it); + if (I.mayWriteToMemory()) { + auto *SI = dyn_cast<StoreInst>(&I); // We only support predication of stores in basic blocks with one // predecessor. if (!SI) return false; + // Build a masked store if it is legal for the target. + if (isLegalMaskedStore(SI->getValueOperand()->getType(), + SI->getPointerOperand()) || + isLegalMaskedScatter(SI->getValueOperand()->getType())) { + MaskedOp.insert(SI); + continue; + } + bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); - + if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || - !isSinglePredecessor) { - // Build a masked store if it is legal for the target, otherwise - // scalarize the block. - bool isLegalMaskedOp = - isLegalMaskedStore(SI->getValueOperand()->getType(), - SI->getPointerOperand()); - if (isLegalMaskedOp) { - --NumPredStores; - MaskedOp.insert(SI); - continue; - } + !isSinglePredecessor) return false; - } } - if (it->mayThrow()) + if (I.mayThrow()) return false; // The instructions below can trap. - switch (it->getOpcode()) { - default: continue; + switch (I.getOpcode()) { + default: + continue; case Instruction::UDiv: case Instruction::SDiv: case Instruction::URem: @@ -4555,199 +4927,273 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB, return true; } -void InterleavedAccessInfo::collectConstStridedAccesses( - MapVector<Instruction *, StrideDescriptor> &StrideAccesses, +void InterleavedAccessInfo::collectConstStrideAccesses( + MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, const ValueToValueMap &Strides) { - // Holds load/store instructions in program order. - SmallVector<Instruction *, 16> AccessList; - for (auto *BB : TheLoop->getBlocks()) { - bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); + auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); + // Since it's desired that the load/store instructions be maintained in + // "program order" for the interleaved access analysis, we have to visit the + // blocks in the loop in reverse postorder (i.e., in a topological order). + // Such an ordering will ensure that any load/store that may be executed + // before a second load/store will precede the second load/store in + // AccessStrideInfo. 
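+  // E.g. (illustrative), for an if-then diamond B0 -> {B1} -> B2 inside the
+  // loop, reverse postorder visits B0, B1, B2, so an access in B0 that may
+  // execute before an access in B2 is also recorded before it.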
+ LoopBlocksDFS DFS(TheLoop); + DFS.perform(LI); + for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) for (auto &I : *BB) { - if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I)) + auto *LI = dyn_cast<LoadInst>(&I); + auto *SI = dyn_cast<StoreInst>(&I); + if (!LI && !SI) continue; - // FIXME: Currently we can't handle mixed accesses and predicated accesses - if (IsPred) - return; - - AccessList.push_back(&I); - } - } - - if (AccessList.empty()) - return; - - auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); - for (auto I : AccessList) { - LoadInst *LI = dyn_cast<LoadInst>(I); - StoreInst *SI = dyn_cast<StoreInst>(I); - - Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); - int Stride = isStridedPtr(PSE, Ptr, TheLoop, Strides); - - // The factor of the corresponding interleave group. - unsigned Factor = std::abs(Stride); - // Ignore the access if the factor is too small or too large. - if (Factor < 2 || Factor > MaxInterleaveGroupFactor) - continue; + Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); + int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides); - const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); - PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); - unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType()); + const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); + PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); + uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); - // An alignment of 0 means target ABI alignment. - unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); - if (!Align) - Align = DL.getABITypeAlignment(PtrTy->getElementType()); + // An alignment of 0 means target ABI alignment. + unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); + if (!Align) + Align = DL.getABITypeAlignment(PtrTy->getElementType()); - StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align); - } + AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); + } } -// Analyze interleaved accesses and collect them into interleave groups. +// Analyze interleaved accesses and collect them into interleaved load and +// store groups. +// +// When generating code for an interleaved load group, we effectively hoist all +// loads in the group to the location of the first load in program order. When +// generating code for an interleaved store group, we sink all stores to the +// location of the last store. This code motion can change the order of load +// and store instructions and may break dependences. +// +// The code generation strategy mentioned above ensures that we won't violate +// any write-after-read (WAR) dependences. // -// Notice that the vectorization on interleaved groups will change instruction -// orders and may break dependences. But the memory dependence check guarantees -// that there is no overlap between two pointers of different strides, element -// sizes or underlying bases. +// E.g., for the WAR dependence: a = A[i]; // (1) +// A[i] = b; // (2) // -// For pointers sharing the same stride, element size and underlying base, no -// need to worry about Read-After-Write dependences and Write-After-Read +// The store group of (2) is always inserted at or below (2), and the load +// group of (1) is always inserted at or above (1). Thus, the instructions will +// never be reordered. All other dependences are checked to ensure the +// correctness of the instruction reordering. 
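+// E.g. (illustrative), in a loop executing  x = A[2*i];  y = A[2*i + 1];  the
+// two loads have stride 2, equal size, and distance 1, so they form a single
+// factor-2 load group that can be lowered to one wide load plus shuffles.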
+// +// The algorithm visits all memory accesses in the loop in bottom-up program +// order. Program order is established by traversing the blocks in the loop in +// reverse postorder when collecting the accesses. +// +// We visit the memory accesses in bottom-up order because it can simplify the +// construction of store groups in the presence of write-after-write (WAW) // dependences. // -// E.g. The RAW dependence: A[i] = a; -// b = A[i]; -// This won't exist as it is a store-load forwarding conflict, which has -// already been checked and forbidden in the dependence check. +// E.g., for the WAW dependence: A[i] = a; // (1) +// A[i] = b; // (2) +// A[i + 1] = c; // (3) // -// E.g. The WAR dependence: a = A[i]; // (1) -// A[i] = b; // (2) -// The store group of (2) is always inserted at or below (2), and the load group -// of (1) is always inserted at or above (1). The dependence is safe. +// We will first create a store group with (3) and (2). (1) can't be added to +// this group because it and (2) are dependent. However, (1) can be grouped +// with other accesses that may precede it in program order. Note that a +// bottom-up order does not imply that WAW dependences should not be checked. void InterleavedAccessInfo::analyzeInterleaving( const ValueToValueMap &Strides) { DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); - // Holds all the stride accesses. - MapVector<Instruction *, StrideDescriptor> StrideAccesses; - collectConstStridedAccesses(StrideAccesses, Strides); + // Holds all accesses with a constant stride. + MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; + collectConstStrideAccesses(AccessStrideInfo, Strides); - if (StrideAccesses.empty()) + if (AccessStrideInfo.empty()) return; + // Collect the dependences in the loop. + collectDependences(); + // Holds all interleaved store groups temporarily. SmallSetVector<InterleaveGroup *, 4> StoreGroups; // Holds all interleaved load groups temporarily. SmallSetVector<InterleaveGroup *, 4> LoadGroups; - // Search the load-load/write-write pair B-A in bottom-up order and try to - // insert B into the interleave group of A according to 3 rules: - // 1. A and B have the same stride. - // 2. A and B have the same memory object size. - // 3. B belongs to the group according to the distance. + // Search in bottom-up program order for pairs of accesses (A and B) that can + // form interleaved load or store groups. In the algorithm below, access A + // precedes access B in program order. We initialize a group for B in the + // outer loop of the algorithm, and then in the inner loop, we attempt to + // insert each A into B's group if: + // + // 1. A and B have the same stride, + // 2. A and B have the same memory object size, and + // 3. A belongs in B's group according to its distance from B. // - // The bottom-up order can avoid breaking the Write-After-Write dependences - // between two pointers of the same base. - // E.g. A[i] = a; (1) - // A[i] = b; (2) - // A[i+1] = c (3) - // We form the group (2)+(3) in front, so (1) has to form groups with accesses - // above (1), which guarantees that (1) is always above (2). 
- for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E; - ++I) { - Instruction *A = I->first; - StrideDescriptor DesA = I->second; - - InterleaveGroup *Group = getInterleaveGroup(A); - if (!Group) { - DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n'); - Group = createInterleaveGroup(A, DesA.Stride, DesA.Align); + // Special care is taken to ensure group formation will not break any + // dependences. + for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); + BI != E; ++BI) { + Instruction *B = BI->first; + StrideDescriptor DesB = BI->second; + + // Initialize a group for B if it has an allowable stride. Even if we don't + // create a group for B, we continue with the bottom-up algorithm to ensure + // we don't break any of B's dependences. + InterleaveGroup *Group = nullptr; + if (isStrided(DesB.Stride)) { + Group = getInterleaveGroup(B); + if (!Group) { + DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); + Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); + } + if (B->mayWriteToMemory()) + StoreGroups.insert(Group); + else + LoadGroups.insert(Group); } - if (A->mayWriteToMemory()) - StoreGroups.insert(Group); - else - LoadGroups.insert(Group); + for (auto AI = std::next(BI); AI != E; ++AI) { + Instruction *A = AI->first; + StrideDescriptor DesA = AI->second; + + // Our code motion strategy implies that we can't have dependences + // between accesses in an interleaved group and other accesses located + // between the first and last member of the group. Note that this also + // means that a group can't have more than one member at a given offset. + // The accesses in a group can have dependences with other accesses, but + // we must ensure we don't extend the boundaries of the group such that + // we encompass those dependent accesses. + // + // For example, assume we have the sequence of accesses shown below in a + // stride-2 loop: + // + // (1, 2) is a group | A[i] = a; // (1) + // | A[i-1] = b; // (2) | + // A[i-3] = c; // (3) + // A[i] = d; // (4) | (2, 4) is not a group + // + // Because accesses (2) and (3) are dependent, we can group (2) with (1) + // but not with (4). If we did, the dependent access (3) would be within + // the boundaries of the (2, 4) group. + if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { + + // If a dependence exists and A is already in a group, we know that A + // must be a store since A precedes B and WAR dependences are allowed. + // Thus, A would be sunk below B. We release A's group to prevent this + // illegal code motion. A will then be free to form another group with + // instructions that precede it. + if (isInterleaved(A)) { + InterleaveGroup *StoreGroup = getInterleaveGroup(A); + StoreGroups.remove(StoreGroup); + releaseGroup(StoreGroup); + } - for (auto II = std::next(I); II != E; ++II) { - Instruction *B = II->first; - StrideDescriptor DesB = II->second; + // If a dependence exists and A is not already in a group (or it was + // and we just released it), B might be hoisted above A (if B is a + // load) or another store might be sunk below A (if B is a store). In + // either case, we can't add additional instructions to B's group. B + // will only form a group with instructions that it precedes. + break; + } - // Ignore if B is already in a group or B is a different memory operation. - if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory()) + // At this point, we've checked for illegal code motion. 
If either A or B + // isn't strided, there's nothing left to do. + if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) continue; - // Check the rule 1 and 2. - if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size) + // Ignore A if it's already in a group or isn't the same kind of memory + // operation as B. + if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) continue; - // Calculate the distance and prepare for the rule 3. - const SCEVConstant *DistToA = dyn_cast<SCEVConstant>( - PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev)); - if (!DistToA) + // Check rules 1 and 2. Ignore A if its stride or size is different from + // that of B. + if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) continue; - int DistanceToA = DistToA->getAPInt().getSExtValue(); + // Calculate the distance from A to B. + const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( + PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); + if (!DistToB) + continue; + int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); + + // Check rule 3. Ignore A if its distance to B is not a multiple of the + // size. + if (DistanceToB % static_cast<int64_t>(DesB.Size)) + continue; - // Skip if the distance is not multiple of size as they are not in the - // same group. - if (DistanceToA % static_cast<int>(DesA.Size)) + // Ignore A if either A or B is in a predicated block. Although we + // currently prevent group formation for predicated accesses, we may be + // able to relax this limitation in the future once we handle more + // complicated blocks. + if (isPredicated(A->getParent()) || isPredicated(B->getParent())) continue; - // The index of B is the index of A plus the related index to A. - int IndexB = - Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size); + // The index of A is the index of B plus A's distance to B in multiples + // of the size. + int IndexA = + Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); - // Try to insert B into the group. - if (Group->insertMember(B, IndexB, DesB.Align)) { - DEBUG(dbgs() << "LV: Inserted:" << *B << '\n' - << " into the interleave group with" << *A << '\n'); - InterleaveGroupMap[B] = Group; + // Try to insert A into B's group. + if (Group->insertMember(A, IndexA, DesA.Align)) { + DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' + << " into the interleave group with" << *B << '\n'); + InterleaveGroupMap[A] = Group; // Set the first load in program order as the insert position. - if (B->mayReadFromMemory()) - Group->setInsertPos(B); + if (A->mayReadFromMemory()) + Group->setInsertPos(A); } - } // Iteration on instruction B - } // Iteration on instruction A + } // Iteration over A accesses. + } // Iteration over B accesses. // Remove interleaved store groups with gaps. for (InterleaveGroup *Group : StoreGroups) if (Group->getNumMembers() != Group->getFactor()) releaseGroup(Group); - // Remove interleaved load groups that don't have the first and last member. - // This guarantees that we won't do speculative out of bounds loads. + // If there is a non-reversed interleaved load group with gaps, we will need + // to execute at least one scalar epilogue iteration. This will ensure that + // we don't speculatively access memory out-of-bounds. Note that we only need + // to look for a member at index factor - 1, since every group must have a + // member at index zero. 
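+  // E.g. (illustrative), if the loop loads only A[3*i] and A[3*i + 1], they
+  // form a factor-3 group whose member at index 2 is missing; the wide load
+  // would touch A[3*(n-1) + 2] on the last iteration, an element the scalar
+  // loop never accesses, so a scalar epilogue iteration is required instead.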
for (InterleaveGroup *Group : LoadGroups) - if (!Group->getMember(0) || !Group->getMember(Group->getFactor() - 1)) - releaseGroup(Group); + if (!Group->getMember(Group->getFactor() - 1)) { + if (Group->isReverse()) { + releaseGroup(Group); + } else { + DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); + RequiresScalarEpilogue = true; + } + } } LoopVectorizationCostModel::VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { // Width 1 means no vectorize - VectorizationFactor Factor = { 1U, 0U }; + VectorizationFactor Factor = {1U, 0U}; if (OptForSize && Legal->getRuntimePointerChecking()->Need) { - emitAnalysis(VectorizationReport() << - "runtime pointer checks needed. Enable vectorization of this " - "loop with '#pragma clang loop vectorize(enable)' when " - "compiling with -Os/-Oz"); - DEBUG(dbgs() << - "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); + emitAnalysis( + VectorizationReport() + << "runtime pointer checks needed. Enable vectorization of this " + "loop with '#pragma clang loop vectorize(enable)' when " + "compiling with -Os/-Oz"); + DEBUG(dbgs() + << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); return Factor; } if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { - emitAnalysis(VectorizationReport() << - "store that is conditionally executed prevents vectorization"); + emitAnalysis( + VectorizationReport() + << "store that is conditionally executed prevents vectorization"); DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); return Factor; } // Find the trip count. - unsigned TC = SE->getSmallConstantTripCount(TheLoop); + unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); @@ -4755,16 +5201,25 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); unsigned WidestRegister = TTI.getRegisterBitWidth(true); unsigned MaxSafeDepDist = -1U; + + // Get the maximum safe dependence distance in bits computed by LAA. If the + // loop contains any interleaved accesses, we divide the dependence distance + // by the maximum interleave factor of all interleaved groups. Note that + // although the division ensures correctness, this is a fairly conservative + // computation because the maximum distance computed by LAA may not involve + // any of the interleaved accesses. if (Legal->getMaxSafeDepDistBytes() != -1U) - MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; - WidestRegister = ((WidestRegister < MaxSafeDepDist) ? - WidestRegister : MaxSafeDepDist); + MaxSafeDepDist = + Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor(); + + WidestRegister = + ((WidestRegister < MaxSafeDepDist) ? 
WidestRegister : MaxSafeDepDist); unsigned MaxVectorSize = WidestRegister / WidestType; DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " << WidestType << " bits.\n"); - DEBUG(dbgs() << "LV: The Widest register is: " - << WidestRegister << " bits.\n"); + DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister + << " bits.\n"); if (MaxVectorSize == 0) { DEBUG(dbgs() << "LV: The target has no vector registers.\n"); @@ -4772,7 +5227,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { } assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" - " into one vector!"); + " into one vector!"); unsigned VF = MaxVectorSize; if (MaximizeBandwidth && !OptForSize) { @@ -4800,9 +5255,9 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { if (OptForSize) { // If we are unable to calculate the trip count then don't try to vectorize. if (TC < 2) { - emitAnalysis - (VectorizationReport() << - "unable to calculate the loop count due to complex control flow"); + emitAnalysis( + VectorizationReport() + << "unable to calculate the loop count due to complex control flow"); DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); return Factor; } @@ -4815,11 +5270,11 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { else { // If the trip count that we found modulo the vectorization factor is not // zero then we require a tail. - emitAnalysis(VectorizationReport() << - "cannot optimize for size and vectorize at the " - "same time. Enable vectorization of this loop " - "with '#pragma clang loop vectorize(enable)' " - "when compiling with -Os/-Oz"); + emitAnalysis(VectorizationReport() + << "cannot optimize for size and vectorize at the " + "same time. Enable vectorization of this loop " + "with '#pragma clang loop vectorize(enable)' " + "when compiling with -Os/-Oz"); DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); return Factor; } @@ -4834,7 +5289,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { return Factor; } - float Cost = expectedCost(1); + float Cost = expectedCost(1).first; #ifndef NDEBUG const float ScalarCost = Cost; #endif /* NDEBUG */ @@ -4845,16 +5300,23 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { // Ignore scalar width, because the user explicitly wants vectorization. if (ForceVectorization && VF > 1) { Width = 2; - Cost = expectedCost(Width) / (float)Width; + Cost = expectedCost(Width).first / (float)Width; } - for (unsigned i=2; i <= VF; i*=2) { + for (unsigned i = 2; i <= VF; i *= 2) { // Notice that the vector loop needs to be executed less times, so // we need to divide the cost of the vector loops by the width of // the vector elements. 
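    // E.g. (illustrative numbers): with a scalar cost of 8, a VF = 4 body
    // costing 20 has a per-lane cost of 20 / 4 = 5 and is selected, while a
    // VF = 8 body costing 56 has a per-lane cost of 7 and is not.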
- float VectorCost = expectedCost(i) / (float)i; - DEBUG(dbgs() << "LV: Vector loop of width " << i << " costs: " << - (int)VectorCost << ".\n"); + VectorizationCostTy C = expectedCost(i); + float VectorCost = C.first / (float)i; + DEBUG(dbgs() << "LV: Vector loop of width " << i + << " costs: " << (int)VectorCost << ".\n"); + if (!C.second && !ForceVectorization) { + DEBUG( + dbgs() << "LV: Not considering vector loop of width " << i + << " because it will not generate any vector instructions.\n"); + continue; + } if (VectorCost < Cost) { Cost = VectorCost; Width = i; @@ -4864,7 +5326,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() << "LV: Vectorization seems to be not beneficial, " << "but was forced by a user.\n"); - DEBUG(dbgs() << "LV: Selecting VF: "<< Width << ".\n"); + DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); Factor.Width = Width; Factor.Cost = Width * Cost; return Factor; @@ -4877,25 +5339,22 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() { const DataLayout &DL = TheFunction->getParent()->getDataLayout(); // For each block. - for (Loop::block_iterator bb = TheLoop->block_begin(), - be = TheLoop->block_end(); bb != be; ++bb) { - BasicBlock *BB = *bb; - + for (BasicBlock *BB : TheLoop->blocks()) { // For each instruction in the loop. - for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { - Type *T = it->getType(); + for (Instruction &I : *BB) { + Type *T = I.getType(); // Skip ignored values. - if (ValuesToIgnore.count(&*it)) + if (ValuesToIgnore.count(&I)) continue; // Only examine Loads, Stores and PHINodes. - if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it)) + if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) continue; // Examine PHI nodes that are reduction variables. Update the type to // account for the recurrence type. - if (PHINode *PN = dyn_cast<PHINode>(it)) { + if (auto *PN = dyn_cast<PHINode>(&I)) { if (!Legal->isReductionVariable(PN)) continue; RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; @@ -4903,13 +5362,13 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() { } // Examine the stored values. - if (StoreInst *ST = dyn_cast<StoreInst>(it)) + if (auto *ST = dyn_cast<StoreInst>(&I)) T = ST->getValueOperand()->getType(); // Ignore loaded pointer types and stored pointer types that are not // consecutive. However, we do want to take consecutive stores/loads of // pointer vectors into account. - if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it)) + if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I)) continue; MinWidth = std::min(MinWidth, @@ -4949,13 +5408,13 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, return 1; // Do not interleave loops with a relatively small trip count. 
- unsigned TC = SE->getSmallConstantTripCount(TheLoop); + unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); if (TC > 1 && TC < TinyTripCountInterleaveThreshold) return 1; unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); - DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters << - " registers\n"); + DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters + << " registers\n"); if (VF == 1) { if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) @@ -5002,7 +5461,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, // If we did not calculate the cost for VF (because the user selected the VF) // then we calculate the cost of VF here. if (LoopCost == 0) - LoopCost = expectedCost(VF); + LoopCost = expectedCost(VF).first; // Clamp the calculated IC to be between the 1 and the max interleave count // that the target allows. @@ -5044,8 +5503,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, // by this point), we can increase the critical path length if the loop // we're interleaving is inside another loop. Limit, by default to 2, so the // critical path only gets increased by one reduction operation. - if (Legal->getReductionVars()->size() && - TheLoop->getLoopDepth() > 1) { + if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) { unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); SmallIC = std::min(SmallIC, F); StoresIC = std::min(StoresIC, F); @@ -5075,8 +5533,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, } SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> -LoopVectorizationCostModel::calculateRegisterUsage( - const SmallVector<unsigned, 8> &VFs) { +LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { // This function calculates the register usage by measuring the highest number // of values that are alive at a single location. Obviously, this is a very // rough estimation. We scan the loop in a topological order in order and @@ -5103,31 +5560,30 @@ LoopVectorizationCostModel::calculateRegisterUsage( // Each 'key' in the map opens a new interval. The values // of the map are the index of the 'last seen' usage of the // instruction that is the key. - typedef DenseMap<Instruction*, unsigned> IntervalMap; + typedef DenseMap<Instruction *, unsigned> IntervalMap; // Maps instruction to its index. - DenseMap<unsigned, Instruction*> IdxToInstr; + DenseMap<unsigned, Instruction *> IdxToInstr; // Marks the end of each interval. IntervalMap EndPoint; // Saves the list of instruction indices that are used in the loop. - SmallSet<Instruction*, 8> Ends; + SmallSet<Instruction *, 8> Ends; // Saves the list of values that are used in the loop but are // defined outside the loop, such as arguments and constants. - SmallPtrSet<Value*, 8> LoopInvariants; + SmallPtrSet<Value *, 8> LoopInvariants; unsigned Index = 0; - for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), - be = DFS.endRPO(); bb != be; ++bb) { - RU.NumInstructions += (*bb)->size(); - for (Instruction &I : **bb) { + for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { + RU.NumInstructions += BB->size(); + for (Instruction &I : *BB) { IdxToInstr[Index++] = &I; // Save the end location of each USE. 
- for (unsigned i = 0; i < I.getNumOperands(); ++i) { - Value *U = I.getOperand(i); - Instruction *Instr = dyn_cast<Instruction>(U); + for (Value *U : I.operands()) { + auto *Instr = dyn_cast<Instruction>(U); // Ignore non-instruction values such as arguments, constants, etc. - if (!Instr) continue; + if (!Instr) + continue; // If this instruction is outside the loop then record it and continue. if (!TheLoop->contains(Instr)) { @@ -5143,15 +5599,14 @@ LoopVectorizationCostModel::calculateRegisterUsage( } // Saves the list of intervals that end with the index in 'key'. - typedef SmallVector<Instruction*, 2> InstrList; + typedef SmallVector<Instruction *, 2> InstrList; DenseMap<unsigned, InstrList> TransposeEnds; // Transpose the EndPoints to a list of values that end at each index. - for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); - it != e; ++it) - TransposeEnds[it->second].push_back(it->first); + for (auto &Interval : EndPoint) + TransposeEnds[Interval.second].push_back(Interval.first); - SmallSet<Instruction*, 8> OpenIntervals; + SmallSet<Instruction *, 8> OpenIntervals; // Get the size of the widest register. unsigned MaxSafeDepDist = -1U; @@ -5168,6 +5623,8 @@ LoopVectorizationCostModel::calculateRegisterUsage( // A lambda that gets the register usage for the given type and VF. auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { + if (Ty->isTokenTy()) + return 0U; unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); return std::max<unsigned>(1, VF * TypeSize / WidestRegister); }; @@ -5175,16 +5632,17 @@ LoopVectorizationCostModel::calculateRegisterUsage( for (unsigned int i = 0; i < Index; ++i) { Instruction *I = IdxToInstr[i]; // Ignore instructions that are never used within the loop. - if (!Ends.count(I)) continue; - - // Skip ignored values. - if (ValuesToIgnore.count(I)) + if (!Ends.count(I)) continue; // Remove all of the instructions that end at this location. InstrList &List = TransposeEnds[i]; - for (unsigned int j = 0, e = List.size(); j < e; ++j) - OpenIntervals.erase(List[j]); + for (Instruction *ToRemove : List) + OpenIntervals.erase(ToRemove); + + // Skip ignored values. + if (ValuesToIgnore.count(I)) + continue; // For each VF find the maximum usage of registers. for (unsigned j = 0, e = VFs.size(); j < e; ++j) { @@ -5195,8 +5653,12 @@ LoopVectorizationCostModel::calculateRegisterUsage( // Count the number of live intervals. unsigned RegUsage = 0; - for (auto Inst : OpenIntervals) + for (auto Inst : OpenIntervals) { + // Skip ignored values for VF > 1. + if (VecValuesToIgnore.count(Inst)) + continue; RegUsage += GetRegUsage(Inst->getType(), VFs[j]); + } MaxUsages[j] = std::max(MaxUsages[j], RegUsage); } @@ -5216,7 +5678,7 @@ LoopVectorizationCostModel::calculateRegisterUsage( Invariant += GetRegUsage(Inst->getType(), VFs[i]); } - DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); + DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); @@ -5229,48 +5691,62 @@ LoopVectorizationCostModel::calculateRegisterUsage( return RUs; } -unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) { - unsigned Cost = 0; +LoopVectorizationCostModel::VectorizationCostTy +LoopVectorizationCostModel::expectedCost(unsigned VF) { + VectorizationCostTy Cost; // For each block. 
-  for (Loop::block_iterator bb = TheLoop->block_begin(),
-       be = TheLoop->block_end(); bb != be; ++bb) {
-    unsigned BlockCost = 0;
-    BasicBlock *BB = *bb;
+  for (BasicBlock *BB : TheLoop->blocks()) {
+    VectorizationCostTy BlockCost;
 
    // For each instruction in the old loop.
-    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
-      if (isa<DbgInfoIntrinsic>(it))
+      if (isa<DbgInfoIntrinsic>(I))
        continue;
 
      // Skip ignored values.
-      if (ValuesToIgnore.count(&*it))
+      if (ValuesToIgnore.count(&I))
        continue;
 
-      unsigned C = getInstructionCost(&*it, VF);
+      VectorizationCostTy C = getInstructionCost(&I, VF);
 
      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
-        C = ForceTargetInstructionCost;
+        C.first = ForceTargetInstructionCost;
 
-      BlockCost += C;
-      DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
-            VF << " For instruction: " << *it << '\n');
+      BlockCost.first += C.first;
+      BlockCost.second |= C.second;
+      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
+                   << VF << " For instruction: " << I << '\n');
    }
 
    // We assume that if-converted blocks have a 50% chance of being executed.
    // When the code is scalar then some of the blocks are avoided due to CF.
    // When the code is vectorized we execute all code paths.
-    if (VF == 1 && Legal->blockNeedsPredication(*bb))
-      BlockCost /= 2;
+    if (VF == 1 && Legal->blockNeedsPredication(BB))
+      BlockCost.first /= 2;
 
-    Cost += BlockCost;
+    Cost.first += BlockCost.first;
+    Cost.second |= BlockCost.second;
  }
 
  return Cost;
}
 
+/// \brief Check if the load/store instruction \p I may be translated into
+/// gather/scatter during vectorization.
+///
+/// Pointer \p Ptr specifies the address in memory for the given scalar memory
+/// instruction. We need it to retrieve the data type.
+/// Using gather/scatter is possible when it is supported by the target.
+static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr,
+                                   LoopVectorizationLegality *Legal) {
+  auto *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
+  return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) ||
+         (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy));
+}
+
/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
@@ -5284,7 +5760,7 @@ static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
-  GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
+  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;
 
@@ -5309,7 +5785,7 @@ static bool isLikelyComplexAddressComputation(Value *Ptr,
  // Check the step is constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
-  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
+  const auto *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;
 
@@ -5329,17 +5805,29 @@ static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
        Legal->hasStride(I->getOperand(1));
}
 
-unsigned
+LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
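  // E.g. (illustrative), the scalar GEP feeding a consecutive wide load is
  // emitted once per vector iteration rather than once per lane, so it is
  // costed at VF = 1.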
if (Legal->isUniformAfterVectorization(I)) VF = 1; + Type *VectorTy; + unsigned C = getInstructionCost(I, VF, VectorTy); + + bool TypeNotScalarized = + VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; + return VectorizationCostTy(C, TypeNotScalarized); +} + +unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, + unsigned VF, + Type *&VectorTy) { Type *RetTy = I->getType(); if (VF > 1 && MinBWs.count(I)) RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); - Type *VectorTy = ToVectorTy(RetTy, VF); + VectorTy = ToVectorTy(RetTy, VF); + auto SE = PSE.getSE(); // TODO: We need to estimate the cost of intrinsic calls. switch (I->getOpcode()) { @@ -5352,9 +5840,17 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { case Instruction::Br: { return TTI.getCFInstrCost(I->getOpcode()); } - case Instruction::PHI: - //TODO: IF-converted IFs become selects. + case Instruction::PHI: { + auto *Phi = cast<PHINode>(I); + + // First-order recurrences are replaced by vector shuffles inside the loop. + if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) + return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, + VectorTy, VF - 1, VectorTy); + + // TODO: IF-converted IFs become selects. return 0; + } case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: @@ -5379,9 +5875,9 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { // Certain instructions can be cheaper to vectorize if they have a constant // second vector operand. One example of this are shifts on x86. TargetTransformInfo::OperandValueKind Op1VK = - TargetTransformInfo::OK_AnyValue; + TargetTransformInfo::OK_AnyValue; TargetTransformInfo::OperandValueKind Op2VK = - TargetTransformInfo::OK_AnyValue; + TargetTransformInfo::OK_AnyValue; TargetTransformInfo::OperandValueProperties Op1VP = TargetTransformInfo::OP_None; TargetTransformInfo::OperandValueProperties Op2VP = @@ -5432,20 +5928,28 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { case Instruction::Load: { StoreInst *SI = dyn_cast<StoreInst>(I); LoadInst *LI = dyn_cast<LoadInst>(I); - Type *ValTy = (SI ? SI->getValueOperand()->getType() : - LI->getType()); + Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); VectorTy = ToVectorTy(ValTy, VF); unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); - unsigned AS = SI ? SI->getPointerAddressSpace() : - LI->getPointerAddressSpace(); + unsigned AS = + SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand(); // We add the cost of address computation here instead of with the gep // instruction because only here we know whether the operation is // scalarized. if (VF == 1) return TTI.getAddressComputationCost(VectorTy) + - TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); + TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); + + if (LI && Legal->isUniform(Ptr)) { + // Scalar load + broadcast + unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); + Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), + Alignment, AS); + return Cost + + TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); + } // For an interleaved access, calculate the total cost of the whole // interleave group. 
@@ -5463,7 +5967,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { VectorTy->getVectorNumElements() * InterleaveFactor); // Holds the indices of existing members in an interleaved load group. - // An interleaved store group doesn't need this as it dones't allow gaps. + // An interleaved store group doesn't need this as it doesn't allow gaps. SmallVector<unsigned, 4> Indices; if (LI) { for (unsigned i = 0; i < InterleaveFactor; i++) @@ -5489,13 +5993,17 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { // Scalarized loads/stores. int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); + bool UseGatherOrScatter = + (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal); + bool Reverse = ConsecutiveStride < 0; const DataLayout &DL = I->getModule()->getDataLayout(); - unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy); - unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF; - if (!ConsecutiveStride || ScalarAllocatedSize != VectorElementSize) { + uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy); + uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF; + if ((!ConsecutiveStride && !UseGatherOrScatter) || + ScalarAllocatedSize != VectorElementSize) { bool IsComplexComputation = - isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); + isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); unsigned Cost = 0; // The cost of extracting from the value vector and pointer vector. Type *PtrTy = ToVectorTy(Ptr->getType(), VF); @@ -5505,29 +6013,36 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { // In case of STORE, the cost of ExtractElement from the vector. // In case of LOAD, the cost of InsertElement into the returned // vector. - Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement : - Instruction::InsertElement, - VectorTy, i); + Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement + : Instruction::InsertElement, + VectorTy, i); } // The cost of the scalar loads/stores. Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation); - Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), - Alignment, AS); + Cost += VF * + TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), + Alignment, AS); return Cost; } - // Wide load/stores. unsigned Cost = TTI.getAddressComputationCost(VectorTy); + if (UseGatherOrScatter) { + assert(ConsecutiveStride == 0 && + "Gather/Scatter are not used for consecutive stride"); + return Cost + + TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, + Legal->isMaskRequired(I), Alignment); + } + // Wide load/stores. 
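
When neither a consecutive stride nor gather/scatter applies, the hunk above prices the access as full scalarization: one vector insert or extract per lane, plus one address computation and one scalar memory operation per lane. A toy version with invented per-operation costs:

    #include <cstdio>

    int main() {
      unsigned VF = 4;
      bool IsStore = true;       // stores extract lanes, loads insert them
      unsigned LaneMoveCost = 1; // hypothetical extract/insert cost per lane
      unsigned AddrCost = 1, ScalarMemOpCost = 2;

      unsigned Cost = 0;
      for (unsigned i = 0; i < VF; ++i)
        Cost += LaneMoveCost;            // move lane i between vector and scalar
      Cost += VF * AddrCost;             // one address computation per lane
      Cost += VF * ScalarMemOpCost;      // one scalar load/store per lane
      std::printf("%s scalarization cost = %u\n",
                  IsStore ? "store" : "load", Cost);
    }
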
if (Legal->isMaskRequired(I)) - Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, - AS); + Cost += + TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); else Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); if (Reverse) - Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, - VectorTy, 0); + Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); return Cost; } case Instruction::ZExt: @@ -5548,7 +6063,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { Legal->isInductionVariable(I->getOperand(0))) return TTI.getCastInstrCost(I->getOpcode(), I->getType(), I->getOperand(0)->getType()); - + Type *SrcScalarTy = I->getOperand(0)->getType(); Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF); if (VF > 1 && MinBWs.count(I)) { @@ -5560,23 +6075,23 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { Type *MinVecTy = VectorTy; if (I->getOpcode() == Instruction::Trunc) { SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); - VectorTy = largestIntegerVectorType(ToVectorTy(I->getType(), VF), - MinVecTy); + VectorTy = + largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); } else if (I->getOpcode() == Instruction::ZExt || I->getOpcode() == Instruction::SExt) { SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); - VectorTy = smallestIntegerVectorType(ToVectorTy(I->getType(), VF), - MinVecTy); + VectorTy = + smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); } } - + return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); } case Instruction::Call: { bool NeedToScalarize; CallInst *CI = cast<CallInst>(I); unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); - if (getIntrinsicIDForCall(CI, TLI)) + if (getVectorIntrinsicIDForCall(CI, TLI)) return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); return CallCost; } @@ -5587,10 +6102,10 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { unsigned Cost = 0; if (!RetTy->isVoidTy() && VF != 1) { - unsigned InsCost = TTI.getVectorInstrCost(Instruction::InsertElement, - VectorTy); - unsigned ExtCost = TTI.getVectorInstrCost(Instruction::ExtractElement, - VectorTy); + unsigned InsCost = + TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy); + unsigned ExtCost = + TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy); // The cost of inserting the results plus extracting each one of the // operands. @@ -5602,7 +6117,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy); return Cost; } - }// end of switch. + } // end of switch. 
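
The default case that closes the switch prices an unknown instruction pessimistically: insert each of the VF results, extract each operand of each scalar copy, and charge VF scalar operations (a multiply stands in as the worst case). A toy version with invented costs:

    #include <cstdio>

    int main() {
      unsigned VF = 4, NumOperands = 2;
      unsigned InsCost = 1, ExtCost = 1, ScalarMulCost = 3; // hypothetical TTI numbers
      unsigned Cost = 0;
      // Insert the VF results and extract the operands of each scalar clone.
      Cost += VF * (InsCost + ExtCost * NumOperands);
      // VF copies of the pessimistically priced scalar operation.
      Cost += VF * ScalarMulCost;
      std::printf("scalarized cost = %u\n", Cost); // 24 with these numbers
    }
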
} char LoopVectorize::ID = 0; @@ -5616,31 +6131,101 @@ INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) -INITIALIZE_PASS_DEPENDENCY(LCSSA) +INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) -INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis) -INITIALIZE_PASS_DEPENDENCY(DemandedBits) +INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) +INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) namespace llvm { - Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { - return new LoopVectorize(NoUnrolling, AlwaysVectorize); - } +Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { + return new LoopVectorize(NoUnrolling, AlwaysVectorize); +} } bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { // Check for a store. - if (StoreInst *ST = dyn_cast<StoreInst>(Inst)) + if (auto *ST = dyn_cast<StoreInst>(Inst)) return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0; // Check for a load. - if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) + if (auto *LI = dyn_cast<LoadInst>(Inst)) return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0; return false; } +void LoopVectorizationCostModel::collectValuesToIgnore() { + // Ignore ephemeral values. + CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); + + // Ignore type-promoting instructions we identified during reduction + // detection. + for (auto &Reduction : *Legal->getReductionVars()) { + RecurrenceDescriptor &RedDes = Reduction.second; + SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); + VecValuesToIgnore.insert(Casts.begin(), Casts.end()); + } + + // Ignore induction phis that are only used in either GetElementPtr or ICmp + // instruction to exit loop. Induction variables usually have large types and + // can have big impact when estimating register usage. + // This is for when VF > 1. + for (auto &Induction : *Legal->getInductionVars()) { + auto *PN = Induction.first; + auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch()); + + // Check that the PHI is only used by the induction increment (UpdateV) or + // by GEPs. Then check that UpdateV is only used by a compare instruction, + // the loop header PHI, or by GEPs. + // FIXME: Need precise def-use analysis to determine if this instruction + // variable will be vectorized. + if (all_of(PN->users(), + [&](const User *U) -> bool { + return U == UpdateV || isa<GetElementPtrInst>(U); + }) && + all_of(UpdateV->users(), [&](const User *U) -> bool { + return U == PN || isa<ICmpInst>(U) || isa<GetElementPtrInst>(U); + })) { + VecValuesToIgnore.insert(PN); + VecValuesToIgnore.insert(UpdateV); + } + } + + // Ignore instructions that will not be vectorized. + // This is for when VF > 1. + for (BasicBlock *BB : TheLoop->blocks()) { + for (auto &Inst : *BB) { + switch (Inst.getOpcode()) + case Instruction::GetElementPtr: { + // Ignore GEP if its last operand is an induction variable so that it is + // a consecutive load/store and won't be vectorized as scatter/gather + // pattern. 
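
The pair of all_of checks above accepts an induction PHI for VecValuesToIgnore only when every user of the PHI is its increment or a GEP, and every user of the increment is the PHI, a compare, or a GEP. A standalone sketch of the same shape over tagged mock users:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    enum class User { Increment, Phi, Gep, Icmp, Other };

    int main() {
      std::vector<User> PhiUsers = {User::Increment, User::Gep};
      std::vector<User> UpdateUsers = {User::Phi, User::Icmp};
      bool Ignorable =
          std::all_of(PhiUsers.begin(), PhiUsers.end(),
                      [](User U) { return U == User::Increment || U == User::Gep; }) &&
          std::all_of(UpdateUsers.begin(), UpdateUsers.end(), [](User U) {
            return U == User::Phi || U == User::Icmp || U == User::Gep;
          });
      std::printf("ignore induction pair = %d\n", (int)Ignorable);
    }
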
+ + GetElementPtrInst *Gep = cast<GetElementPtrInst>(&Inst); + unsigned NumOperands = Gep->getNumOperands(); + unsigned InductionOperand = getGEPInductionOperand(Gep); + bool GepToIgnore = true; + + // Check that all of the gep indices are uniform except for the + // induction operand. + for (unsigned i = 0; i != NumOperands; ++i) { + if (i != InductionOperand && + !PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), + TheLoop)) { + GepToIgnore = false; + break; + } + } + + if (GepToIgnore) + VecValuesToIgnore.insert(&Inst); + break; + } + } + } +} void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, bool IfPredicateStore) { @@ -5651,9 +6236,7 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, setDebugLocFromInst(Builder, Instr); // Find all of the vectorized parameters. - for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { - Value *SrcOp = Instr->getOperand(op); - + for (Value *SrcOp : Instr->operands()) { // If we are accessing the old induction variable, use the new one. if (SrcOp == OldInduction) { Params.push_back(getVectorValue(SrcOp)); @@ -5683,8 +6266,7 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, // Does this instruction return a value ? bool IsVoidRetTy = Instr->getType()->isVoidTy(); - Value *UndefVec = IsVoidRetTy ? nullptr : - UndefValue::get(Instr->getType()); + Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType()); // Create a new entry in the WidenMap and initialize it to Undef or Null. VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); @@ -5711,43 +6293,43 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, } Instruction *Cloned = Instr->clone(); - if (!IsVoidRetTy) - Cloned->setName(Instr->getName() + ".cloned"); - // Replace the operands of the cloned instructions with extracted scalars. - for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { - Value *Op = Params[op][Part]; - Cloned->setOperand(op, Op); - } + if (!IsVoidRetTy) + Cloned->setName(Instr->getName() + ".cloned"); + // Replace the operands of the cloned instructions with extracted scalars. + for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { + Value *Op = Params[op][Part]; + Cloned->setOperand(op, Op); + } - // Place the cloned scalar in the new loop. - Builder.Insert(Cloned); + // Place the cloned scalar in the new loop. + Builder.Insert(Cloned); - // If the original scalar returns a value we need to place it in a vector - // so that future users will be able to use it. - if (!IsVoidRetTy) - VecResults[Part] = Cloned; + // If we just cloned a new assumption, add it the assumption cache. + if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) + if (II->getIntrinsicID() == Intrinsic::assume) + AC->registerAssumption(II); - // End if-block. - if (IfPredicateStore) - PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), - Cmp)); + // If the original scalar returns a value we need to place it in a vector + // so that future users will be able to use it. + if (!IsVoidRetTy) + VecResults[Part] = Cloned; + + // End if-block. 
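
The GEP screening loop above boils down to: every index except the induction operand must be loop-invariant. A standalone sketch with the invariance query replaced by precomputed flags (values are illustrative):

    #include <cstdio>
    #include <vector>

    int main() {
      // One flag per GEP operand: is it loop-invariant? These stand in for
      // the PSE.getSE()->isLoopInvariant(...) queries in the hunk above.
      std::vector<bool> Invariant = {true, true, false, true};
      unsigned InductionOperand = 2; // the one operand allowed to vary
      bool IgnoreGep = true;
      for (unsigned i = 0, e = (unsigned)Invariant.size(); i != e; ++i)
        if (i != InductionOperand && !Invariant[i]) {
          IgnoreGep = false; // a second varying index: keep the GEP
          break;
        }
      std::printf("ignore GEP = %d\n", (int)IgnoreGep);
    }
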
+ if (IfPredicateStore) + PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp)); } } void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) { - StoreInst *SI = dyn_cast<StoreInst>(Instr); + auto *SI = dyn_cast<StoreInst>(Instr); bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent())); return scalarizeInstruction(Instr, IfPredicateStore); } -Value *InnerLoopUnroller::reverseVector(Value *Vec) { - return Vec; -} +Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } -Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { - return V; -} +Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) { // When unrolling and the VF is 1, we only need to add a simple scalar. @@ -5756,3 +6338,346 @@ Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) { Constant *C = ConstantInt::get(ITy, StartIdx); return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); } + +static void AddRuntimeUnrollDisableMetaData(Loop *L) { + SmallVector<Metadata *, 4> MDs; + // Reserve first location for self reference to the LoopID metadata node. + MDs.push_back(nullptr); + bool IsUnrollMetadata = false; + MDNode *LoopID = L->getLoopID(); + if (LoopID) { + // First find existing loop unrolling disable metadata. + for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { + auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); + if (MD) { + const auto *S = dyn_cast<MDString>(MD->getOperand(0)); + IsUnrollMetadata = + S && S->getString().startswith("llvm.loop.unroll.disable"); + } + MDs.push_back(LoopID->getOperand(i)); + } + } + + if (!IsUnrollMetadata) { + // Add runtime unroll disable metadata. + LLVMContext &Context = L->getHeader()->getContext(); + SmallVector<Metadata *, 1> DisableOperands; + DisableOperands.push_back( + MDString::get(Context, "llvm.loop.unroll.runtime.disable")); + MDNode *DisableNode = MDNode::get(Context, DisableOperands); + MDs.push_back(DisableNode); + MDNode *NewLoopID = MDNode::get(Context, MDs); + // Set operand 0 to refer to the loop id itself. + NewLoopID->replaceOperandWith(0, NewLoopID); + L->setLoopID(NewLoopID); + } +} + +bool LoopVectorizePass::processLoop(Loop *L) { + assert(L->empty() && "Only process inner loops."); + +#ifndef NDEBUG + const std::string DebugLocStr = getDebugLocString(L); +#endif /* NDEBUG */ + + DEBUG(dbgs() << "\nLV: Checking a loop in \"" + << L->getHeader()->getParent()->getName() << "\" from " + << DebugLocStr << "\n"); + + LoopVectorizeHints Hints(L, DisableUnrolling); + + DEBUG(dbgs() << "LV: Loop hints:" + << " force=" + << (Hints.getForce() == LoopVectorizeHints::FK_Disabled + ? "disabled" + : (Hints.getForce() == LoopVectorizeHints::FK_Enabled + ? "enabled" + : "?")) + << " width=" << Hints.getWidth() + << " unroll=" << Hints.getInterleave() << "\n"); + + // Function containing loop + Function *F = L->getHeader()->getParent(); + + // Looking at the diagnostic output is the only way to determine if a loop + // was vectorized (other than looking at the IR or machine code), so it + // is important to generate an optimization remark for each loop. Most of + // these messages are generated by emitOptimizationRemarkAnalysis. Remarks + // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are + // less verbose reporting vectorized loops and unvectorized loops that may + // benefit from vectorization, respectively. 
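
For the unroller (VF == 1), getStepVector above degenerates to one scalar add per unrolled part: roughly, part i of the induction sees Val + i * Step, which is exactly the CreateAdd/CreateMul pair in the hunk. A standalone sketch with invented induction state:

    #include <cstdio>

    int main() {
      int Val = 10, Step = 2; // hypothetical induction value and stride
      int UF = 4;             // unroll (interleave) factor
      for (int Part = 0; Part < UF; ++Part)
        // One scalar add per part, mirroring the IR built above.
        std::printf("part %d -> induction = %d\n", Part, Val + Part * Step);
    }
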
+ + if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { + DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); + return false; + } + + // Check the loop for a trip count threshold: + // do not vectorize loops with a tiny trip count. + const unsigned TC = SE->getSmallConstantTripCount(L); + if (TC > 0u && TC < TinyTripCountVectorThreshold) { + DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " + << "This loop is not worth vectorizing."); + if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) + DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); + else { + DEBUG(dbgs() << "\n"); + emitAnalysisDiag(F, L, Hints, VectorizationReport() + << "vectorization is not beneficial " + "and is not explicitly forced"); + return false; + } + } + + PredicatedScalarEvolution PSE(*SE, *L); + + // Check if it is legal to vectorize the loop. + LoopVectorizationRequirements Requirements; + LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, + &Requirements, &Hints); + if (!LVL.canVectorize()) { + DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); + emitMissedWarning(F, L, Hints); + return false; + } + + // Use the cost model. + LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, F, + &Hints); + CM.collectValuesToIgnore(); + + // Check the function attributes to find out if this function should be + // optimized for size. + bool OptForSize = + Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); + + // Compute the weighted frequency of this loop being executed and see if it + // is less than 20% of the function entry baseline frequency. Note that we + // always have a canonical loop here because we think we *can* vectorize. + // FIXME: This is hidden behind a flag due to pervasive problems with + // exactly what block frequency models. + if (LoopVectorizeWithBlockFrequency) { + BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); + if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && + LoopEntryFreq < ColdEntryFreq) + OptForSize = true; + } + + // Check the function attributes to see if implicit floats are allowed. + // FIXME: This check doesn't seem possibly correct -- what if the loop is + // an integer loop and the vector instructions selected are purely integer + // vector instructions? + if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { + DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" + "attribute is used.\n"); + emitAnalysisDiag( + F, L, Hints, + VectorizationReport() + << "loop not vectorized due to NoImplicitFloat attribute"); + emitMissedWarning(F, L, Hints); + return false; + } + + // Check if the target supports potentially unsafe FP vectorization. + // FIXME: Add a check for the type of safety issue (denormal, signaling) + // for the target we're vectorizing for, to make sure none of the + // additional fp-math flags can help. + if (Hints.isPotentiallyUnsafe() && + TTI->isFPVectorizationPotentiallyUnsafe()) { + DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); + emitAnalysisDiag(F, L, Hints, + VectorizationReport() + << "loop not vectorized due to unsafe FP support."); + emitMissedWarning(F, L, Hints); + return false; + } + + // Select the optimal vectorization factor. + const LoopVectorizationCostModel::VectorizationFactor VF = + CM.selectVectorizationFactor(OptForSize); + + // Select the interleave count. + unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); + + // Get user interleave count. 
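
The trip-count gate a few hunks above behaves as follows: a known-tiny trip count blocks vectorization unless the user explicitly forced it, while an unknown (zero) or large count passes. A standalone sketch; the threshold value here is illustrative, the real one comes from TinyTripCountVectorThreshold:

    #include <cstdio>

    bool worthVectorizing(unsigned TC, bool Forced) {
      const unsigned TinyTripCountThreshold = 16; // hypothetical
      if (TC > 0 && TC < TinyTripCountThreshold)
        return Forced; // proceed only when the user insisted
      return true;     // unknown (0) or large trip counts pass the gate
    }

    int main() {
      std::printf("%d %d %d\n", (int)worthVectorizing(4, false),
                  (int)worthVectorizing(4, true), (int)worthVectorizing(0, false));
    }
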
+ unsigned UserIC = Hints.getInterleave();
+
+ // Identify the diagnostic messages that should be produced.
+ std::string VecDiagMsg, IntDiagMsg;
+ bool VectorizeLoop = true, InterleaveLoop = true;
+
+ if (Requirements.doesNotMeet(F, L, Hints)) {
+ DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
+ "requirements.\n");
+ emitMissedWarning(F, L, Hints);
+ return false;
+ }
+
+ if (VF.Width == 1) {
+ DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
+ VecDiagMsg =
+ "the cost-model indicates that vectorization is not beneficial";
+ VectorizeLoop = false;
+ }
+
+ if (IC == 1 && UserIC <= 1) {
+ // Tell the user interleaving is not beneficial.
+ DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
+ IntDiagMsg =
+ "the cost-model indicates that interleaving is not beneficial";
+ InterleaveLoop = false;
+ if (UserIC == 1)
+ IntDiagMsg +=
+ " and is explicitly disabled or interleave count is set to 1";
+ } else if (IC > 1 && UserIC == 1) {
+ // Tell the user interleaving is beneficial, but it is explicitly disabled.
+ DEBUG(dbgs()
+ << "LV: Interleaving is beneficial but is explicitly disabled.");
+ IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
+ "but is explicitly disabled or interleave count is set to 1";
+ InterleaveLoop = false;
+ }
+
+ // Override IC if the user provided an interleave count.
+ IC = UserIC > 0 ? UserIC : IC;
+
+ // Emit diagnostic messages, if any.
+ const char *VAPassName = Hints.vectorizeAnalysisPassName();
+ if (!VectorizeLoop && !InterleaveLoop) {
+ // Do not vectorize or interleave the loop.
+ emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
+ L->getStartLoc(), VecDiagMsg);
+ emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
+ L->getStartLoc(), IntDiagMsg);
+ return false;
+ } else if (!VectorizeLoop && InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
+ L->getStartLoc(), VecDiagMsg);
+ } else if (VectorizeLoop && !InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
+ << DebugLocStr << '\n');
+ emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
+ L->getStartLoc(), IntDiagMsg);
+ } else if (VectorizeLoop && InterleaveLoop) {
+ DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
+ << DebugLocStr << '\n');
+ DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ }
+
+ if (!VectorizeLoop) {
+ assert(IC > 1 && "interleave count should not be 1 or 0");
+ // If we decided that it is not profitable to vectorize the loop, then
+ // interleave it.
+ InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
+ Unroller.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore);
+
+ emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
+ Twine("interleaved loop (interleaved count: ") +
+ Twine(IC) + ")");
+ } else {
+ // If we decided that it is *profitable* to vectorize the loop, then do it.
+ InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
+ LB.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore);
+ ++LoopsVectorized;
+
+ // Add metadata to disable runtime unrolling of the scalar loop when there
+ // are no runtime checks about strides and memory. A scalar loop that is
+ // rarely used is not worth unrolling.
+ if (!LB.areSafetyChecksAdded())
+ AddRuntimeUnrollDisableMetaData(L);
+
+ // Report the vectorization decision. 
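
The diagnostics logic above partitions into four (VectorizeLoop, InterleaveLoop) outcomes. A standalone sketch enumerating them:

    #include <cstdio>

    int main() {
      for (int Vec = 0; Vec <= 1; ++Vec)
        for (int Ilv = 0; Ilv <= 1; ++Ilv) {
          const char *Action =
              (!Vec && !Ilv) ? "bail out; emit both analysis remarks"
              : (!Vec && Ilv) ? "interleave only (InnerLoopUnroller)"
              : (Vec && !Ilv) ? "vectorize only (InnerLoopVectorizer)"
                              : "vectorize and interleave";
          std::printf("Vec=%d Ilv=%d -> %s\n", Vec, Ilv, Action);
        }
    }
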
+ emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(), + Twine("vectorized loop (vectorization width: ") + + Twine(VF.Width) + ", interleaved count: " + + Twine(IC) + ")"); + } + + // Mark the loop as already vectorized to avoid vectorizing again. + Hints.setAlreadyVectorized(); + + DEBUG(verifyFunction(*L->getHeader()->getParent())); + return true; +} + +bool LoopVectorizePass::runImpl( + Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, + DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, + DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, + std::function<const LoopAccessInfo &(Loop &)> &GetLAA_) { + + SE = &SE_; + LI = &LI_; + TTI = &TTI_; + DT = &DT_; + BFI = &BFI_; + TLI = TLI_; + AA = &AA_; + AC = &AC_; + GetLAA = &GetLAA_; + DB = &DB_; + + // Compute some weights outside of the loop over the loops. Compute this + // using a BranchProbability to re-use its scaling math. + const BranchProbability ColdProb(1, 5); // 20% + ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; + + // Don't attempt if + // 1. the target claims to have no vector registers, and + // 2. interleaving won't help ILP. + // + // The second condition is necessary because, even if the target has no + // vector registers, loop vectorization may still enable scalar + // interleaving. + if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) + return false; + + // Build up a worklist of inner-loops to vectorize. This is necessary as + // the act of vectorizing or partially unrolling a loop creates new loops + // and can invalidate iterators across the loops. + SmallVector<Loop *, 8> Worklist; + + for (Loop *L : *LI) + addInnerLoop(*L, Worklist); + + LoopsAnalyzed += Worklist.size(); + + // Now walk the identified inner loops. + bool Changed = false; + while (!Worklist.empty()) + Changed |= processLoop(Worklist.pop_back_val()); + + // Process each loop nest in the function. + return Changed; + +} + + +PreservedAnalyses LoopVectorizePass::run(Function &F, + FunctionAnalysisManager &AM) { + auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); + auto &LI = AM.getResult<LoopAnalysis>(F); + auto &TTI = AM.getResult<TargetIRAnalysis>(F); + auto &DT = AM.getResult<DominatorTreeAnalysis>(F); + auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); + auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); + auto &AA = AM.getResult<AAManager>(F); + auto &AC = AM.getResult<AssumptionAnalysis>(F); + auto &DB = AM.getResult<DemandedBitsAnalysis>(F); + + auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); + std::function<const LoopAccessInfo &(Loop &)> GetLAA = + [&](Loop &L) -> const LoopAccessInfo & { + return LAM.getResult<LoopAccessAnalysis>(L); + }; + bool Changed = runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA); + if (!Changed) + return PreservedAnalyses::all(); + PreservedAnalyses PA; + PA.preserve<LoopAnalysis>(); + PA.preserve<DominatorTreeAnalysis>(); + PA.preserve<BasicAA>(); + PA.preserve<GlobalsAA>(); + return PA; +} diff --git a/lib/Transforms/Vectorize/Makefile b/lib/Transforms/Vectorize/Makefile deleted file mode 100644 index 86c36585f23f5..0000000000000 --- a/lib/Transforms/Vectorize/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -##===- lib/Transforms/Vectorize/Makefile -----------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. 
-# -##===----------------------------------------------------------------------===## - -LEVEL = ../../.. -LIBRARYNAME = LLVMVectorize -BUILD_ARCHIVE = 1 - -include $(LEVEL)/Makefile.common - diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index f69a4e52c7e1e..8a3c4d14fecb6 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -15,21 +15,17 @@ // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. // //===----------------------------------------------------------------------===// -#include "llvm/Transforms/Vectorize.h" -#include "llvm/ADT/MapVector.h" +#include "llvm/Transforms/Vectorize/SLPVectorizer.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/GlobalsModRef.h" -#include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/GlobalsModRef.h" +#include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" -#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" @@ -44,12 +40,12 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Analysis/VectorUtils.h" +#include "llvm/Transforms/Vectorize.h" #include <algorithm> -#include <map> #include <memory> using namespace llvm; +using namespace slpvectorizer; #define SV_NAME "slp-vectorizer" #define DEBUG_TYPE "SLP" @@ -82,11 +78,11 @@ static cl::opt<int> ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, cl::desc("Limit the size of the SLP scheduling region per block")); -namespace { +static cl::opt<int> MinVectorRegSizeOption( + "slp-min-reg-size", cl::init(128), cl::Hidden, + cl::desc("Attempt to vectorize for this register size in bits")); // FIXME: Set this via cl::opt to allow overriding. -static const unsigned MinVecRegSize = 128; - static const unsigned RecursionMaxDepth = 12; // Limit the number of alias checks. The limit is chosen so that @@ -134,8 +130,8 @@ static BasicBlock *getSameBlock(ArrayRef<Value *> VL) { /// \returns True if all of the values in \p VL are constants. static bool allConstant(ArrayRef<Value *> VL) { - for (unsigned i = 0, e = VL.size(); i < e; ++i) - if (!isa<Constant>(VL[i])) + for (Value *i : VL) + if (!isa<Constant>(i)) return false; return true; } @@ -223,46 +219,6 @@ static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) { } } -/// \returns \p I after propagating metadata from \p VL. 
-static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) { - Instruction *I0 = cast<Instruction>(VL[0]); - SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; - I0->getAllMetadataOtherThanDebugLoc(Metadata); - - for (unsigned i = 0, n = Metadata.size(); i != n; ++i) { - unsigned Kind = Metadata[i].first; - MDNode *MD = Metadata[i].second; - - for (int i = 1, e = VL.size(); MD && i != e; i++) { - Instruction *I = cast<Instruction>(VL[i]); - MDNode *IMD = I->getMetadata(Kind); - - switch (Kind) { - default: - MD = nullptr; // Remove unknown metadata - break; - case LLVMContext::MD_tbaa: - MD = MDNode::getMostGenericTBAA(MD, IMD); - break; - case LLVMContext::MD_alias_scope: - MD = MDNode::getMostGenericAliasScope(MD, IMD); - break; - case LLVMContext::MD_noalias: - MD = MDNode::intersect(MD, IMD); - break; - case LLVMContext::MD_fpmath: - MD = MDNode::getMostGenericFPMath(MD, IMD); - break; - case LLVMContext::MD_nontemporal: - MD = MDNode::intersect(MD, IMD); - break; - } - } - I->setMetadata(Kind, MD); - } - return I; -} - /// \returns The type that all of the values in \p VL have or null if there /// are different types. static Type* getSameType(ArrayRef<Value *> VL) { @@ -274,36 +230,17 @@ static Type* getSameType(ArrayRef<Value *> VL) { return Ty; } -/// \returns True if the ExtractElement instructions in VL can be vectorized -/// to use the original vector. -static bool CanReuseExtract(ArrayRef<Value *> VL) { - assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode"); - // Check if all of the extracts come from the same vector and from the - // correct offset. - Value *VL0 = VL[0]; - ExtractElementInst *E0 = cast<ExtractElementInst>(VL0); - Value *Vec = E0->getOperand(0); - - // We have to extract from the same vector type. - unsigned NElts = Vec->getType()->getVectorNumElements(); - - if (NElts != VL.size()) - return false; - - // Check that all of the indices extract from the correct offset. - ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1)); - if (!CI || CI->getZExtValue()) - return false; - - for (unsigned i = 1, e = VL.size(); i < e; ++i) { - ExtractElementInst *E = cast<ExtractElementInst>(VL[i]); +/// \returns True if Extract{Value,Element} instruction extracts element Idx. +static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) { + assert(Opcode == Instruction::ExtractElement || + Opcode == Instruction::ExtractValue); + if (Opcode == Instruction::ExtractElement) { ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1)); - - if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec) - return false; + return CI && CI->getZExtValue() == Idx; + } else { + ExtractValueInst *EI = cast<ExtractValueInst>(E); + return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx; } - - return true; } /// \returns True if in-tree use also needs extract. This refers to @@ -323,7 +260,7 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, } case Instruction::Call: { CallInst *CI = cast<CallInst>(UserInst); - Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); + Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); if (hasVectorInstrinsicScalarOpd(ID, 1)) { return (CI->getArgOperand(1) == Scalar); } @@ -353,6 +290,8 @@ static bool isSimple(Instruction *I) { return true; } +namespace llvm { +namespace slpvectorizer { /// Bottom Up SLP Vectorizer. 
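
matchExtractIndex above generalizes the old CanReuseExtract check to both extractelement and extractvalue: a bundle is reusable only when every element reads lane i of one common source. A standalone sketch over mock extracts:

    #include <cstdio>
    #include <vector>

    struct Extract { int Source; unsigned Index; }; // mock extract instruction

    // Sketch of canReuseExtract: all extracts read lane i of the same source.
    bool canReuse(const std::vector<Extract> &VL, unsigned NElts) {
      if (NElts != VL.size())
        return false;
      for (unsigned i = 0; i < VL.size(); ++i)
        if (VL[i].Source != VL[0].Source || VL[i].Index != i)
          return false;
      return true;
    }

    int main() {
      std::vector<Extract> VL = {{7, 0}, {7, 1}, {7, 2}, {7, 3}};
      std::printf("reuse = %d\n", (int)canReuse(VL, 4)); // 1: keep the vector
    }
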
class BoUpSLP { public: @@ -363,11 +302,24 @@ public: BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li, - DominatorTree *Dt, AssumptionCache *AC) + DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB, + const DataLayout *DL) : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func), - SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), - Builder(Se->getContext()) { + SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB), + DL(DL), Builder(Se->getContext()) { CodeMetrics::collectEphemeralValues(F, AC, EphValues); + // Use the vector register size specified by the target unless overridden + // by a command-line option. + // TODO: It would be better to limit the vectorization factor based on + // data type rather than just register size. For example, x86 AVX has + // 256-bit registers, but it does not support integer operations + // at that width (that requires AVX2). + if (MaxVectorRegSizeOption.getNumOccurrences()) + MaxVecRegSize = MaxVectorRegSizeOption; + else + MaxVecRegSize = TTI->getRegisterBitWidth(true); + + MinVecRegSize = MinVectorRegSizeOption; } /// \brief Vectorize the tree that starts with the elements in \p VL. @@ -399,11 +351,9 @@ public: BlockScheduling *BS = Iter.second.get(); BS->clear(); } + MinBWs.clear(); } - /// \returns true if the memory operations A and B are consecutive. - bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL); - /// \brief Perform LICM and CSE on the newly generated gather sequences. void optimizeGatherSequence(); @@ -412,6 +362,32 @@ public: return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder; } + /// \return The vector element size in bits to use when vectorizing the + /// expression tree ending at \p V. If V is a store, the size is the width of + /// the stored value. Otherwise, the size is the width of the largest loaded + /// value reaching V. This method is used by the vectorizer to calculate + /// vectorization factors. + unsigned getVectorElementSize(Value *V); + + /// Compute the minimum type sizes required to represent the entries in a + /// vectorizable tree. + void computeMinimumValueSizes(); + + // \returns maximum vector register size as set by TTI or overridden by cl::opt. + unsigned getMaxVecRegSize() const { + return MaxVecRegSize; + } + + // \returns minimum vector register size as set by cl::opt. + unsigned getMinVecRegSize() const { + return MinVecRegSize; + } + + /// \brief Check if ArrayType or StructType is isomorphic to some VectorType. + /// + /// \returns number of elements in vector if isomorphism exists, 0 otherwise. + unsigned canMapToVector(Type *T, const DataLayout &DL) const; + private: struct TreeEntry; @@ -421,6 +397,10 @@ private: /// This is the recursive part of buildTree. void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth); + /// \returns True if the ExtractElement/ExtractValue instructions in VL can + /// be vectorized to use the original vector (or aggregate "bitcast" to a vector). + bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const; + /// Vectorize a single entry in the tree. Value *vectorizeTree(TreeEntry *E); @@ -431,14 +411,6 @@ private: /// vectorized, or NULL. They may happen in cycles. Value *alreadyVectorized(ArrayRef<Value *> VL) const; - /// \brief Take the pointer operand from the Load/Store instruction. - /// \returns NULL if this is not a valid Load/Store instruction. 
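
The constructor logic above picks the vectorization range: an explicit register-size option wins over the target's reported register width, and the minimum comes from its own option (default 128). A standalone sketch with illustrative values standing in for the cl::opt and TTI queries:

    #include <cstdio>

    int main() {
      bool MaxOptionSet = false;                              // cl::opt occurrence
      unsigned MaxOptionValue = 0, TTIRegisterBitWidth = 256; // e.g. an AVX target
      unsigned MaxVecRegSize = MaxOptionSet ? MaxOptionValue : TTIRegisterBitWidth;
      unsigned MinVecRegSize = 128;                           // option default
      std::printf("vectorize between %u and %u bits\n",
                  MinVecRegSize, MaxVecRegSize);
    }
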
- static Value *getPointerOperand(Value *I); - - /// \brief Take the address space operand from the Load/Store instruction. - /// \returns -1 if this is not a valid Load/Store instruction. - static unsigned getAddressSpaceOperand(Value *I); - /// \returns the scalarization cost for this type. Scalarization in this /// context means the creation of vectors from a group of scalars. int getGatherCost(Type *Ty); @@ -719,8 +691,11 @@ private: }; #ifndef NDEBUG - friend raw_ostream &operator<<(raw_ostream &os, - const BoUpSLP::ScheduleData &SD); + friend inline raw_ostream &operator<<(raw_ostream &os, + const BoUpSLP::ScheduleData &SD) { + SD.dump(os); + return os; + } #endif /// Contains all scheduling data for a basic block. @@ -917,16 +892,21 @@ private: AliasAnalysis *AA; LoopInfo *LI; DominatorTree *DT; + AssumptionCache *AC; + DemandedBits *DB; + const DataLayout *DL; + unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. + unsigned MinVecRegSize; // Set by cl::opt (default: 128). /// Instruction builder to construct the vectorized tree. IRBuilder<> Builder; + + /// A map of scalar integer values to the smallest bit width with which they + /// can legally be represented. + MapVector<Value *, uint64_t> MinBWs; }; -#ifndef NDEBUG -raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) { - SD.dump(os); - return os; -} -#endif +} // end namespace llvm +} // end namespace slpvectorizer void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ArrayRef<Value *> UserIgnoreLst) { @@ -937,8 +917,8 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, buildTree_rec(Roots, 0); // Collect the values that we need to extract from the tree. - for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { - TreeEntry *Entry = &VectorizableTree[EIdx]; + for (TreeEntry &EIdx : VectorizableTree) { + TreeEntry *Entry = &EIdx; // For each lane: for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { @@ -987,7 +967,7 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { - bool SameTy = getSameType(VL); (void)SameTy; + bool SameTy = allConstant(VL) || getSameType(VL); (void)SameTy; bool isAltShuffle = false; assert(SameTy && "Invalid types!"); @@ -1138,16 +1118,17 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { ValueList Operands; // Prepare the operand vector. - for (unsigned j = 0; j < VL.size(); ++j) - Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock( + for (Value *j : VL) + Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock( PH->getIncomingBlock(i))); buildTree_rec(Operands, Depth + 1); } return; } + case Instruction::ExtractValue: case Instruction::ExtractElement: { - bool Reuse = CanReuseExtract(VL); + bool Reuse = canReuseExtract(VL, Opcode); if (Reuse) { DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); } else { @@ -1164,11 +1145,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { // loading/storing it as an i8 struct. If we vectorize loads/stores from // such a struct we read/write packed bits disagreeing with the // unvectorized version. 
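
The packed-type check referenced above compares a type's bit size with its allocation size; when they differ, a wide load would also read padding bits, so the bundle is gathered instead. A standalone sketch (the x86_fp80 numbers assume a typical 64-bit x86 data layout):

    #include <cstdio>

    int main() {
      struct Ty { const char *Name; unsigned SizeBits, AllocBits; };
      Ty Types[] = {{"i32", 32, 32}, {"i1", 1, 8}, {"x86_fp80", 80, 128}};
      for (const Ty &T : Types)
        // Only "packed" types (size == alloc size) are safe to load wide.
        std::printf("%-8s packed=%d\n", T.Name, (int)(T.SizeBits == T.AllocBits));
    }
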
- const DataLayout &DL = F->getParent()->getDataLayout();
Type *ScalarTy = VL[0]->getType();
- if (DL.getTypeSizeInBits(ScalarTy) !=
- DL.getTypeAllocSizeInBits(ScalarTy)) {
+ if (DL->getTypeSizeInBits(ScalarTy) !=
+ DL->getTypeAllocSizeInBits(ScalarTy)) {
BS.cancelScheduling(VL);
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
@@ -1184,8 +1164,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
return;
}
- if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
- if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
+ if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
+ if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
++NumLoadsWantToChangeOrder;
}
BS.cancelScheduling(VL);
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
return;
}
@@ -1227,8 +1207,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
// Prepare the operand vector.
- for (unsigned j = 0; j < VL.size(); ++j)
- Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+ for (Value *j : VL)
+ Operands.push_back(cast<Instruction>(j)->getOperand(i));
buildTree_rec(Operands, Depth+1);
}
@@ -1256,8 +1236,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
// Prepare the operand vector.
- for (unsigned j = 0; j < VL.size(); ++j)
- Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+ for (Value *j : VL)
+ Operands.push_back(cast<Instruction>(j)->getOperand(i));
buildTree_rec(Operands, Depth+1);
}
@@ -1298,8 +1278,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
// Prepare the operand vector.
- for (unsigned j = 0; j < VL.size(); ++j)
- Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+ for (Value *j : VL)
+ Operands.push_back(cast<Instruction>(j)->getOperand(i));
buildTree_rec(Operands, Depth+1);
}
@@ -1346,18 +1326,17 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
for (unsigned i = 0, e = 2; i < e; ++i) {
ValueList Operands;
// Prepare the operand vector.
- for (unsigned j = 0; j < VL.size(); ++j)
- Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+ for (Value *j : VL)
+ Operands.push_back(cast<Instruction>(j)->getOperand(i));
buildTree_rec(Operands, Depth + 1);
}
return;
}
case Instruction::Store: {
- const DataLayout &DL = F->getParent()->getDataLayout();
// Check if the stores are consecutive or if we need to swizzle them. 
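
The load case above also recognizes a two-element bundle that is consecutive in reverse order and counts it toward NumLoadsWantToChangeOrder rather than rejecting it outright. A standalone sketch with a toy consecutiveness test:

    #include <cstdio>

    // Stand-in for isConsecutiveAccess: addresses one element apart.
    bool consecutive(unsigned A, unsigned B) { return B == A + 1; }

    int main() {
      unsigned Addr[2] = {5, 4}; // a reversed pair of loads
      if (!consecutive(Addr[0], Addr[1]) && consecutive(Addr[1], Addr[0]))
        std::printf("reversed pair: count toward NumLoadsWantToChangeOrder\n");
    }
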
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) - if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) { + if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) { BS.cancelScheduling(VL); newTreeEntry(VL, false); DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); @@ -1368,8 +1347,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { DEBUG(dbgs() << "SLP: added a vector of stores.\n"); ValueList Operands; - for (unsigned j = 0; j < VL.size(); ++j) - Operands.push_back(cast<Instruction>(VL[j])->getOperand(0)); + for (Value *j : VL) + Operands.push_back(cast<Instruction>(j)->getOperand(0)); buildTree_rec(Operands, Depth + 1); return; @@ -1379,7 +1358,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { CallInst *CI = cast<CallInst>(VL[0]); // Check if this is an Intrinsic call or something that can be // represented by an intrinsic call - Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); + Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); if (!isTriviallyVectorizable(ID)) { BS.cancelScheduling(VL); newTreeEntry(VL, false); @@ -1393,7 +1372,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { for (unsigned i = 1, e = VL.size(); i != e; ++i) { CallInst *CI2 = dyn_cast<CallInst>(VL[i]); if (!CI2 || CI2->getCalledFunction() != Int || - getIntrinsicIDForCall(CI2, TLI) != ID) { + getVectorIntrinsicIDForCall(CI2, TLI) != ID || + !CI->hasIdenticalOperandBundleSchema(*CI2)) { BS.cancelScheduling(VL); newTreeEntry(VL, false); DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] @@ -1413,14 +1393,25 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { return; } } + // Verify that the bundle operands are identical between the two calls. + if (CI->hasOperandBundles() && + !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), + CI->op_begin() + CI->getBundleOperandsEndIndex(), + CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { + BS.cancelScheduling(VL); + newTreeEntry(VL, false); + DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!=" + << *VL[i] << '\n'); + return; + } } newTreeEntry(VL, true); for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { ValueList Operands; // Prepare the operand vector. - for (unsigned j = 0; j < VL.size(); ++j) { - CallInst *CI2 = dyn_cast<CallInst>(VL[j]); + for (Value *j : VL) { + CallInst *CI2 = dyn_cast<CallInst>(j); Operands.push_back(CI2->getArgOperand(i)); } buildTree_rec(Operands, Depth + 1); @@ -1451,8 +1442,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { ValueList Operands; // Prepare the operand vector. 
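
The std::equal check above compares the operand-bundle slices of two calls element for element; only calls with identical bundle operands may be vectorized together. A standalone sketch over plain integer operand lists (the range indices are illustrative):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> CI  = {/*args*/ 1, 2, /*bundle ops*/ 42, 43};
      std::vector<int> CI2 = {/*args*/ 1, 3, /*bundle ops*/ 42, 43};
      unsigned Begin = 2, End = 4; // bundle operand range [Begin, End)
      bool Same = std::equal(CI.begin() + Begin, CI.begin() + End,
                             CI2.begin() + Begin);
      std::printf("bundles match = %d\n", (int)Same); // 1: only the args differ
    }
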
- for (unsigned j = 0; j < VL.size(); ++j) - Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); + for (Value *j : VL) + Operands.push_back(cast<Instruction>(j)->getOperand(i)); buildTree_rec(Operands, Depth + 1); } @@ -1466,6 +1457,74 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { } } +unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { + unsigned N; + Type *EltTy; + auto *ST = dyn_cast<StructType>(T); + if (ST) { + N = ST->getNumElements(); + EltTy = *ST->element_begin(); + } else { + N = cast<ArrayType>(T)->getNumElements(); + EltTy = cast<ArrayType>(T)->getElementType(); + } + if (!isValidElementType(EltTy)) + return 0; + uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N)); + if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) + return 0; + if (ST) { + // Check that struct is homogeneous. + for (const auto *Ty : ST->elements()) + if (Ty != EltTy) + return 0; + } + return N; +} + +bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const { + assert(Opcode == Instruction::ExtractElement || + Opcode == Instruction::ExtractValue); + assert(Opcode == getSameOpcode(VL) && "Invalid opcode"); + // Check if all of the extracts come from the same vector and from the + // correct offset. + Value *VL0 = VL[0]; + Instruction *E0 = cast<Instruction>(VL0); + Value *Vec = E0->getOperand(0); + + // We have to extract from a vector/aggregate with the same number of elements. + unsigned NElts; + if (Opcode == Instruction::ExtractValue) { + const DataLayout &DL = E0->getModule()->getDataLayout(); + NElts = canMapToVector(Vec->getType(), DL); + if (!NElts) + return false; + // Check if load can be rewritten as load of vector. + LoadInst *LI = dyn_cast<LoadInst>(Vec); + if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) + return false; + } else { + NElts = Vec->getType()->getVectorNumElements(); + } + + if (NElts != VL.size()) + return false; + + // Check that all of the indices extract from the correct offset. + if (!matchExtractIndex(E0, 0, Opcode)) + return false; + + for (unsigned i = 1, e = VL.size(); i < e; ++i) { + Instruction *E = cast<Instruction>(VL[i]); + if (!matchExtractIndex(E, i, Opcode)) + return false; + if (E->getOperand(0) != Vec) + return false; + } + + return true; +} + int BoUpSLP::getEntryCost(TreeEntry *E) { ArrayRef<Value*> VL = E->Scalars; @@ -1474,6 +1533,12 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { ScalarTy = SI->getValueOperand()->getType(); VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); + // If we have computed a smaller type for the expression, update VecTy so + // that the costs will be accurate. + if (MinBWs.count(VL[0])) + VecTy = VectorType::get(IntegerType::get(F->getContext(), MinBWs[VL[0]]), + VL.size()); + if (E->NeedToGather) { if (allConstant(VL)) return 0; @@ -1489,11 +1554,12 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { case Instruction::PHI: { return 0; } + case Instruction::ExtractValue: case Instruction::ExtractElement: { - if (CanReuseExtract(VL)) { + if (canReuseExtract(VL, Opcode)) { int DeadCost = 0; for (unsigned i = 0, e = VL.size(); i < e; ++i) { - ExtractElementInst *E = cast<ExtractElementInst>(VL[i]); + Instruction *E = cast<Instruction>(VL[i]); if (E->hasOneUse()) // Take credit for instruction that will become dead. 
DeadCost += @@ -1527,7 +1593,14 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { } case Instruction::FCmp: case Instruction::ICmp: - case Instruction::Select: + case Instruction::Select: { + // Calculate the cost of this instruction. + VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); + int ScalarCost = VecTy->getNumElements() * + TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty()); + int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy); + return VecCost - ScalarCost; + } case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: @@ -1546,59 +1619,48 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { case Instruction::And: case Instruction::Or: case Instruction::Xor: { - // Calculate the cost of this instruction. - int ScalarCost = 0; - int VecCost = 0; - if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp || - Opcode == Instruction::Select) { - VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); - ScalarCost = VecTy->getNumElements() * - TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty()); - VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy); - } else { - // Certain instructions can be cheaper to vectorize if they have a - // constant second vector operand. - TargetTransformInfo::OperandValueKind Op1VK = - TargetTransformInfo::OK_AnyValue; - TargetTransformInfo::OperandValueKind Op2VK = - TargetTransformInfo::OK_UniformConstantValue; - TargetTransformInfo::OperandValueProperties Op1VP = - TargetTransformInfo::OP_None; - TargetTransformInfo::OperandValueProperties Op2VP = - TargetTransformInfo::OP_None; - - // If all operands are exactly the same ConstantInt then set the - // operand kind to OK_UniformConstantValue. - // If instead not all operands are constants, then set the operand kind - // to OK_AnyValue. If all operands are constants but not the same, - // then set the operand kind to OK_NonUniformConstantValue. - ConstantInt *CInt = nullptr; - for (unsigned i = 0; i < VL.size(); ++i) { - const Instruction *I = cast<Instruction>(VL[i]); - if (!isa<ConstantInt>(I->getOperand(1))) { - Op2VK = TargetTransformInfo::OK_AnyValue; - break; - } - if (i == 0) { - CInt = cast<ConstantInt>(I->getOperand(1)); - continue; - } - if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && - CInt != cast<ConstantInt>(I->getOperand(1))) - Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; + // Certain instructions can be cheaper to vectorize if they have a + // constant second vector operand. + TargetTransformInfo::OperandValueKind Op1VK = + TargetTransformInfo::OK_AnyValue; + TargetTransformInfo::OperandValueKind Op2VK = + TargetTransformInfo::OK_UniformConstantValue; + TargetTransformInfo::OperandValueProperties Op1VP = + TargetTransformInfo::OP_None; + TargetTransformInfo::OperandValueProperties Op2VP = + TargetTransformInfo::OP_None; + + // If all operands are exactly the same ConstantInt then set the + // operand kind to OK_UniformConstantValue. + // If instead not all operands are constants, then set the operand kind + // to OK_AnyValue. If all operands are constants but not the same, + // then set the operand kind to OK_NonUniformConstantValue. 
+ ConstantInt *CInt = nullptr;
+ for (unsigned i = 0; i < VL.size(); ++i) {
+ const Instruction *I = cast<Instruction>(VL[i]);
+ if (!isa<ConstantInt>(I->getOperand(1))) {
+ Op2VK = TargetTransformInfo::OK_AnyValue;
+ break;
+ }
+ if (i == 0) {
+ CInt = cast<ConstantInt>(I->getOperand(1));
+ continue;
}
- // FIXME: Currently cost of model modification for division by
- // power of 2 is handled only for X86. Add support for other targets.
- if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
- CInt->getValue().isPowerOf2())
- Op2VP = TargetTransformInfo::OP_PowerOf2;
-
- ScalarCost = VecTy->getNumElements() *
- TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
- Op1VP, Op2VP);
- VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
- Op1VP, Op2VP);
+ if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
+ CInt != cast<ConstantInt>(I->getOperand(1)))
+ Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
}
+ // FIXME: Currently the cost-model modification for division by a power of
+ // 2 is handled only for X86 and AArch64. Add support for other targets.
+ if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
+ CInt->getValue().isPowerOf2())
+ Op2VP = TargetTransformInfo::OP_PowerOf2;
+
+ int ScalarCost = VecTy->getNumElements() *
+ TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
+ Op2VK, Op1VP, Op2VP);
+ int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
+ Op1VP, Op2VP);
return VecCost - ScalarCost;
}
case Instruction::GetElementPtr: {
@@ -1617,21 +1679,25 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
case Instruction::Load: {
// Cost of wide load - cost of scalar loads.
+ unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
int ScalarLdCost = VecTy->getNumElements() *
- TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
- int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
+ TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0);
+ int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
+ VecTy, alignment, 0);
return VecLdCost - ScalarLdCost;
}
case Instruction::Store: {
// We know that we can merge the stores. Calculate the cost.
+ unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
int ScalarStCost = VecTy->getNumElements() *
- TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
- int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
+ TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0);
+ int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
+ VecTy, alignment, 0);
return VecStCost - ScalarStCost;
}
case Instruction::Call: {
CallInst *CI = cast<CallInst>(VL0);
- Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+ Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
// Calculate the cost of the scalar and vector calls. 
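
The load and store cost entries above now feed the instruction's actual alignment into the memory-op query instead of a hard-coded 1, and the result stays a signed delta: vector cost minus the summed scalar costs. A toy version with invented TTI answers:

    #include <cstdio>

    int main() {
      unsigned NumElts = 4, Alignment = 4; // taken from the scalar access
      unsigned ScalarLd = 2, VecLd = 3;    // hypothetical getMemoryOpCost answers
      (void)Alignment;                     // would be forwarded to the TTI query
      int Delta = (int)VecLd - (int)(NumElts * ScalarLd);
      std::printf("VecLdCost - ScalarLdCost = %d\n", Delta); // negative: profitable
    }
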
SmallVector<Type*, 4> ScalarTys, VecTys; @@ -1641,10 +1707,14 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { VecTy->getNumElements())); } + FastMathFlags FMF; + if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) + FMF = FPMO->getFastMathFlags(); + int ScalarCallCost = VecTy->getNumElements() * - TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys); + TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF); - int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys); + int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF); DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost << " (" << VecCallCost << "-" << ScalarCallCost << ")" @@ -1659,8 +1729,8 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { TargetTransformInfo::OK_AnyValue; int ScalarCost = 0; int VecCost = 0; - for (unsigned i = 0; i < VL.size(); ++i) { - Instruction *I = cast<Instruction>(VL[i]); + for (Value *i : VL) { + Instruction *I = cast<Instruction>(i); if (!I) break; ScalarCost += @@ -1715,8 +1785,8 @@ int BoUpSLP::getSpillCost() { SmallPtrSet<Instruction*, 4> LiveValues; Instruction *PrevInst = nullptr; - for (unsigned N = 0; N < VectorizableTree.size(); ++N) { - Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]); + for (const auto &N : VectorizableTree) { + Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]); if (!Inst) continue; @@ -1725,6 +1795,13 @@ int BoUpSLP::getSpillCost() { continue; } + // Update LiveValues. + LiveValues.erase(PrevInst); + for (auto &J : PrevInst->operands()) { + if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J)) + LiveValues.insert(cast<Instruction>(&*J)); + } + DEBUG( dbgs() << "SLP: #LV: " << LiveValues.size(); for (auto *X : LiveValues) @@ -1733,13 +1810,6 @@ int BoUpSLP::getSpillCost() { Inst->dump(); ); - // Update LiveValues. - LiveValues.erase(PrevInst); - for (auto &J : PrevInst->operands()) { - if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J)) - LiveValues.insert(cast<Instruction>(&*J)); - } - // Now find the sequence of instructions between PrevInst and Inst. BasicBlock::reverse_iterator InstIt(Inst->getIterator()), PrevInstIt(PrevInst->getIterator()); @@ -1763,7 +1833,6 @@ int BoUpSLP::getSpillCost() { PrevInst = Inst; } - DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n"); return Cost; } @@ -1785,7 +1854,7 @@ int BoUpSLP::getTreeCost() { for (TreeEntry &TE : VectorizableTree) { int C = getEntryCost(&TE); DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " - << TE.Scalars[0] << " .\n"); + << *TE.Scalars[0] << ".\n"); Cost += C; } @@ -1802,15 +1871,29 @@ int BoUpSLP::getTreeCost() { if (EphValues.count(EU.User)) continue; - VectorType *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); - ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, - EU.Lane); + // If we plan to rewrite the tree in a smaller type, we will need to sign + // extend the extracted value back to the original type. Here, we account + // for the extract and the added cost of the sign extend if needed. 
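
The external-use accounting above charges each externally used scalar either a plain extract or, when the tree was narrowed via MinBWs, a combined extract-plus-sign-extend back to the original type. A standalone sketch with illustrative costs:

    #include <cstdio>

    int main() {
      bool TreeWasNarrowed = true;              // MinBWs has an entry for the root
      unsigned ExtractOnly = 1, ExtractWithSExt = 2; // hypothetical TTI answers
      unsigned PerUseCost = TreeWasNarrowed ? ExtractWithSExt : ExtractOnly;
      std::printf("per external use: %u\n", PerUseCost);
    }
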
+ auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); + auto *ScalarRoot = VectorizableTree[0].Scalars[0]; + if (MinBWs.count(ScalarRoot)) { + auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]); + VecTy = VectorType::get(MinTy, BundleWidth); + ExtractCost += TTI->getExtractWithExtendCost( + Instruction::SExt, EU.Scalar->getType(), VecTy, EU.Lane); + } else { + ExtractCost += + TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); + } } - Cost += getSpillCost(); + int SpillCost = getSpillCost(); + Cost += SpillCost + ExtractCost; - DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n"); - return Cost + ExtractCost; + DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n" + << "SLP: Extract Cost = " << ExtractCost << ".\n" + << "SLP: Total Cost = " << Cost << ".\n"); + return Cost; } int BoUpSLP::getGatherCost(Type *Ty) { @@ -1830,63 +1913,6 @@ int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { return getGatherCost(VecTy); } -Value *BoUpSLP::getPointerOperand(Value *I) { - if (LoadInst *LI = dyn_cast<LoadInst>(I)) - return LI->getPointerOperand(); - if (StoreInst *SI = dyn_cast<StoreInst>(I)) - return SI->getPointerOperand(); - return nullptr; -} - -unsigned BoUpSLP::getAddressSpaceOperand(Value *I) { - if (LoadInst *L = dyn_cast<LoadInst>(I)) - return L->getPointerAddressSpace(); - if (StoreInst *S = dyn_cast<StoreInst>(I)) - return S->getPointerAddressSpace(); - return -1; -} - -bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) { - Value *PtrA = getPointerOperand(A); - Value *PtrB = getPointerOperand(B); - unsigned ASA = getAddressSpaceOperand(A); - unsigned ASB = getAddressSpaceOperand(B); - - // Check that the address spaces match and that the pointers are valid. - if (!PtrA || !PtrB || (ASA != ASB)) - return false; - - // Make sure that A and B are different pointers of the same type. - if (PtrA == PtrB || PtrA->getType() != PtrB->getType()) - return false; - - unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); - Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); - APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); - - APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); - PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); - PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); - - APInt OffsetDelta = OffsetB - OffsetA; - - // Check if they are based on the same pointer. That makes the offsets - // sufficient. - if (PtrA == PtrB) - return OffsetDelta == Size; - - // Compute the necessary base pointer delta to have the necessary final delta - // equal to the size. - APInt BaseDelta = Size - OffsetDelta; - - // Otherwise compute the distance with SCEV between the base pointers. - const SCEV *PtrSCEVA = SE->getSCEV(PtrA); - const SCEV *PtrSCEVB = SE->getSCEV(PtrB); - const SCEV *C = SE->getConstant(BaseDelta); - const SCEV *X = SE->getAddExpr(PtrSCEVA, C); - return X == PtrSCEVB; -} - // Reorder commutative operations in alternate shuffle if the resulting vectors // are consecutive loads. This would allow us to vectorize the tree. 
// If we have something like- @@ -1899,12 +1925,10 @@ bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) { void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, SmallVectorImpl<Value *> &Right) { - const DataLayout &DL = F->getParent()->getDataLayout(); - // Push left and right operands of binary operation into Left and Right - for (unsigned i = 0, e = VL.size(); i < e; ++i) { - Left.push_back(cast<Instruction>(VL[i])->getOperand(0)); - Right.push_back(cast<Instruction>(VL[i])->getOperand(1)); + for (Value *i : VL) { + Left.push_back(cast<Instruction>(i)->getOperand(0)); + Right.push_back(cast<Instruction>(i)->getOperand(1)); } // Reorder if we have a commutative operation and consecutive access @@ -1914,10 +1938,11 @@ void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { Instruction *VL1 = cast<Instruction>(VL[j]); Instruction *VL2 = cast<Instruction>(VL[j + 1]); - if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) { + if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j], Right[j]); continue; - } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) { + } else if (VL2->isCommutative() && + isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j + 1], Right[j + 1]); continue; } @@ -1928,10 +1953,11 @@ void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { Instruction *VL1 = cast<Instruction>(VL[j]); Instruction *VL2 = cast<Instruction>(VL[j + 1]); - if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) { + if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j], Right[j]); continue; - } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) { + } else if (VL2->isCommutative() && + isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j + 1], Right[j + 1]); continue; } @@ -2061,8 +2087,6 @@ void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, if (SplatRight || SplatLeft) return; - const DataLayout &DL = F->getParent()->getDataLayout(); - // Finally check if we can get longer vectorizable chain by reordering // without breaking the good operand order detected above. // E.g. 
If we have something like- @@ -2081,7 +2105,7 @@ void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, for (unsigned j = 0; j < VL.size() - 1; ++j) { if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { - if (isConsecutiveAccess(L, L1, DL)) { + if (isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j + 1], Right[j + 1]); continue; } @@ -2089,7 +2113,7 @@ void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, } if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { - if (isConsecutiveAccess(L, L1, DL)) { + if (isConsecutiveAccess(L, L1, *DL, *SE)) { std::swap(Left[j + 1], Right[j + 1]); continue; } @@ -2185,7 +2209,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { return Gather(E->Scalars, VecTy); } - const DataLayout &DL = F->getParent()->getDataLayout(); unsigned Opcode = getSameOpcode(E->Scalars); switch (Opcode) { @@ -2225,13 +2248,25 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { } case Instruction::ExtractElement: { - if (CanReuseExtract(E->Scalars)) { + if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) { Value *V = VL0->getOperand(0); E->VectorizedValue = V; return V; } return Gather(E->Scalars, VecTy); } + case Instruction::ExtractValue: { + if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) { + LoadInst *LI = cast<LoadInst>(VL0->getOperand(0)); + Builder.SetInsertPoint(LI); + PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); + Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); + LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment()); + E->VectorizedValue = V; + return propagateMetadata(V, E->Scalars); + } + return Gather(E->Scalars, VecTy); + } case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: @@ -2382,7 +2417,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { unsigned Alignment = LI->getAlignment(); LI = Builder.CreateLoad(VecPtr); if (!Alignment) { - Alignment = DL.getABITypeAlignment(ScalarLoadTy); + Alignment = DL->getABITypeAlignment(ScalarLoadTy); } LI->setAlignment(Alignment); E->VectorizedValue = LI; @@ -2413,7 +2448,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); if (!Alignment) { - Alignment = DL.getABITypeAlignment(SI->getValueOperand()->getType()); + Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); } S->setAlignment(Alignment); E->VectorizedValue = S; @@ -2481,10 +2516,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { } Module *M = F->getParent(); - Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); + Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; Function *CF = Intrinsic::getDeclaration(M, ID, Tys); - Value *V = Builder.CreateCall(CF, OpVecs); + SmallVector<OperandBundleDef, 1> OpBundles; + CI->getOperandBundlesAsDefs(OpBundles); + Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); // The scalar argument uses an in-tree scalar so we add the new vectorized // call to ExternalUses list to make sure that an extract will be @@ -2559,15 +2596,28 @@ Value *BoUpSLP::vectorizeTree() { } Builder.SetInsertPoint(&F->getEntryBlock().front()); - vectorizeTree(&VectorizableTree[0]); + auto *VectorRoot = vectorizeTree(&VectorizableTree[0]); + + // If the vectorized tree can be rewritten in a smaller type, we truncate the + // vectorized root. InstCombine will then rewrite the entire expression. 
We + // sign extend the extracted values below. + auto *ScalarRoot = VectorizableTree[0].Scalars[0]; + if (MinBWs.count(ScalarRoot)) { + if (auto *I = dyn_cast<Instruction>(VectorRoot)) + Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); + auto BundleWidth = VectorizableTree[0].Scalars.size(); + auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]); + auto *VecTy = VectorType::get(MinTy, BundleWidth); + auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); + VectorizableTree[0].VectorizedValue = Trunc; + } DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); // Extract all of the elements with the external uses. - for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); - it != e; ++it) { - Value *Scalar = it->Scalar; - llvm::User *User = it->User; + for (const auto &ExternalUse : ExternalUses) { + Value *Scalar = ExternalUse.Scalar; + llvm::User *User = ExternalUse.User; // Skip users that we already RAUW. This happens when one instruction // has multiple uses of the same value. @@ -2583,15 +2633,24 @@ Value *BoUpSLP::vectorizeTree() { Value *Vec = E->VectorizedValue; assert(Vec && "Can't find vectorizable value"); - Value *Lane = Builder.getInt32(it->Lane); + Value *Lane = Builder.getInt32(ExternalUse.Lane); // Generate extracts for out-of-tree users. // Find the insertion point for the extractelement lane. - if (isa<Instruction>(Vec)){ + if (auto *VecI = dyn_cast<Instruction>(Vec)) { if (PHINode *PH = dyn_cast<PHINode>(User)) { for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { if (PH->getIncomingValue(i) == Scalar) { - Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); + TerminatorInst *IncomingTerminator = + PH->getIncomingBlock(i)->getTerminator(); + if (isa<CatchSwitchInst>(IncomingTerminator)) { + Builder.SetInsertPoint(VecI->getParent(), + std::next(VecI->getIterator())); + } else { + Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); + } Value *Ex = Builder.CreateExtractElement(Vec, Lane); + if (MinBWs.count(ScalarRoot)) + Ex = Builder.CreateSExt(Ex, Scalar->getType()); CSEBlocks.insert(PH->getIncomingBlock(i)); PH->setOperand(i, Ex); } @@ -2599,12 +2658,16 @@ Value *BoUpSLP::vectorizeTree() { } else { Builder.SetInsertPoint(cast<Instruction>(User)); Value *Ex = Builder.CreateExtractElement(Vec, Lane); + if (MinBWs.count(ScalarRoot)) + Ex = Builder.CreateSExt(Ex, Scalar->getType()); CSEBlocks.insert(cast<Instruction>(User)->getParent()); User->replaceUsesOfWith(Scalar, Ex); } } else { Builder.SetInsertPoint(&F->getEntryBlock().front()); Value *Ex = Builder.CreateExtractElement(Vec, Lane); + if (MinBWs.count(ScalarRoot)) + Ex = Builder.CreateSExt(Ex, Scalar->getType()); CSEBlocks.insert(&F->getEntryBlock()); User->replaceUsesOfWith(Scalar, Ex); } @@ -2613,8 +2676,8 @@ Value *BoUpSLP::vectorizeTree() { } // For each vectorized value: - for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { - TreeEntry *Entry = &VectorizableTree[EIdx]; + for (TreeEntry &EIdx : VectorizableTree) { + TreeEntry *Entry = &EIdx; // For each lane: for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { @@ -2655,9 +2718,8 @@ void BoUpSLP::optimizeGatherSequence() { DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() << " gather sequences instructions.\n"); // LICM InsertElementInst sequences. 
-  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
-       e = GatherSeq.end(); it != e; ++it) {
-    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
+  for (Instruction *it : GatherSeq) {
+    InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
     if (!Insert)
       continue;
 
@@ -2718,12 +2780,10 @@ void BoUpSLP::optimizeGatherSequence() {
 
       // Check if we can replace this instruction with any of the
       // visited instructions.
-      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
-           ve = Visited.end();
-           v != ve; ++v) {
-        if (In->isIdenticalTo(*v) &&
-            DT->dominates((*v)->getParent(), In->getParent())) {
-          In->replaceAllUsesWith(*v);
+      for (Instruction *v : Visited) {
+        if (In->isIdenticalTo(v) &&
+            DT->dominates(v->getParent(), In->getParent())) {
+          In->replaceAllUsesWith(v);
           eraseInstruction(In);
           In = nullptr;
           break;
@@ -3139,90 +3199,265 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
   BS->ScheduleStart = nullptr;
 }
 
-/// The SLPVectorizer Pass.
-struct SLPVectorizer : public FunctionPass {
-  typedef SmallVector<StoreInst *, 8> StoreList;
-  typedef MapVector<Value *, StoreList> StoreListMap;
+unsigned BoUpSLP::getVectorElementSize(Value *V) {
+  // If V is a store, just return the width of the stored value without
+  // traversing the expression tree. This is the common case.
+  if (auto *Store = dyn_cast<StoreInst>(V))
+    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
+
+  // If V is not a store, we can traverse the expression tree to find loads
+  // that feed it. The type of the loaded value may indicate a more suitable
+  // width than V's type. We want to base the vector element size on the width
+  // of memory operations where possible.
+  SmallVector<Instruction *, 16> Worklist;
+  SmallPtrSet<Instruction *, 16> Visited;
+  if (auto *I = dyn_cast<Instruction>(V))
+    Worklist.push_back(I);
+
+  // Traverse the expression tree in bottom-up order looking for loads. If we
+  // encounter an instruction we don't yet handle, we give up.
+  auto MaxWidth = 0u;
+  auto FoundUnknownInst = false;
+  while (!Worklist.empty() && !FoundUnknownInst) {
+    auto *I = Worklist.pop_back_val();
+    Visited.insert(I);
+
+    // We should only be looking at scalar instructions here. If the current
+    // instruction has a vector type, give up.
+    auto *Ty = I->getType();
+    if (isa<VectorType>(Ty))
+      FoundUnknownInst = true;
+
+    // If the current instruction is a load, update MaxWidth to reflect the
+    // width of the loaded value.
+    else if (isa<LoadInst>(I))
+      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
+
+    // Otherwise, we need to visit the operands of the instruction. We only
+    // handle the interesting cases from buildTree here. If an operand is an
+    // instruction we haven't yet visited, we add it to the worklist.
+    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
+             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
+      for (Use &U : I->operands())
+        if (auto *J = dyn_cast<Instruction>(U.get()))
+          if (!Visited.count(J))
+            Worklist.push_back(J);
+    }
 
-  /// Pass identification, replacement for typeid
-  static char ID;
+    // If we don't yet handle the instruction, give up.
+    else
+      FoundUnknownInst = true;
+  }
 
-  explicit SLPVectorizer() : FunctionPass(ID) {
-    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
+  // If we didn't encounter a memory access in the expression tree, or if we
+  // gave up for some reason, just return the width of V.
+  if (!MaxWidth || FoundUnknownInst)
+    return DL->getTypeSizeInBits(V->getType());
+
+  // Otherwise, return the maximum width we found.
+  return MaxWidth;
+}
+
+// Determine if a value V in a vectorizable expression Expr can be demoted to a
+// smaller type with a truncation. We collect the values that will be demoted
+// in ToDemote and additional roots that require investigating in Roots.
+static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
+                                  SmallVectorImpl<Value *> &ToDemote,
+                                  SmallVectorImpl<Value *> &Roots) {
+
+  // We can always demote constants.
+  if (isa<Constant>(V)) {
+    ToDemote.push_back(V);
+    return true;
   }
 
-  ScalarEvolution *SE;
-  TargetTransformInfo *TTI;
-  TargetLibraryInfo *TLI;
-  AliasAnalysis *AA;
-  LoopInfo *LI;
-  DominatorTree *DT;
-  AssumptionCache *AC;
+  // If the value is not an instruction in the expression with only one use, it
+  // cannot be demoted.
+  auto *I = dyn_cast<Instruction>(V);
+  if (!I || !I->hasOneUse() || !Expr.count(I))
+    return false;
 
-  bool runOnFunction(Function &F) override {
-    if (skipOptnoneFunction(F))
+  switch (I->getOpcode()) {
+
+  // We can always demote truncations and extensions. Since truncations can
+  // seed additional demotion, we save the truncated value.
+  case Instruction::Trunc:
+    Roots.push_back(I->getOperand(0));
+  case Instruction::ZExt:
+  case Instruction::SExt:
+    break;
+
+  // We can demote certain binary operations if we can demote both of their
+  // operands.
+  case Instruction::Add:
+  case Instruction::Sub:
+  case Instruction::Mul:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor:
+    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
+        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
       return false;
+    break;
 
-    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
-    TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
-    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
-    TLI = TLIP ? &TLIP->getTLI() : nullptr;
-    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
-    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
-    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-    AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
-
-    StoreRefs.clear();
-    bool Changed = false;
-
-    // If the target claims to have no vector registers don't attempt
-    // vectorization.
-    if (!TTI->getNumberOfRegisters(true))
+  // We can demote selects if we can demote their true and false values.
+  case Instruction::Select: {
+    SelectInst *SI = cast<SelectInst>(I);
+    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
+        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
+    break;
+  }
 
-    // Use the vector register size specified by the target unless overridden
-    // by a command-line option.
-    // TODO: It would be better to limit the vectorization factor based on
-    // data type rather than just register size. For example, x86 AVX has
-    // 256-bit registers, but it does not support integer operations
-    // at that width (that requires AVX2).
-    if (MaxVectorRegSizeOption.getNumOccurrences())
-      MaxVecRegSize = MaxVectorRegSizeOption;
-    else
-      MaxVecRegSize = TTI->getRegisterBitWidth(true);
+  // We can demote phis if we can demote all their incoming operands. Note that
+  // we don't need to worry about cycles since we ensure single use above.
+  case Instruction::PHI: {
+    PHINode *PN = cast<PHINode>(I);
+    for (Value *IncValue : PN->incoming_values())
+      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
+        return false;
+    break;
+  }
 
-    // Don't vectorize when the attribute NoImplicitFloat is used.
-    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
-      return false;
+  // Otherwise, conservatively give up.
+  default:
+    return false;
+  }
 
-    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
+  // Record the value that we can demote.
+  ToDemote.push_back(V);
+  return true;
+}
 
-    // Use the bottom up slp vectorizer to construct chains that start with
-    // store instructions.
-    BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC);
+void BoUpSLP::computeMinimumValueSizes() {
+  // If there are no external uses, the expression tree must be rooted by a
+  // store. We can't demote in-memory values, so there is nothing to do here.
+  if (ExternalUses.empty())
+    return;
 
-    // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
-    // delete instructions.
+  // We only attempt to truncate integer expressions.
+  auto &TreeRoot = VectorizableTree[0].Scalars;
+  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
+  if (!TreeRootIT)
+    return;
 
-    // Scan the blocks in the function in post order.
-    for (auto BB : post_order(&F.getEntryBlock())) {
-      // Vectorize trees that end at stores.
-      if (unsigned count = collectStores(BB, R)) {
-        (void)count;
-        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
-        Changed |= vectorizeStoreChains(R);
-      }
+  // If the expression is not rooted by a store, these roots should have
+  // external uses. We will rely on InstCombine to rewrite the expression in
+  // the narrower type. However, InstCombine only rewrites single-use values.
+  // This means that if a tree entry other than a root is used externally, it
+  // must have multiple uses and InstCombine will not rewrite it. The code
+  // below ensures that only the roots are used externally.
+  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
+  for (auto &EU : ExternalUses)
+    if (!Expr.erase(EU.Scalar))
+      return;
+  if (!Expr.empty())
+    return;
 
-      // Vectorize trees that end at reductions.
-      Changed |= vectorizeChainsInBlock(BB, R);
-    }
+  // Collect the scalar values of the vectorizable expression. We will use this
+  // context to determine which values can be demoted. If we see a truncation,
+  // we mark it as seeding another demotion.
+  for (auto &Entry : VectorizableTree)
+    Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());
 
-    if (Changed) {
-      R.optimizeGatherSequence();
-      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
-      DEBUG(verifyFunction(F));
+  // Ensure the roots of the vectorizable tree don't form a cycle. They must
+  // have a single external user that is not in the vectorizable tree.
+  for (auto *Root : TreeRoot)
+    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
+      return;
+
+  // Conservatively determine if we can actually truncate the roots of the
+  // expression. Collect the values that can be demoted in ToDemote and
+  // additional roots that require investigating in Roots.
+  SmallVector<Value *, 32> ToDemote;
+  SmallVector<Value *, 4> Roots;
+  for (auto *Root : TreeRoot)
+    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
+      return;
+
+  // The maximum bit width required to represent all the values that can be
+  // demoted without loss of precision. It would be safe to truncate the roots
+  // of the expression to this width.
+  auto MaxBitWidth = 8u;
+
+  // We first check if all the bits of the roots are demanded. If they're not,
+  // we can truncate the roots to this narrower type.
+  for (auto *Root : TreeRoot) {
+    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
+    MaxBitWidth = std::max<unsigned>(
+        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
+  }
+
+  // If all the bits of the roots are demanded, we can try a little harder to
+  // compute a narrower type. This can happen, for example, if the roots are
+  // getelementptr indices. InstCombine promotes these indices to the pointer
+  // width. Thus, all their bits are technically demanded even though the
+  // address computation might be vectorized in a smaller type.
+  //
+  // We start by looking at each entry that can be demoted. We compute the
+  // maximum bit width required to store the scalar by using ValueTracking to
+  // compute the number of high-order bits we can truncate.
+  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
+    MaxBitWidth = 8u;
+    for (auto *Scalar : ToDemote) {
+      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
+      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
+      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }
-    return Changed;
+  }
+
+  // Round MaxBitWidth up to the next power-of-two.
+  if (!isPowerOf2_64(MaxBitWidth))
+    MaxBitWidth = NextPowerOf2(MaxBitWidth);
+
+  // If the maximum bit width we compute is less than the width of the roots'
+  // type, we can proceed with the narrowing. Otherwise, do nothing.
+  if (MaxBitWidth >= TreeRootIT->getBitWidth())
+    return;
+
+  // If we can truncate the root, we must collect additional values that might
+  // be demoted as a result. That is, those seeded by truncations we will
+  // modify.
+  while (!Roots.empty())
+    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
+
+  // Finally, map the values we can demote to the maximum bit width we computed.
+  for (auto *Scalar : ToDemote)
+    MinBWs[Scalar] = MaxBitWidth;
+}
+
+namespace {
+/// The SLPVectorizer Pass.
+struct SLPVectorizer : public FunctionPass {
+  SLPVectorizerPass Impl;
+
+  /// Pass identification, replacement for typeid
+  static char ID;
+
+  explicit SLPVectorizer() : FunctionPass(ID) {
+    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
+  }
+
+
+  bool doInitialization(Module &M) override {
+    return false;
+  }
+
+  bool runOnFunction(Function &F) override {
+    if (skipFunction(F))
+      return false;
+
+    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
+    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
+
+    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
   }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -3233,51 +3468,105 @@ struct SLPVectorizer : public FunctionPass {
     AU.addRequired<TargetTransformInfoWrapperPass>();
     AU.addRequired<LoopInfoWrapperPass>();
     AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<DemandedBitsWrapperPass>();
     AU.addPreserved<LoopInfoWrapperPass>();
     AU.addPreserved<DominatorTreeWrapperPass>();
     AU.addPreserved<AAResultsWrapperPass>();
     AU.addPreserved<GlobalsAAWrapperPass>();
     AU.setPreservesCFG();
   }
+};
+} // end anonymous namespace
 
-private:
+PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
+  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
+  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
+  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
+  auto *AA = &AM.getResult<AAManager>(F);
+  auto *LI = &AM.getResult<LoopAnalysis>(F);
+  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
+  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
+  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
+
+  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
+  if (!Changed)
+    return PreservedAnalyses::all();
+  PreservedAnalyses PA;
+  PA.preserve<LoopAnalysis>();
+  PA.preserve<DominatorTreeAnalysis>();
+  PA.preserve<AAManager>();
+  PA.preserve<GlobalsAA>();
+  return PA;
+}
 
-  /// \brief Collect memory references and sort them according to their base
-  /// object. We sort the stores to their base objects to reduce the cost of the
-  /// quadratic search on the stores. TODO: We can further reduce this cost
-  /// if we flush the chain creation every time we run into a memory barrier.
-  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);
+bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
+                                TargetTransformInfo *TTI_,
+                                TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
+                                LoopInfo *LI_, DominatorTree *DT_,
+                                AssumptionCache *AC_, DemandedBits *DB_) {
+  SE = SE_;
+  TTI = TTI_;
+  TLI = TLI_;
+  AA = AA_;
+  LI = LI_;
+  DT = DT_;
+  AC = AC_;
+  DB = DB_;
+  DL = &F.getParent()->getDataLayout();
+
+  Stores.clear();
+  GEPs.clear();
+  bool Changed = false;
 
-  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
-  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);
+  // If the target claims to have no vector registers don't attempt
+  // vectorization.
+  if (!TTI->getNumberOfRegisters(true))
+    return false;
 
-  /// \brief Try to vectorize a list of operands.
-  /// \@param BuildVector A list of users to ignore for the purpose of
-  ///                     scheduling and that don't need extracting.
-  /// \returns true if a value was vectorized.
-  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
-                          ArrayRef<Value *> BuildVector = None,
-                          bool allowReorder = false);
+  // Don't vectorize when the attribute NoImplicitFloat is used.
+  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
+    return false;
 
-  /// \brief Try to vectorize a chain that may start at the operands of \V;
-  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);
+  DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
 
-  /// \brief Vectorize the stores that were collected in StoreRefs.
-  bool vectorizeStoreChains(BoUpSLP &R);
+  // Use the bottom up slp vectorizer to construct chains that start with
+  // store instructions.
+  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
 
-  /// \brief Scan the basic block and look for patterns that are likely to start
-  /// a vectorization chain.
-  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);
+  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
+  // delete instructions.
 
-  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
-                           BoUpSLP &R, unsigned VecRegSize);
+  // Scan the blocks in the function in post order.
+  for (auto BB : post_order(&F.getEntryBlock())) {
+    collectSeedInstructions(BB);
 
-  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
-                       BoUpSLP &R);
-private:
-  StoreListMap StoreRefs;
-  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
-};
+    // Vectorize trees that end at stores.
+    if (!Stores.empty()) {
+      DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
+                   << " underlying objects.\n");
+      Changed |= vectorizeStoreChains(R);
+    }
+
+    // Vectorize trees that end at reductions.
+    Changed |= vectorizeChainsInBlock(BB, R);
+
+    // Vectorize the index computations of getelementptr instructions. This
+    // is primarily intended to catch gather-like idioms ending at
+    // non-consecutive loads.
+    if (!GEPs.empty()) {
+      DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                   << " underlying objects.\n");
+      Changed |= vectorizeGEPIndices(BB, R);
+    }
+  }
+
+  if (Changed) {
+    R.optimizeGatherSequence();
+    DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
+    DEBUG(verifyFunction(F));
+  }
+  return Changed;
+}
 
 /// \brief Check that the Values in the slice in VL array are still existent in
 /// the WeakVH array.
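The hunks above move SLPVectorizer to the layout LLVM uses for new-pass-manager ports: a thin legacy FunctionPass wrapper owns the implementation object, and both the legacy runOnFunction and the new run(Function&, FunctionAnalysisManager&) entry points funnel into a single runImpl. Below is a minimal sketch of that shape, using hypothetical names (MyPassImpl, MyPass, MyPassLegacy) rather than the actual declarations in this patch:

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
using namespace llvm;

// Shared implementation; both pass managers delegate here.
struct MyPassImpl {
  bool runImpl(Function &F) {
    // Transform F; return true if anything changed.
    return false;
  }
};

// New-pass-manager interface.
struct MyPass : PassInfoMixin<MyPass> {
  MyPassImpl Impl;
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    return Impl.runImpl(F) ? PreservedAnalyses::none()
                           : PreservedAnalyses::all();
  }
};

// Legacy wrapper; the real pass also fetches its analyses here via
// getAnalysis<>() and forwards them to the same runImpl.
struct MyPassLegacy : public FunctionPass {
  static char ID;
  MyPassImpl Impl;
  MyPassLegacy() : FunctionPass(ID) {}
  bool runOnFunction(Function &F) override {
    if (skipFunction(F)) // honor optnone, as the patch now does
      return false;
    return Impl.runImpl(F);
  }
};
char MyPassLegacy::ID = 0;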
@@ -3290,15 +3579,13 @@ static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
   return !std::equal(VL.begin(), VL.end(), VH.begin());
 }
 
-bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
-                                        int CostThreshold, BoUpSLP &R,
-                                        unsigned VecRegSize) {
+bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain,
+                                            int CostThreshold, BoUpSLP &R,
+                                            unsigned VecRegSize) {
   unsigned ChainLen = Chain.size();
   DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
         << "\n");
-  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
-  auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
-  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
+  unsigned Sz = R.getVectorElementSize(Chain[0]);
   unsigned VF = VecRegSize / Sz;
 
   if (!isPowerOf2_32(Sz) || VF < 2)
@@ -3322,6 +3609,7 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
     ArrayRef<Value *> Operands = Chain.slice(i, VF);
 
     R.buildTree(Operands);
+    R.computeMinimumValueSizes();
 
     int Cost = R.getTreeCost();
 
@@ -3339,8 +3627,8 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
   return Changed;
 }
 
-bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
-                                    int costThreshold, BoUpSLP &R) {
+bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
+                                        int costThreshold, BoUpSLP &R) {
   SetVector<StoreInst *> Heads, Tails;
   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
 
@@ -3353,7 +3641,6 @@ bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
   // all of the pairs of stores that follow each other.
   SmallVector<unsigned, 16> IndexQueue;
   for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
-    const DataLayout &DL = Stores[i]->getModule()->getDataLayout();
     IndexQueue.clear();
     // If a store has multiple consecutive store candidates, search Stores
     // array according to the sequence: from i+1 to e, then from i-1 to 0.
@@ -3366,7 +3653,7 @@ bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
       IndexQueue.push_back(j - 1);
 
     for (auto &k : IndexQueue) {
-      if (R.isConsecutiveAccess(Stores[i], Stores[k], DL)) {
+      if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
         Tails.insert(Stores[k]);
         Heads.insert(Stores[i]);
         ConsecutiveChain[Stores[i]] = Stores[k];
@@ -3396,7 +3683,7 @@ bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
 
     // FIXME: Is division-by-2 the correct step? Should we assert that the
     // register size is a power-of-2?
-    for (unsigned Size = MaxVecRegSize; Size >= MinVecRegSize; Size /= 2) {
+    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); Size /= 2) {
       if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
         // Mark the vectorized stores so that we don't vectorize them again.
         VectorizedStores.insert(Operands.begin(), Operands.end());
@@ -3409,45 +3696,53 @@ bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
   return Changed;
 }
 
+void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
 
-unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
-  unsigned count = 0;
-  StoreRefs.clear();
-  const DataLayout &DL = BB->getModule()->getDataLayout();
-  for (Instruction &I : *BB) {
-    StoreInst *SI = dyn_cast<StoreInst>(&I);
-    if (!SI)
-      continue;
-
-    // Don't touch volatile stores.
-    if (!SI->isSimple())
-      continue;
+  // Initialize the collections. We will make a single pass over the block.
+  Stores.clear();
+  GEPs.clear();
 
-    // Check that the pointer points to scalars.
-    Type *Ty = SI->getValueOperand()->getType();
-    if (!isValidElementType(Ty))
-      continue;
+  // Visit the store and getelementptr instructions in BB and organize them in
+  // Stores and GEPs according to the underlying objects of their pointer
+  // operands.
+  for (Instruction &I : *BB) {
 
-    // Find the base pointer.
-    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
+    // Ignore store instructions that are volatile or have a pointer operand
+    // that doesn't point to a scalar type.
+    if (auto *SI = dyn_cast<StoreInst>(&I)) {
+      if (!SI->isSimple())
+        continue;
+      if (!isValidElementType(SI->getValueOperand()->getType()))
+        continue;
+      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
+    }
 
-    // Save the store locations.
-    StoreRefs[Ptr].push_back(SI);
-    count++;
+    // Ignore getelementptr instructions that have more than one index, a
+    // constant index, or a pointer operand that doesn't point to a scalar
+    // type.
+    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+      auto Idx = GEP->idx_begin()->get();
+      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
+        continue;
+      if (!isValidElementType(Idx->getType()))
+        continue;
+      if (GEP->getType()->isVectorTy())
+        continue;
+      GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
+    }
   }
-  return count;
 }
 
-bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
+bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
   if (!A || !B)
     return false;
   Value *VL[] = { A, B };
   return tryToVectorizeList(VL, R, None, true);
 }
 
-bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
-                                       ArrayRef<Value *> BuildVector,
-                                       bool allowReorder) {
+bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
+                                           ArrayRef<Value *> BuildVector,
+                                           bool allowReorder) {
   if (VL.size() < 2)
     return false;
 
@@ -3459,13 +3754,11 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
     return false;
 
   unsigned Opcode0 = I0->getOpcode();
-  const DataLayout &DL = I0->getModule()->getDataLayout();
-
-  Type *Ty0 = I0->getType();
-  unsigned Sz = DL.getTypeSizeInBits(Ty0);
   // FIXME: Register size should be a parameter to this function, so we can
   // try different vectorization factors.
-  unsigned VF = MinVecRegSize / Sz;
+  unsigned Sz = R.getVectorElementSize(I0);
+  unsigned VF = R.getMinVecRegSize() / Sz;
 
   for (Value *V : VL) {
     Type *Ty = V->getType();
@@ -3513,6 +3806,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
         Value *ReorderedOps[] = { Ops[1], Ops[0] };
         R.buildTree(ReorderedOps, None);
       }
+      R.computeMinimumValueSizes();
       int Cost = R.getTreeCost();
 
       if (Cost < -SLPCostThreshold) {
@@ -3529,15 +3823,16 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
           Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
           unsigned VecIdx = 0;
           for (auto &V : BuildVectorSlice) {
-            IRBuilder<true, NoFolder> Builder(
-                InsertAfter->getParent(), ++BasicBlock::iterator(InsertAfter));
-            InsertElementInst *IE = cast<InsertElementInst>(V);
+            IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                        ++BasicBlock::iterator(InsertAfter));
+            Instruction *I = cast<Instruction>(V);
+            assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
             Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
                 VectorizedRoot, Builder.getInt32(VecIdx++)));
-            IE->setOperand(1, Extract);
-            IE->removeFromParent();
-            IE->insertAfter(Extract);
-            InsertAfter = IE;
+            I->setOperand(1, Extract);
+            I->removeFromParent();
+            I->insertAfter(Extract);
+            InsertAfter = I;
           }
         }
         // Move to the next bundle.
@@ -3549,7 +3844,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
   return Changed;
 }
 
-bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
+bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
   if (!V)
     return false;
 
@@ -3662,9 +3957,14 @@ public:
   /// The width of one full horizontal reduction operation.
   unsigned ReduxWidth;
 
-  HorizontalReduction()
-      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
-        ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0) {}
+  /// Minimal width of available vector registers. It's used to determine
+  /// ReduxWidth.
+  unsigned MinVecRegSize;
+
+  HorizontalReduction(unsigned MinVecRegSize)
+      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
+        ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0),
+        MinVecRegSize(MinVecRegSize) {}
 
   /// \brief Try to find a reduction tree.
   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
@@ -3779,6 +4079,7 @@ public:
 
     for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
       V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
+      V.computeMinimumValueSizes();
 
       // Estimate cost.
       int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
@@ -3928,6 +4229,25 @@ static bool findBuildVector(InsertElementInst *FirstInsertElem,
   return false;
 }
 
+/// \brief Like findBuildVector, but looks backwards for construction of aggregate.
+///
+/// \return true if it matches.
+static bool findBuildAggregate(InsertValueInst *IV,
+                               SmallVectorImpl<Value *> &BuildVector,
+                               SmallVectorImpl<Value *> &BuildVectorOpds) {
+  if (!IV->hasOneUse())
+    return false;
+  Value *V = IV->getAggregateOperand();
+  if (!isa<UndefValue>(V)) {
+    InsertValueInst *I = dyn_cast<InsertValueInst>(V);
+    if (!I || !findBuildAggregate(I, BuildVector, BuildVectorOpds))
+      return false;
+  }
+  BuildVector.push_back(IV);
+  BuildVectorOpds.push_back(IV->getInsertedValueOperand());
+  return true;
+}
+
 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
   return V->getType() < V2->getType();
 }
@@ -3991,11 +4311,12 @@ static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
 /// \returns true if a horizontal reduction was matched and reduced.
 /// \returns false if a horizontal reduction was not matched.
 static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI,
-                                        BoUpSLP &R, TargetTransformInfo *TTI) {
+                                        BoUpSLP &R, TargetTransformInfo *TTI,
+                                        unsigned MinRegSize) {
   if (!ShouldVectorizeHor)
     return false;
 
-  HorizontalReduction HorRdx;
+  HorizontalReduction HorRdx(MinRegSize);
   if (!HorRdx.matchAssociativeReduction(P, BI))
     return false;
 
@@ -4008,7 +4329,7 @@ static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI,
   return HorRdx.tryToReduce(R, TTI);
 }
 
-bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
+bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
   bool Changed = false;
   SmallVector<Value *, 4> Incoming;
   SmallSet<Value *, 16> VisitedInstrs;
@@ -4083,7 +4404,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
         continue;
 
       // Try to match and vectorize a horizontal reduction.
-      if (canMatchHorizontalReduction(P, BI, R, TTI)) {
+      if (canMatchHorizontalReduction(P, BI, R, TTI, R.getMinVecRegSize())) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
@@ -4110,7 +4431,8 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
     if (StoreInst *SI = dyn_cast<StoreInst>(it))
       if (BinaryOperator *BinOp =
               dyn_cast<BinaryOperator>(SI->getValueOperand())) {
-        if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI) ||
+        if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI,
+                                        R.getMinVecRegSize()) ||
            tryToVectorize(BinOp, R)) {
          Changed = true;
          it = BB->begin();
@@ -4178,16 +4500,121 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
       continue;
     }
+
+    // Try to vectorize trees that start at insertvalue instructions feeding into
+    // a store.
+    if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
+      if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) {
+        const DataLayout &DL = BB->getModule()->getDataLayout();
+        if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
+          SmallVector<Value *, 16> BuildVector;
+          SmallVector<Value *, 16> BuildVectorOpds;
+          if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds))
+            continue;
+
+          DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n");
+          if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
+            Changed = true;
+            it = BB->begin();
+            e = BB->end();
+          }
+          continue;
+        }
+      }
+    }
   }
 
   return Changed;
 }
 
-bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
+bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
+  auto Changed = false;
+  for (auto &Entry : GEPs) {
+
+    // If the getelementptr list has fewer than two elements, there's nothing
+    // to do.
+    if (Entry.second.size() < 2)
+      continue;
+
+    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
+                 << Entry.second.size() << ".\n");
+
+    // We process the getelementptr list in chunks of 16 (like we do for
+    // stores) to minimize compile-time.
+    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
+      auto Len = std::min<unsigned>(BE - BI, 16);
+      auto GEPList = makeArrayRef(&Entry.second[BI], Len);
+
+      // Initialize a set of candidate getelementptrs. Note that we use a
+      // SetVector here to preserve program order. If the index computations
+      // are vectorizable and begin with loads, we want to minimize the chance
+      // of having to reorder them later.
+      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
+
+      // Some of the candidates may have already been vectorized after we
+      // initially collected them. If so, the WeakVHs will have nullified the
+      // values, so remove them from the set of candidates.
+      Candidates.remove(nullptr);
+
+      // Remove from the set of candidates all pairs of getelementptrs with
+      // constant differences. Such getelementptrs are likely not good
+      // candidates for vectorization in a bottom-up phase since one can be
+      // computed from the other. We also ensure all candidate getelementptr
+      // indices are unique.
+      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
+        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
+        if (!Candidates.count(GEPI))
+          continue;
+        auto *SCEVI = SE->getSCEV(GEPList[I]);
+        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
+          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
+          auto *SCEVJ = SE->getSCEV(GEPList[J]);
+          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
+            Candidates.remove(GEPList[I]);
+            Candidates.remove(GEPList[J]);
+          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
+            Candidates.remove(GEPList[J]);
+          }
+        }
+      }
+
+      // We break out of the above computation as soon as we know there are
+      // fewer than two candidates remaining.
+      if (Candidates.size() < 2)
+        continue;
+
+      // Add the single, non-constant index of each candidate to the bundle. We
+      // ensured the indices met these constraints when we originally collected
+      // the getelementptrs.
+      SmallVector<Value *, 16> Bundle(Candidates.size());
+      auto BundleIndex = 0u;
+      for (auto *V : Candidates) {
+        auto *GEP = cast<GetElementPtrInst>(V);
+        auto *GEPIdx = GEP->idx_begin()->get();
+        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
+        Bundle[BundleIndex++] = GEPIdx;
+      }
+
+      // Try and vectorize the indices. We are currently only interested in
+      // gather-like cases of the form:
+      //
+      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
+      //
+      // where the loads of "a", the loads of "b", and the subtractions can be
+      // performed in parallel. It's likely that detecting this pattern in a
+      // bottom-up phase will be simpler and less costly than building a
+      // full-blown top-down phase beginning at the consecutive loads.
+      Changed |= tryToVectorizeList(Bundle, R);
+    }
+  }
+  return Changed;
+}
+
+bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
   bool Changed = false;
   // Attempt to sort and vectorize each of the store-groups.
-  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
-       it != e; ++it) {
+  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
+       ++it) {
     if (it->second.size() < 2)
       continue;
@@ -4207,8 +4634,6 @@ bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
   return Changed;
 }
 
-} // end anonymous namespace
-
 char SLPVectorizer::ID = 0;
 static const char lv_name[] = "SLP Vectorizer";
 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
@@ -4217,6 +4642,7 @@ INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
 
 namespace llvm {
diff --git a/lib/Transforms/Vectorize/Vectorize.cpp b/lib/Transforms/Vectorize/Vectorize.cpp
index 6e002fd5d5dbc..28e0b2eb98666 100644
--- a/lib/Transforms/Vectorize/Vectorize.cpp
+++ b/lib/Transforms/Vectorize/Vectorize.cpp
@@ -29,6 +29,7 @@ void llvm::initializeVectorization(PassRegistry &Registry) {
   initializeBBVectorizePass(Registry);
   initializeLoopVectorizePass(Registry);
   initializeSLPVectorizerPass(Registry);
+  initializeLoadStoreVectorizerPass(Registry);
 }
 
 void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
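For reference, the gather-like idiom that the new SLPVectorizerPass::vectorizeGEPIndices (added earlier in this patch) targets looks like the following at the source level. This is a hypothetical example input, not code from the patch; the consecutive loads of a and b and the four subtractions form the vectorizable bundle, while the non-consecutive g[...] accesses stay scalar:

// Hypothetical input for the GEP-index vectorization path: only the
// index computations (a[i] - b[i]) are candidates for SLP vectorization;
// the g[...] loads remain a scalar gather.
int gather_sum(const int *g, const int *a, const int *b) {
  return g[a[0] - b[0]] + g[a[1] - b[1]] +
         g[a[2] - b[2]] + g[a[3] - b[3]];
}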