author    | Dimitry Andric <dim@FreeBSD.org> | 2018-02-24 21:27:30 +0000
committer | Dimitry Andric <dim@FreeBSD.org> | 2018-02-24 21:27:30 +0000
commit    | 0f8e52dfc671bf6e2c09c8a28062ec76237954ea (patch)
tree      | 03012a05e4c16a3dd809c281777acd1d9fe4a127 /lib
parent    | 3c315f3a8e8f326948fc789f146794ecd33cc540 (diff)
Diffstat (limited to 'lib')
-rw-r--r-- | lib/Analysis/ScalarEvolution.cpp                  |   7
-rw-r--r-- | lib/Bitcode/Reader/BitcodeReader.cpp              |  16
-rw-r--r-- | lib/Bitcode/Writer/BitcodeWriter.cpp              |  16
-rw-r--r-- | lib/Support/CMakeLists.txt                        |   3
-rw-r--r-- | lib/Target/AArch64/AArch64InstructionSelector.cpp | 113
-rw-r--r-- | lib/Target/AMDGPU/SIInstrInfo.cpp                 |  10
-rw-r--r-- | lib/Target/PowerPC/PPCCTRLoops.cpp                |   5
-rw-r--r-- | lib/Target/X86/X86.td                             |   8
-rw-r--r-- | lib/Transforms/InstCombine/InstCombineSelect.cpp  |  19
-rw-r--r-- | lib/Transforms/Scalar/LICM.cpp                    |  31
-rw-r--r-- | lib/Transforms/Utils/LoopUtils.cpp                | 216
-rw-r--r-- | lib/Transforms/Vectorize/LoopVectorize.cpp        |  18
12 files changed, 330 insertions, 132 deletions
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 10b5c74e378b4..bfff7afb5b4e3 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -205,6 +205,11 @@ static cl::opt<unsigned>
     cl::desc("Max coefficients in AddRec during evolving"),
     cl::init(16));
 
+static cl::opt<bool> VersionUnknown(
+    "scev-version-unknown", cl::Hidden,
+    cl::desc("Use predicated scalar evolution to version SCEVUnknowns"),
+    cl::init(false));
+
 //===----------------------------------------------------------------------===//
 //                           SCEV class definitions
 //===----------------------------------------------------------------------===//
@@ -11467,6 +11472,8 @@ private:
   // couldn't create an AddRec for it, or couldn't add the predicate), we just
   // return \p Expr.
   const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
+    if (!VersionUnknown)
+      return Expr;
     if (!isa<PHINode>(Expr->getValue()))
       return Expr;
     Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 95291a1caf9a5..945ac45153686 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1046,19 +1046,21 @@ static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
 static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
   FastMathFlags FMF;
-  if (0 != (Val & FastMathFlags::AllowReassoc))
+  if (0 != (Val & bitc::UnsafeAlgebra))
+    FMF.setFast();
+  if (0 != (Val & bitc::AllowReassoc))
     FMF.setAllowReassoc();
-  if (0 != (Val & FastMathFlags::NoNaNs))
+  if (0 != (Val & bitc::NoNaNs))
     FMF.setNoNaNs();
-  if (0 != (Val & FastMathFlags::NoInfs))
+  if (0 != (Val & bitc::NoInfs))
     FMF.setNoInfs();
-  if (0 != (Val & FastMathFlags::NoSignedZeros))
+  if (0 != (Val & bitc::NoSignedZeros))
     FMF.setNoSignedZeros();
-  if (0 != (Val & FastMathFlags::AllowReciprocal))
+  if (0 != (Val & bitc::AllowReciprocal))
     FMF.setAllowReciprocal();
-  if (0 != (Val & FastMathFlags::AllowContract))
+  if (0 != (Val & bitc::AllowContract))
     FMF.setAllowContract(true);
-  if (0 != (Val & FastMathFlags::ApproxFunc))
+  if (0 != (Val & bitc::ApproxFunc))
     FMF.setApproxFunc();
   return FMF;
 }
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index a7201ed973500..7bf37857eb972 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1330,19 +1330,19 @@ static uint64_t getOptimizationFlags(const Value *V) {
       Flags |= 1 << bitc::PEO_EXACT;
   } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
     if (FPMO->hasAllowReassoc())
-      Flags |= FastMathFlags::AllowReassoc;
+      Flags |= bitc::AllowReassoc;
     if (FPMO->hasNoNaNs())
-      Flags |= FastMathFlags::NoNaNs;
+      Flags |= bitc::NoNaNs;
     if (FPMO->hasNoInfs())
-      Flags |= FastMathFlags::NoInfs;
+      Flags |= bitc::NoInfs;
     if (FPMO->hasNoSignedZeros())
-      Flags |= FastMathFlags::NoSignedZeros;
+      Flags |= bitc::NoSignedZeros;
     if (FPMO->hasAllowReciprocal())
-      Flags |= FastMathFlags::AllowReciprocal;
+      Flags |= bitc::AllowReciprocal;
     if (FPMO->hasAllowContract())
-      Flags |= FastMathFlags::AllowContract;
+      Flags |= bitc::AllowContract;
     if (FPMO->hasApproxFunc())
-      Flags |= FastMathFlags::ApproxFunc;
+      Flags |= bitc::ApproxFunc;
   }
 
   return Flags;
@@ -3183,7 +3183,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
     Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
-    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); // flags
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // flags
     if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
         FUNCTION_INST_BINOP_FLAGS_ABBREV)
       llvm_unreachable("Unexpected abbrev ordering!");
diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt
index 5723f8fcf5bb3..d968688911eb9 100644
--- a/lib/Support/CMakeLists.txt
+++ b/lib/Support/CMakeLists.txt
@@ -4,7 +4,8 @@ if ( LLVM_ENABLE_ZLIB AND HAVE_LIBZ )
 endif()
 if( MSVC OR MINGW )
   # libuuid required for FOLDERID_Profile usage in lib/Support/Windows/Path.inc.
-  set(system_libs ${system_libs} psapi shell32 ole32 uuid)
+  # advapi32 required for CryptAcquireContextW in lib/Support/Windows/Path.inc.
+  set(system_libs ${system_libs} psapi shell32 ole32 uuid advapi32)
 elseif( CMAKE_HOST_UNIX )
   if( HAVE_LIBRT )
     set(system_libs ${system_libs} rt)
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 2bb9e381073af..0bc5b395499e6 100644
--- a/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -133,16 +133,21 @@ AArch64InstructionSelector::AArch64InstructionSelector(
 // for each class in the bank.
 static const TargetRegisterClass *
 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
-                         const RegisterBankInfo &RBI) {
+                         const RegisterBankInfo &RBI,
+                         bool GetAllRegSet = false) {
   if (RB.getID() == AArch64::GPRRegBankID) {
     if (Ty.getSizeInBits() <= 32)
-      return &AArch64::GPR32RegClass;
+      return GetAllRegSet ? &AArch64::GPR32allRegClass
+                          : &AArch64::GPR32RegClass;
     if (Ty.getSizeInBits() == 64)
-      return &AArch64::GPR64RegClass;
+      return GetAllRegSet ? &AArch64::GPR64allRegClass
+                          : &AArch64::GPR64RegClass;
     return nullptr;
   }
 
   if (RB.getID() == AArch64::FPRRegBankID) {
+    if (Ty.getSizeInBits() <= 16)
+      return &AArch64::FPR16RegClass;
     if (Ty.getSizeInBits() == 32)
       return &AArch64::FPR32RegClass;
     if (Ty.getSizeInBits() == 64)
@@ -310,19 +315,46 @@ static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
   return GenericOpc;
 }
 
+static bool selectFP16CopyFromGPR32(MachineInstr &I, const TargetInstrInfo &TII,
+                                    MachineRegisterInfo &MRI, unsigned SrcReg) {
+  // Copies from gpr32 to fpr16 need to use a sub-register copy.
+  unsigned CopyReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY))
+      .addDef(CopyReg)
+      .addUse(SrcReg);
+  unsigned SubRegCopy = MRI.createVirtualRegister(&AArch64::FPR16RegClass);
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
+      .addDef(SubRegCopy)
+      .addUse(CopyReg, 0, AArch64::hsub);
+
+  MachineOperand &RegOp = I.getOperand(1);
+  RegOp.setReg(SubRegCopy);
+  return true;
+}
+
 static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                        MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
 
   unsigned DstReg = I.getOperand(0).getReg();
+  unsigned SrcReg = I.getOperand(1).getReg();
   if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+    if (TRI.getRegClass(AArch64::FPR16RegClassID)->contains(DstReg) &&
+        !TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+      const RegisterBank &RegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
+      const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(
+          MRI.getType(SrcReg), RegBank, RBI, /* GetAllRegSet */ true);
+      if (SrcRC == &AArch64::GPR32allRegClass)
+        return selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
+    }
     assert(I.isCopy() && "Generic operators do not allow physical registers");
     return true;
   }
 
   const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
   const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
-  unsigned SrcReg = I.getOperand(1).getReg();
+  (void)DstSize;
   const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
   (void)SrcSize;
   assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
@@ -340,26 +372,38 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
          "Copy with different width?!");
   assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
          "GPRs cannot get more than 64-bit width values");
-  const TargetRegisterClass *RC = nullptr;
-
-  if (RegBank.getID() == AArch64::FPRRegBankID) {
-    if (DstSize <= 16)
-      RC = &AArch64::FPR16RegClass;
-    else if (DstSize <= 32)
-      RC = &AArch64::FPR32RegClass;
-    else if (DstSize <= 64)
-      RC = &AArch64::FPR64RegClass;
-    else if (DstSize <= 128)
-      RC = &AArch64::FPR128RegClass;
-    else {
-      DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
-      return false;
+
+  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
+      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
+  if (!RC) {
+    DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
+    return false;
+  }
+
+  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(SrcReg);
+    const TargetRegisterClass *SrcRC =
+        RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
+    const RegisterBank *RB = nullptr;
+    if (!SrcRC) {
+      RB = RegClassOrBank.get<const RegisterBank *>();
+      SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true);
+    }
+    // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG.
+    if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) {
+      unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
+      BuildMI(*I.getParent(), I, I.getDebugLoc(),
+              TII.get(AArch64::SUBREG_TO_REG))
+          .addDef(PromoteReg)
+          .addImm(0)
+          .addUse(SrcReg)
+          .addImm(AArch64::hsub);
+      MachineOperand &RegOp = I.getOperand(1);
+      RegOp.setReg(PromoteReg);
+    } else if (RC == &AArch64::FPR16RegClass &&
+               SrcRC == &AArch64::GPR32allRegClass) {
+      selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
     }
-  } else {
-    assert(RegBank.getID() == AArch64::GPRRegBankID &&
-           "Bitcast for the flags?");
-    RC =
-        DstSize <= 32 ? &AArch64::GPR32allRegClass : &AArch64::GPR64allRegClass;
   }
 
   // No need to constrain SrcReg. It will get constrained when
@@ -795,15 +839,23 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   }
   case TargetOpcode::G_EXTRACT: {
     LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
+    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+    unsigned SrcSize = SrcTy.getSizeInBits();
     // Larger extracts are vectors, same-size extracts should be something else
     // by now (either split up or simplified to a COPY).
     if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
       return false;
 
-    I.setDesc(TII.get(AArch64::UBFMXri));
+    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
     MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                       Ty.getSizeInBits() - 1);
 
+    if (SrcSize < 64) {
+      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
+             "unexpected G_EXTRACT types");
+      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+    }
+
     unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
     BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
             TII.get(AArch64::COPY))
@@ -818,17 +870,26 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
 
   case TargetOpcode::G_INSERT: {
     LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
+    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+    unsigned DstSize = DstTy.getSizeInBits();
+    (void)DstSize;
     // Larger inserts are vectors, same-size ones should be something else by
     // now (split up or turned into COPYs).
     if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
       return false;
 
-    I.setDesc(TII.get(AArch64::BFMXri));
+    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
     unsigned LSB = I.getOperand(3).getImm();
     unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
-    I.getOperand(3).setImm((64 - LSB) % 64);
+    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
     MachineInstrBuilder(MF, I).addImm(Width - 1);
 
+    if (DstSize < 64) {
+      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
+             "unexpected G_INSERT types");
+      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+    }
+
     unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
     BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
             TII.get(AArch64::SUBREG_TO_REG))
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 2c127d787260a..654b96f792b11 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3797,7 +3797,8 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
         }
       }
 
-      BuildMI(*MBB, Inst, Inst.getDebugLoc(),
+      MachineInstr *NewInstr =
+          BuildMI(*MBB, Inst, Inst.getDebugLoc(),
               get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
           .add(*VAddr) // vaddr
           .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
@@ -3806,12 +3807,17 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
           .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
           .addImm(0) // slc
           .addImm(0) // tfe
-          .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end());
+          .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end())
+          .getInstr();
 
       MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(),
                          VDst);
       addUsersToMoveToVALUWorklist(VDst, MRI, Worklist);
       Inst.eraseFromParent();
+
+      // Legalize all operands other than the offset. Notably, convert the
+      // srsrc into SGPRs using v_readfirstlane if needed.
+      legalizeOperands(*NewInstr);
       continue;
     }
   }
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index fc638829378ab..1d10ef9acfbae 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -454,13 +454,16 @@ bool PPCCTRLoops::mightUseCTR(BasicBlock *BB) {
         return true;
       }
 
+      // FREM is always a call.
+      if (J->getOpcode() == Instruction::FRem)
+        return true;
+
       if (STI->useSoftFloat()) {
         switch(J->getOpcode()) {
         case Instruction::FAdd:
         case Instruction::FSub:
         case Instruction::FMul:
         case Instruction::FDiv:
-        case Instruction::FRem:
         case Instruction::FPTrunc:
         case Instruction::FPExt:
         case Instruction::FPToUI:
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index ba97982e3330e..cc4c8823c3da7 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -740,7 +740,13 @@ class SkylakeServerProc<string Name> : ProcModel<Name, SkylakeServerModel,
 def : SkylakeServerProc<"skylake-avx512">;
 def : SkylakeServerProc<"skx">; // Legacy alias.
 
-def CNLFeatures : ProcessorFeatures<SKXFeatures.Value, [
+def CNLFeatures : ProcessorFeatures<SKLFeatures.Value, [
+  FeatureAVX512,
+  FeatureCDI,
+  FeatureDQI,
+  FeatureBWI,
+  FeatureVLX,
+  FeaturePKU,
   FeatureVBMI,
   FeatureIFMA,
   FeatureSHA
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 6f26f7f5cd19b..c790de3505f32 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1643,11 +1643,25 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
     }
   }
 
+  auto canMergeSelectThroughBinop = [](BinaryOperator *BO) {
+    // The select might be preventing a division by 0.
+    switch (BO->getOpcode()) {
+    default:
+      return true;
+    case Instruction::SRem:
+    case Instruction::URem:
+    case Instruction::SDiv:
+    case Instruction::UDiv:
+      return false;
+    }
+  };
+
   // Try to simplify a binop sandwiched between 2 selects with the same
   // condition.
   // select(C, binop(select(C, X, Y), W), Z) -> select(C, binop(X, W), Z)
   BinaryOperator *TrueBO;
-  if (match(TrueVal, m_OneUse(m_BinOp(TrueBO)))) {
+  if (match(TrueVal, m_OneUse(m_BinOp(TrueBO))) &&
+      canMergeSelectThroughBinop(TrueBO)) {
     if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(0))) {
       if (TrueBOSI->getCondition() == CondVal) {
         TrueBO->setOperand(0, TrueBOSI->getTrueValue());
@@ -1666,7 +1680,8 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
 
   // select(C, Z, binop(select(C, X, Y), W)) -> select(C, Z, binop(Y, W))
   BinaryOperator *FalseBO;
-  if (match(FalseVal, m_OneUse(m_BinOp(FalseBO)))) {
+  if (match(FalseVal, m_OneUse(m_BinOp(FalseBO))) &&
+      canMergeSelectThroughBinop(FalseBO)) {
     if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(0))) {
       if (FalseBOSI->getCondition() == CondVal) {
         FalseBO->setOperand(0, FalseBOSI->getFalseValue());
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 4ea935793b802..946474fef0625 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -97,7 +97,7 @@ static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                   const LoopSafetyInfo *SafetyInfo,
                   OptimizationRemarkEmitter *ORE);
 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
-                 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
+                 const Loop *CurLoop, LoopSafetyInfo *SafetyInfo,
                  OptimizationRemarkEmitter *ORE, bool FreeInLoop);
 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                            const DominatorTree *DT,
@@ -855,10 +855,16 @@ static Instruction *sinkThroughTriviallyReplacablePHI(
   return New;
 }
 
-static bool canSplitPredecessors(PHINode *PN) {
+static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
   BasicBlock *BB = PN->getParent();
   if (!BB->canSplitPredecessors())
     return false;
+  // It's not impossible to split EHPad blocks, but if BlockColors already exist
+  // it require updating BlockColors for all offspring blocks accordingly. By
+  // skipping such corner case, we can make updating BlockColors after splitting
+  // predecessor fairly simple.
+  if (!SafetyInfo->BlockColors.empty() && BB->getFirstNonPHI()->isEHPad())
+    return false;
   for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
     BasicBlock *BBPred = *PI;
     if (isa<IndirectBrInst>(BBPred->getTerminator()))
@@ -868,7 +874,8 @@ static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
 }
 
 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
-                                        LoopInfo *LI, const Loop *CurLoop) {
+                                        LoopInfo *LI, const Loop *CurLoop,
+                                        LoopSafetyInfo *SafetyInfo) {
 #ifndef NDEBUG
   SmallVector<BasicBlock *, 32> ExitBlocks;
   CurLoop->getUniqueExitBlocks(ExitBlocks);
@@ -910,13 +917,21 @@ static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
   // LE:
   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
   //
+  auto &BlockColors = SafetyInfo->BlockColors;
   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
   while (!PredBBs.empty()) {
     BasicBlock *PredBB = *PredBBs.begin();
     assert(CurLoop->contains(PredBB) &&
            "Expect all predecessors are in the loop");
-    if (PN->getBasicBlockIndex(PredBB) >= 0)
-      SplitBlockPredecessors(ExitBB, PredBB, ".split.loop.exit", DT, LI, true);
+    if (PN->getBasicBlockIndex(PredBB) >= 0) {
+      BasicBlock *NewPred = SplitBlockPredecessors(
+          ExitBB, PredBB, ".split.loop.exit", DT, LI, true);
+      // Since we do not allow splitting EH-block with BlockColors in
+      // canSplitPredecessors(), we can simply assign predecessor's color to
+      // the new block.
+      if (!BlockColors.empty())
+        BlockColors[NewPred] = BlockColors[PredBB];
+    }
     PredBBs.remove(PredBB);
   }
 }
@@ -927,7 +942,7 @@ static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
 /// position, and may either delete it or move it to outside of the loop.
 ///
 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
-                 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
+                 const Loop *CurLoop, LoopSafetyInfo *SafetyInfo,
                  OptimizationRemarkEmitter *ORE, bool FreeInLoop) {
   DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
   ORE->emit([&]() {
@@ -975,12 +990,12 @@ static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
     if (isTriviallyReplacablePHI(*PN, I))
       continue;
 
-    if (!canSplitPredecessors(PN))
+    if (!canSplitPredecessors(PN, SafetyInfo))
       return Changed;
 
     // Split predecessors of the PHI so that we can make users trivially
    // replacable.
-    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop);
+    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo);
 
     // Should rebuild the iterators, as they may be invalidated by
     // splitPredecessorsOfLoopExit().
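[Editor's illustration, not part of the commit: the LICM hunks above thread LoopSafetyInfo into sink() so that funclet "block colors" can be copied onto the block created by SplitBlockPredecessors. The sketch below is standalone C++ with hypothetical Block/ColorMap stand-ins rather than LLVM types; it only demonstrates the bookkeeping rule the patch adds, namely that the new predecessor inherits the color of the block it was split from, which is safe precisely because color-tracked EH-pad blocks are rejected up front.]

```cpp
#include <cassert>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-ins for basic blocks and funclet "colors"; not LLVM types.
struct Block {
  std::string Name;
  bool StartsWithEHPad;
  std::vector<Block *> Preds;
};
using ColorMap = std::map<const Block *, int>;

// Same guard the patch adds to canSplitPredecessors(): refuse to split when
// colors are being tracked and the exit block starts with an EH pad.
bool canSplit(const Block &Exit, const ColorMap &Colors) {
  return Colors.empty() || !Exit.StartsWithEHPad;
}

// Same bookkeeping splitPredecessorsOfLoopExit() gains: the freshly created
// block on the Pred->Exit edge simply inherits Pred's color.
Block *splitEdge(Block &Exit, Block &Pred, ColorMap &Colors) {
  Block *NewPred = new Block{Pred.Name + ".split.loop.exit", false, {&Pred}};
  for (Block *&P : Exit.Preds)
    if (P == &Pred)
      P = NewPred;
  if (!Colors.empty())
    Colors[NewPred] = Colors[&Pred];
  return NewPred;
}

int main() {
  Block Pred{"loop.latch", false, {}};
  Block Exit{"loop.exit", false, {&Pred}};
  ColorMap Colors = {{&Pred, 1}, {&Exit, 1}};
  assert(canSplit(Exit, Colors));
  Block *NewPred = splitEdge(Exit, Pred, Colors);
  assert(Colors.at(NewPred) == Colors.at(&Pred));
  delete NewPred;
  return 0;
}
```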
diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp
index a5a305ef582bb..0a357f4b50044 100644
--- a/lib/Transforms/Utils/LoopUtils.cpp
+++ b/lib/Transforms/Utils/LoopUtils.cpp
@@ -23,6 +23,7 @@
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Module.h"
@@ -30,6 +31,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
 using namespace llvm;
@@ -77,10 +79,13 @@ bool RecurrenceDescriptor::isArithmeticRecurrenceKind(RecurrenceKind Kind) {
   return false;
 }
 
-Instruction *
-RecurrenceDescriptor::lookThroughAnd(PHINode *Phi, Type *&RT,
-                                     SmallPtrSetImpl<Instruction *> &Visited,
-                                     SmallPtrSetImpl<Instruction *> &CI) {
+/// Determines if Phi may have been type-promoted. If Phi has a single user
+/// that ANDs the Phi with a type mask, return the user. RT is updated to
+/// account for the narrower bit width represented by the mask, and the AND
+/// instruction is added to CI.
+static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
+                                   SmallPtrSetImpl<Instruction *> &Visited,
+                                   SmallPtrSetImpl<Instruction *> &CI) {
   if (!Phi->hasOneUse())
     return Phi;
 
@@ -101,70 +106,92 @@ RecurrenceDescriptor::lookThroughAnd(PHINode *Phi, Type *&RT,
   return Phi;
 }
 
-bool RecurrenceDescriptor::getSourceExtensionKind(
-    Instruction *Start, Instruction *Exit, Type *RT, bool &IsSigned,
-    SmallPtrSetImpl<Instruction *> &Visited,
-    SmallPtrSetImpl<Instruction *> &CI) {
+/// Compute the minimal bit width needed to represent a reduction whose exit
+/// instruction is given by Exit.
+static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
+                                                     DemandedBits *DB,
+                                                     AssumptionCache *AC,
+                                                     DominatorTree *DT) {
+  bool IsSigned = false;
+  const DataLayout &DL = Exit->getModule()->getDataLayout();
+  uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
+
+  if (DB) {
+    // Use the demanded bits analysis to determine the bits that are live out
+    // of the exit instruction, rounding up to the nearest power of two. If the
+    // use of demanded bits results in a smaller bit width, we know the value
+    // must be positive (i.e., IsSigned = false), because if this were not the
+    // case, the sign bit would have been demanded.
+    auto Mask = DB->getDemandedBits(Exit);
+    MaxBitWidth = Mask.getBitWidth() - Mask.countLeadingZeros();
+  }
+
+  if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
+    // If demanded bits wasn't able to limit the bit width, we can try to use
+    // value tracking instead. This can be the case, for example, if the value
+    // may be negative.
+    auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
+    auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
+    MaxBitWidth = NumTypeBits - NumSignBits;
+    KnownBits Bits = computeKnownBits(Exit, DL);
+    if (!Bits.isNonNegative()) {
+      // If the value is not known to be non-negative, we set IsSigned to true,
+      // meaning that we will use sext instructions instead of zext
+      // instructions to restore the original type.
+      IsSigned = true;
+      if (!Bits.isNegative())
+        // If the value is not known to be negative, we don't known what the
+        // upper bit is, and therefore, we don't know what kind of extend we
+        // will need. In this case, just increase the bit width by one bit and
+        // use sext.
+        ++MaxBitWidth;
+    }
+  }
+  if (!isPowerOf2_64(MaxBitWidth))
+    MaxBitWidth = NextPowerOf2(MaxBitWidth);
+
+  return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
+                        IsSigned);
+}
+
+/// Collect cast instructions that can be ignored in the vectorizer's cost
+/// model, given a reduction exit value and the minimal type in which the
+/// reduction can be represented.
+static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
+                                 Type *RecurrenceType,
+                                 SmallPtrSetImpl<Instruction *> &Casts) {
 
   SmallVector<Instruction *, 8> Worklist;
-  bool FoundOneOperand = false;
-  unsigned DstSize = RT->getPrimitiveSizeInBits();
+  SmallPtrSet<Instruction *, 8> Visited;
   Worklist.push_back(Exit);
 
-  // Traverse the instructions in the reduction expression, beginning with the
-  // exit value.
   while (!Worklist.empty()) {
-    Instruction *I = Worklist.pop_back_val();
-    for (Use &U : I->operands()) {
-
-      // Terminate the traversal if the operand is not an instruction, or we
-      // reach the starting value.
-      Instruction *J = dyn_cast<Instruction>(U.get());
-      if (!J || J == Start)
-        continue;
-
-      // Otherwise, investigate the operation if it is also in the expression.
-      if (Visited.count(J)) {
-        Worklist.push_back(J);
+    Instruction *Val = Worklist.pop_back_val();
+    Visited.insert(Val);
+    if (auto *Cast = dyn_cast<CastInst>(Val))
+      if (Cast->getSrcTy() == RecurrenceType) {
+        // If the source type of a cast instruction is equal to the recurrence
+        // type, it will be eliminated, and should be ignored in the vectorizer
+        // cost model.
+        Casts.insert(Cast);
         continue;
       }
 
-      // If the operand is not in Visited, it is not a reduction operation, but
-      // it does feed into one. Make sure it is either a single-use sign- or
-      // zero-extend instruction.
-      CastInst *Cast = dyn_cast<CastInst>(J);
-      bool IsSExtInst = isa<SExtInst>(J);
-      if (!Cast || !Cast->hasOneUse() || !(isa<ZExtInst>(J) || IsSExtInst))
-        return false;
-
-      // Ensure the source type of the extend is no larger than the reduction
-      // type. It is not necessary for the types to be identical.
-      unsigned SrcSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
-      if (SrcSize > DstSize)
-        return false;
-
-      // Furthermore, ensure that all such extends are of the same kind.
-      if (FoundOneOperand) {
-        if (IsSigned != IsSExtInst)
-          return false;
-      } else {
-        FoundOneOperand = true;
-        IsSigned = IsSExtInst;
-      }
-
-      // Lastly, if the source type of the extend matches the reduction type,
-      // add the extend to CI so that we can avoid accounting for it in the
-      // cost model.
-      if (SrcSize == DstSize)
-        CI.insert(Cast);
-    }
+    // Add all operands to the work list if they are loop-varying values that
+    // we haven't yet visited.
+    for (Value *O : cast<User>(Val)->operands())
+      if (auto *I = dyn_cast<Instruction>(O))
+        if (TheLoop->contains(I) && !Visited.count(I))
+          Worklist.push_back(I);
   }
-  return true;
 }
 
 bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
                                            Loop *TheLoop, bool HasFunNoNaNAttr,
-                                           RecurrenceDescriptor &RedDes) {
+                                           RecurrenceDescriptor &RedDes,
+                                           DemandedBits *DB,
+                                           AssumptionCache *AC,
+                                           DominatorTree *DT) {
   if (Phi->getNumIncomingValues() != 2)
     return false;
 
@@ -353,14 +380,49 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
   if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
     return false;
 
-  // If we think Phi may have been type-promoted, we also need to ensure that
-  // all source operands of the reduction are either SExtInsts or ZEstInsts. If
-  // so, we will be able to evaluate the reduction in the narrower bit width.
-  if (Start != Phi)
-    if (!getSourceExtensionKind(Start, ExitInstruction, RecurrenceType,
-                                IsSigned, VisitedInsts, CastInsts))
+  if (Start != Phi) {
+    // If the starting value is not the same as the phi node, we speculatively
+    // looked through an 'and' instruction when evaluating a potential
+    // arithmetic reduction to determine if it may have been type-promoted.
+    //
+    // We now compute the minimal bit width that is required to represent the
+    // reduction. If this is the same width that was indicated by the 'and', we
+    // can represent the reduction in the smaller type. The 'and' instruction
+    // will be eliminated since it will essentially be a cast instruction that
+    // can be ignore in the cost model. If we compute a different type than we
+    // did when evaluating the 'and', the 'and' will not be eliminated, and we
+    // will end up with different kinds of operations in the recurrence
+    // expression (e.g., RK_IntegerAND, RK_IntegerADD). We give up if this is
+    // the case.
+    //
+    // The vectorizer relies on InstCombine to perform the actual
+    // type-shrinking. It does this by inserting instructions to truncate the
+    // exit value of the reduction to the width indicated by RecurrenceType and
+    // then extend this value back to the original width. If IsSigned is false,
+    // a 'zext' instruction will be generated; otherwise, a 'sext' will be
+    // used.
+    //
+    // TODO: We should not rely on InstCombine to rewrite the reduction in the
+    //       smaller type. We should just generate a correctly typed expression
+    //       to begin with.
+    Type *ComputedType;
+    std::tie(ComputedType, IsSigned) =
+        computeRecurrenceType(ExitInstruction, DB, AC, DT);
+    if (ComputedType != RecurrenceType)
       return false;
 
+    // The recurrence expression will be represented in a narrower type. If
+    // there are any cast instructions that will be unnecessary, collect them
+    // in CastInsts. Note that the 'and' instruction was already included in
+    // this list.
+    //
+    // TODO: A better way to represent this may be to tag in some way all the
+    //       instructions that are a part of the reduction. The vectorizer cost
+    //       model could then apply the recurrence type to these instructions,
+    //       without needing a white list of instructions to ignore.
+    collectCastsToIgnore(TheLoop, ExitInstruction, RecurrenceType, CastInsts);
+  }
+
   // We found a reduction var if we have reached the original phi node and we
   // only have a single instruction with out-of-loop users.
@@ -480,47 +542,57 @@ bool RecurrenceDescriptor::hasMultipleUsesOf(
   return false;
 }
 
 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
-                                          RecurrenceDescriptor &RedDes) {
+                                          RecurrenceDescriptor &RedDes,
+                                          DemandedBits *DB, AssumptionCache *AC,
+                                          DominatorTree *DT) {
   BasicBlock *Header = TheLoop->getHeader();
   Function &F = *Header->getParent();
   bool HasFunNoNaNAttr =
       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
 
-  if (AddReductionVar(Phi, RK_IntegerAdd, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_IntegerMult, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerMult, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_IntegerOr, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerOr, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_IntegerAnd, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerAnd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_IntegerXor, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerXor, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_IntegerMinMax, TheLoop, HasFunNoNaNAttr,
-                      RedDes)) {
+  if (AddReductionVar(Phi, RK_IntegerMinMax, TheLoop, HasFunNoNaNAttr, RedDes,
+                      DB, AC, DT)) {
     DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_FloatMult, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_FloatMult, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_FloatAdd, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_FloatAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RK_FloatMinMax, TheLoop, HasFunNoNaNAttr, RedDes)) {
+  if (AddReductionVar(Phi, RK_FloatMinMax, TheLoop, HasFunNoNaNAttr, RedDes, DB,
+                      AC, DT)) {
     DEBUG(dbgs() << "Found an float MINMAX reduction PHI." << *Phi << "\n");
     return true;
   }
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 64f206ea92eb3..5bcf0c0a7ba63 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1542,9 +1542,10 @@ public:
       const TargetTransformInfo *TTI,
       std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
       OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
-      LoopVectorizeHints *H)
+      LoopVectorizeHints *H, DemandedBits *DB, AssumptionCache *AC)
       : TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA),
-        ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H) {}
+        ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H),
+        DB(DB), AC(AC) {}
 
   /// ReductionList contains the reduction descriptors for all
   /// of the reductions that were found in the loop.
@@ -1833,6 +1834,14 @@ private:
   /// Used to emit an analysis of any legality issues.
   LoopVectorizeHints *Hints;
 
+  /// The demanded bits analsyis is used to compute the minimum type size in
+  /// which a reduction can be computed.
+  DemandedBits *DB;
+
+  /// The assumption cache analysis is used to compute the minimum type size in
+  /// which a reduction can be computed.
+  AssumptionCache *AC;
+
   /// While vectorizing these instructions we have to generate a
   /// call to the appropriate masked intrinsic
   SmallPtrSet<const Instruction *, 8> MaskedOp;
@@ -5300,7 +5309,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
       }
 
       RecurrenceDescriptor RedDes;
-      if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
+      if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
+                                               DT)) {
         if (RedDes.hasUnsafeAlgebra())
           Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
         AllowedExit.insert(RedDes.getLoopExitInstr());
@@ -8514,7 +8524,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
   // Check if it is legal to vectorize the loop.
   LoopVectorizationRequirements Requirements(*ORE);
   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
-                                &Requirements, &Hints);
+                                &Requirements, &Hints, DB, AC);
   if (!LVL.canVectorize()) {
     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
     emitMissedWarning(F, L, Hints, ORE);
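[Editor's illustration, not part of the commit: the LoopUtils/LoopVectorize hunks replace the old extend-kind check with computeRecurrenceType(), which derives the narrowest type a reduction can be evaluated in from demanded-bits (and, failing that, sign-bit) information and rounds it up to a power of two, as the calls to isPowerOf2_64()/NextPowerOf2() in the diff show. The sketch below is standalone C++ that models only that rounding step, using a plain 64-bit mask in place of LLVM's DemandedBits/KnownBits results; the function names are illustrative and are not LLVM APIs.]

```cpp
#include <cassert>
#include <cstdint>

// Round Width up to the next power of two, playing the role NextPowerOf2()
// plays for the recurrence type in the patch.
static uint64_t roundUpToPowerOfTwo(uint64_t Width) {
  uint64_t P = 1;
  while (P < Width)
    P <<= 1;
  return P;
}

// Given the demanded-bits mask of a reduction's exit value (modelled as a
// plain 64-bit mask over a TypeBits-wide integer), return the bit width the
// reduction could be evaluated in. If the mask already reaches the sign bit,
// keep the original width, mirroring the "value may be negative" fallback.
static unsigned minimalRecurrenceBits(uint64_t DemandedMask, unsigned TypeBits) {
  unsigned HighestDemanded = 0;
  for (unsigned I = 0; I < TypeBits; ++I)
    if (DemandedMask & (uint64_t(1) << I))
      HighestDemanded = I + 1;
  if (HighestDemanded == 0 || HighestDemanded == TypeBits)
    return TypeBits; // nothing narrower is known to be enough
  return (unsigned)roundUpToPowerOfTwo(HighestDemanded);
}

int main() {
  // An i32 reduction of which only the low 8 bits are demanded fits in i8.
  assert(minimalRecurrenceBits(0xFFu, 32) == 8);
  // 12 demanded bits round up to the next power of two: i16.
  assert(minimalRecurrenceBits(0xFFFu, 32) == 16);
  // If the sign bit is demanded, stay at the original width.
  assert(minimalRecurrenceBits(0xFFFFFFFFu, 32) == 32);
  return 0;
}
```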