Diffstat (limited to 'lib/Target/PowerPC/PPCCTRLoops.cpp')
-rw-r--r-- | lib/Target/PowerPC/PPCCTRLoops.cpp | 108 |
1 files changed, 51 insertions, 57 deletions
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 24bc027f8106..094d3e6a61b5 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -24,12 +24,14 @@
 //===----------------------------------------------------------------------===//

 #include "PPC.h"
+#include "PPCSubtarget.h"
 #include "PPCTargetMachine.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Dominators.h"
@@ -81,10 +83,7 @@ namespace {
   public:
     static char ID;

-    PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
-      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
-    }
-    PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
+    PPCCTRLoops() : FunctionPass(ID) {
       initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
     }

@@ -99,16 +98,18 @@ namespace {
     }

   private:
-    bool mightUseCTR(const Triple &TT, BasicBlock *BB);
+    bool mightUseCTR(BasicBlock *BB);
     bool convertToCTRLoop(Loop *L);

   private:
-    PPCTargetMachine *TM;
+    const PPCTargetMachine *TM;
+    const PPCSubtarget *STI;
+    const PPCTargetLowering *TLI;
+    const DataLayout *DL;
+    const TargetLibraryInfo *LibInfo;
     LoopInfo *LI;
     ScalarEvolution *SE;
-    const DataLayout *DL;
     DominatorTree *DT;
-    const TargetLibraryInfo *LibInfo;
     bool PreserveLCSSA;
   };

@@ -149,9 +150,7 @@ INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
 INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                     false, false)

-FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
-  return new PPCCTRLoops(TM);
-}
+FunctionPass *llvm::createPPCCTRLoops() { return new PPCCTRLoops(); }

 #ifndef NDEBUG
 INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
@@ -169,6 +168,14 @@ bool PPCCTRLoops::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;

+  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+  if (!TPC)
+    return false;
+
+  TM = &TPC->getTM<PPCTargetMachine>();
+  STI = TM->getSubtargetImpl(F);
+  TLI = STI->getTargetLowering();
+
   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -198,8 +205,7 @@ static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {

 // Determining the address of a TLS variable results in a function call in
 // certain TLS models.
-static bool memAddrUsesCTR(const PPCTargetMachine *TM,
-                           const Value *MemAddr) {
+static bool memAddrUsesCTR(const PPCTargetMachine &TM, const Value *MemAddr) {
   const auto *GV = dyn_cast<GlobalValue>(MemAddr);
   if (!GV) {
     // Recurse to check for constants that refer to TLS global variables.
@@ -213,35 +219,35 @@ static bool memAddrUsesCTR(const PPCTargetMachine *TM,

   if (!GV->isThreadLocal())
     return false;
-  if (!TM)
-    return true;
-  TLSModel::Model Model = TM->getTLSModel(GV);
+  TLSModel::Model Model = TM.getTLSModel(GV);
   return Model == TLSModel::GeneralDynamic ||
          Model == TLSModel::LocalDynamic;
 }

-bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
+// Loop through the inline asm constraints and look for something that clobbers
+// ctr.
+static bool asmClobbersCTR(InlineAsm *IA) {
+  InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
+  for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
+    InlineAsm::ConstraintInfo &C = CIV[i];
+    if (C.Type != InlineAsm::isInput)
+      for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
+        if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
+          return true;
+  }
+  return false;
+}
+
+bool PPCCTRLoops::mightUseCTR(BasicBlock *BB) {
   for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J) {
     if (CallInst *CI = dyn_cast<CallInst>(J)) {
+      // Inline ASM is okay, unless it clobbers the ctr register.
       if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
-        // Inline ASM is okay, unless it clobbers the ctr register.
-        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
-        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
-          InlineAsm::ConstraintInfo &C = CIV[i];
-          if (C.Type != InlineAsm::isInput)
-            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
-              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
-                return true;
-        }
-
+        if (asmClobbersCTR(IA))
+          return true;
         continue;
       }

-      if (!TM)
-        return true;
-      const TargetLowering *TLI =
-        TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
-
       if (Function *F = CI->getCalledFunction()) {
         // Most intrinsics don't become function calls, but some might.
         // sin, cos, exp and log are always calls.
@@ -380,9 +386,8 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
         }

         if (Opcode) {
-          auto &DL = CI->getModule()->getDataLayout();
-          MVT VTy = TLI->getSimpleValueType(DL, CI->getArgOperand(0)->getType(),
-                                            true);
+          MVT VTy = TLI->getSimpleValueType(
+              *DL, CI->getArgOperand(0)->getType(), true);
           if (VTy == MVT::Other)
             return true;

@@ -406,17 +411,17 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
       CastInst *CI = cast<CastInst>(J);
       if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
           CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
-          isLargeIntegerTy(TT.isArch32Bit(), CI->getSrcTy()->getScalarType()) ||
-          isLargeIntegerTy(TT.isArch32Bit(), CI->getDestTy()->getScalarType()))
+          isLargeIntegerTy(!TM->isPPC64(), CI->getSrcTy()->getScalarType()) ||
+          isLargeIntegerTy(!TM->isPPC64(), CI->getDestTy()->getScalarType()))
         return true;
-    } else if (isLargeIntegerTy(TT.isArch32Bit(),
+    } else if (isLargeIntegerTy(!TM->isPPC64(),
                                 J->getType()->getScalarType()) &&
                (J->getOpcode() == Instruction::UDiv ||
                 J->getOpcode() == Instruction::SDiv ||
                 J->getOpcode() == Instruction::URem ||
                 J->getOpcode() == Instruction::SRem)) {
       return true;
-    } else if (TT.isArch32Bit() &&
+    } else if (!TM->isPPC64() &&
                isLargeIntegerTy(false, J->getType()->getScalarType()) &&
                (J->getOpcode() == Instruction::Shl ||
                 J->getOpcode() == Instruction::AShr ||
@@ -428,16 +433,11 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
       // On PowerPC, indirect jumps use the counter register.
       return true;
     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
-      if (!TM)
-        return true;
-      const TargetLowering *TLI =
-        TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
-
       if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
         return true;
     }

-    if (TM->getSubtargetImpl(*BB->getParent())->getTargetLowering()->useSoftFloat()) {
+    if (STI->useSoftFloat()) {
       switch(J->getOpcode()) {
       case Instruction::FAdd:
       case Instruction::FSub:
@@ -456,7 +456,7 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
     }

     for (Value *Operand : J->operands())
-      if (memAddrUsesCTR(TM, Operand))
+      if (memAddrUsesCTR(*TM, Operand))
         return true;
   }

@@ -466,11 +466,6 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
 bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
   bool MadeChange = false;

-  const Triple TT =
-    Triple(L->getHeader()->getParent()->getParent()->getTargetTriple());
-  if (!TT.isArch32Bit() && !TT.isArch64Bit())
-    return MadeChange; // Unknown arch. type.
-
   // Process nested loops first.
   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
     MadeChange |= convertToCTRLoop(*I);
@@ -495,7 +490,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
   // want to use the counter register if the loop contains calls.
   for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
        I != IE; ++I)
-    if (mightUseCTR(TT, *I))
+    if (mightUseCTR(*I))
       return MadeChange;

   SmallVector<BasicBlock*, 4> ExitingBlocks;
@@ -517,7 +512,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
     } else if (!SE->isLoopInvariant(EC, L))
       continue;

-    if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
+    if (SE->getTypeSizeInBits(EC->getType()) > (TM->isPPC64() ? 64 : 32))
       continue;

     // We now have a loop-invariant count of loop iterations (which is not the
@@ -571,7 +566,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
   // preheader, then we can use it (except if the preheader contains a use of
   // the CTR register because some such uses might be reordered by the
   // selection DAG after the mtctr instruction).
-  if (!Preheader || mightUseCTR(TT, Preheader))
+  if (!Preheader || mightUseCTR(Preheader))
     Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
   if (!Preheader)
     return MadeChange;
@@ -582,10 +577,9 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
   // selected branch.
   MadeChange = true;

-  SCEVExpander SCEVE(*SE, Preheader->getModule()->getDataLayout(), "loopcnt");
+  SCEVExpander SCEVE(*SE, *DL, "loopcnt");
   LLVMContext &C = SE->getContext();
-  Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
-                                       Type::getInt32Ty(C);
+  Type *CountType = TM->isPPC64() ? Type::getInt64Ty(C) : Type::getInt32Ty(C);
   if (!ExitCount->getType()->isPointerTy() &&
       ExitCount->getType() != CountType)
     ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
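
For reference, the heart of this change is the initialization pattern visible in the runOnFunction hunk above: instead of receiving a PPCTargetMachine through the pass constructor and createPPCCTRLoops(), the pass asks TargetPassConfig for the target machine when it runs. The sketch below is not part of the patch; it shows that pattern in isolation, using a hypothetical ExamplePass (only the TargetPassConfig calls are taken from the diff).

// Minimal sketch, not part of the patch: how an IR-level codegen pass can
// recover its TargetMachine via TargetPassConfig instead of a constructor
// argument.
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {
// "ExamplePass" is a hypothetical name used only for illustration.
struct ExamplePass : public FunctionPass {
  static char ID;
  ExamplePass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    // Mirrors the `if (!TPC) return false;` guard added by the patch: bail
    // out when the pass runs outside a codegen pipeline and no
    // TargetPassConfig is available.
    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC)
      return false;

    // getTM<> returns the TargetMachine owned by the pass pipeline, so the
    // pass no longer needs a TM pointer injected at construction time.
    const TargetMachine &TM = TPC->getTM<TargetMachine>();
    (void)TM; // Subtarget/lowering queries would go here, as in PPCCTRLoops.
    return false;
  }
};
} // end anonymous namespace

char ExamplePass::ID = 0;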