Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/FunctionAttrs.cpp              |   4
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                  |   9
-rw-r--r--  lib/Transforms/IPO/IPConstantPropagation.cpp      |   7
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                     |  52
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp    | 259
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp           |  12
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                    |  16
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                    |  12
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp    |  44
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp         |  12
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp        | 179
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp             |   7
-rw-r--r--  lib/Transforms/Utils/Mem2Reg.cpp                  |   2
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp  |  12
14 files changed, 487 insertions, 140 deletions
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index b3a832f12d0b0..a16d335ef50ff 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -212,7 +212,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const std::vector<CallGraphNode *> &SCC) {
     for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A!=E; ++A)
       if (isa<PointerType>(A->getType()) && !A->hasNoCaptureAttr() &&
-          !PointerMayBeCaptured(A, true)) {
+          !PointerMayBeCaptured(A, true, /*StoreCaptures=*/false)) {
         A->addAttr(Attribute::NoCapture);
         ++NumNoCapture;
         Changed = true;
@@ -280,7 +280,7 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
         return false;  // Did not come from an allocation.
     }
 
-    if (PointerMayBeCaptured(RetVal, false))
+    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
       return false;
   }
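For context on the capture queries above: an argument is "captured" when some copy of the pointer may outlive the call. A minimal C++ sketch (hypothetical functions, not part of this commit) of the two cases AddNoCaptureAttrs has to tell apart:

    int *StashedPtr;

    // Only reads through the pointer; no copy survives the call, so the
    // deduction above can mark p nocapture.
    int ReadThrough(int *p) { return *p; }

    // A copy of the pointer escapes into a global and outlives the call,
    // so PointerMayBeCaptured must report a capture here.
    void Capture(int *p) { StashedPtr = p; }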
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 442f2fb655288..4635d0e61c394 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1898,6 +1898,15 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
     // Global variables without names cannot be referenced outside this module.
     if (!GV->hasName() && !GV->isDeclaration())
       GV->setLinkage(GlobalValue::InternalLinkage);
+    // Simplify the initializer.
+    if (GV->hasInitializer())
+      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
+        TargetData *TD = getAnalysisIfAvailable<TargetData>();
+        Constant *New = ConstantFoldConstantExpression(CE, TD);
+        if (New && New != CE)
+          GV->setInitializer(New);
+      }
+
     // Do more involved optimizations if the global is internal.
     if (!GV->isConstant() && GV->hasLocalLinkage() && GV->hasInitializer())
       Changed |= ProcessInternalGlobal(GV, GVI);
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index 023e642e648c7..df2456f9f2b7e 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -19,7 +19,6 @@
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Constants.h"
 #include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -155,7 +154,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
 // callers will be updated to use the value they pass in directly instead of
 // using the return value.
 bool IPCP::PropagateConstantReturn(Function &F) {
-  if (F.getReturnType() == Type::getVoidTy(F.getContext()))
+  if (F.getReturnType()->isVoidTy())
     return false; // No return value.
 
   // If this function could be overridden later in the link stage, we can't
@@ -163,8 +162,6 @@ bool IPCP::PropagateConstantReturn(Function &F) {
   if (F.mayBeOverridden())
     return false;
 
-  LLVMContext &Context = F.getContext();
-
   // Check to see if this function returns a constant.
   SmallVector<Value *,4> RetVals;
   const StructType *STy = dyn_cast<StructType>(F.getReturnType());
@@ -188,7 +185,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
       if (!STy)
         V = RI->getOperand(i);
       else
-        V = FindInsertedValue(RI->getOperand(0), i, Context);
+        V = FindInsertedValue(RI->getOperand(0), i);
 
       if (V) {
         // Ignore undefs, we can change them into anything
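A hedged sketch of the new GlobalOpt step in isolation, using the same 2.6-era APIs the hunk above calls (TD may be null; that is tolerated by construction, since GlobalOpt passes whatever getAnalysisIfAvailable returns):

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/Constants.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Fold a constant-expression initializer (e.g. a constant GEP) down to a
    // plain constant, mirroring the new OptimizeGlobalVars code above.
    static void SimplifyInitializer(GlobalVariable *GV, TargetData *TD) {
      if (!GV->hasInitializer()) return;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
        Constant *New = ConstantFoldConstantExpression(CE, TD);
        if (New && New != CE)
          GV->setInitializer(New);
      }
    }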
DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); - + DEBUG(if (!NewInsts.empty()) + errs() << "INSERTED " << NewInsts.size() << " INSTS: " + << *NewInsts.back() << '\n'); + Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, LI->getAlignment(), UnavailablePred->getTerminator()); diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index 1c48366e89fbb..d12ad815f5ace 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -2163,8 +2163,8 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) { // Add has the property that adding any two 2's complement numbers can only // have one carry bit which can change a sign. As such, if LHS and RHS each - // have at least two sign bits, we know that the addition of the two values will - // sign extend fine. + // have at least two sign bits, we know that the addition of the two values + // will sign extend fine. if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1) return true; @@ -2184,15 +2184,12 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { bool Changed = SimplifyCommutative(I); Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); - if (Constant *RHSC = dyn_cast<Constant>(RHS)) { - // X + undef -> undef - if (isa<UndefValue>(RHS)) - return ReplaceInstUsesWith(I, RHS); - - // X + 0 --> X - if (RHSC->isNullValue()) - return ReplaceInstUsesWith(I, LHS); + if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(), + I.hasNoUnsignedWrap(), TD)) + return ReplaceInstUsesWith(I, V); + + if (Constant *RHSC = dyn_cast<Constant>(RHS)) { if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) { // X + (signbit) --> X ^ signbit const APInt& Val = CI->getValue(); @@ -4070,6 +4067,21 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS, /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible. 
@@ -4070,6 +4067,21 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
 
 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
                                           ICmpInst *LHS, ICmpInst *RHS) {
+  // (icmp eq A, null) & (icmp eq B, null) -->
+  //    (icmp eq (ptrtoint(A)|ptrtoint(B)), 0)
+  if (TD &&
+      LHS->getPredicate() == ICmpInst::ICMP_EQ &&
+      RHS->getPredicate() == ICmpInst::ICMP_EQ &&
+      isa<ConstantPointerNull>(LHS->getOperand(1)) &&
+      isa<ConstantPointerNull>(RHS->getOperand(1))) {
+    const Type *IntPtrTy = TD->getIntPtrType(I.getContext());
+    Value *A = Builder->CreatePtrToInt(LHS->getOperand(0), IntPtrTy);
+    Value *B = Builder->CreatePtrToInt(RHS->getOperand(0), IntPtrTy);
+    Value *NewOr = Builder->CreateOr(A, B);
+    return new ICmpInst(ICmpInst::ICMP_EQ, NewOr,
+                        Constant::getNullValue(IntPtrTy));
+  }
+
   Value *Val, *Val2;
   ConstantInt *LHSCst, *RHSCst;
   ICmpInst::Predicate LHSCC, RHSCC;
@@ -4081,12 +4093,20 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
                   m_ConstantInt(RHSCst))))
     return 0;
 
-  // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
-  // where C is a power of 2
-  if (LHSCst == RHSCst && LHSCC == RHSCC && LHSCC == ICmpInst::ICMP_ULT &&
-      LHSCst->getValue().isPowerOf2()) {
-    Value *NewOr = Builder->CreateOr(Val, Val2);
-    return new ICmpInst(LHSCC, NewOr, LHSCst);
+  if (LHSCst == RHSCst && LHSCC == RHSCC) {
+    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
+    // where C is a power of 2
+    if (LHSCC == ICmpInst::ICMP_ULT &&
+        LHSCst->getValue().isPowerOf2()) {
+      Value *NewOr = Builder->CreateOr(Val, Val2);
+      return new ICmpInst(LHSCC, NewOr, LHSCst);
+    }
+
+    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
+    if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
+      Value *NewOr = Builder->CreateOr(Val, Val2);
+      return new ICmpInst(LHSCC, NewOr, LHSCst);
+    }
   }
 
   // From here on, we only handle:
   // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
@@ -4322,7 +4342,6 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
 
   if (Value *V = SimplifyAndInst(Op0, Op1, TD))
     return ReplaceInstUsesWith(I, V);
-
 
   // See if we can simplify any instructions used by the instruction whose sole
   // purpose is to compute bits we don't care about.
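At the source level, the new FoldAndOfICmps entries (and their FoldOrOfICmps duals added below) let two pointer null checks collapse into a single test. A hedged C++ illustration of the shape of code that benefits (hypothetical function, using & rather than && so both compares feed one 'and' in IR):

    #include <cstdint>

    bool BothNull(int *P, int *Q) {
      // Evaluates as (icmp eq P, null) & (icmp eq Q, null) in IR; with
      // TargetData available the new entry rewrites this into
      //   (icmp eq (or (ptrtoint P), (ptrtoint Q)), 0)
      // i.e. the C expression ((uintptr_t)P | (uintptr_t)Q) == 0.
      return (P == 0) & (Q == 0);
    }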
@@ -4743,16 +4762,37 @@ static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
 
 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
                                          ICmpInst *LHS, ICmpInst *RHS) {
+  // (icmp ne A, null) | (icmp ne B, null) -->
+  //    (icmp ne (ptrtoint(A)|ptrtoint(B)), 0)
+  if (TD &&
+      LHS->getPredicate() == ICmpInst::ICMP_NE &&
+      RHS->getPredicate() == ICmpInst::ICMP_NE &&
+      isa<ConstantPointerNull>(LHS->getOperand(1)) &&
+      isa<ConstantPointerNull>(RHS->getOperand(1))) {
+    const Type *IntPtrTy = TD->getIntPtrType(I.getContext());
+    Value *A = Builder->CreatePtrToInt(LHS->getOperand(0), IntPtrTy);
+    Value *B = Builder->CreatePtrToInt(RHS->getOperand(0), IntPtrTy);
+    Value *NewOr = Builder->CreateOr(A, B);
+    return new ICmpInst(ICmpInst::ICMP_NE, NewOr,
+                        Constant::getNullValue(IntPtrTy));
+  }
+
   Value *Val, *Val2;
   ConstantInt *LHSCst, *RHSCst;
   ICmpInst::Predicate LHSCC, RHSCC;
 
   // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
-  if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
-             m_ConstantInt(LHSCst))) ||
-      !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
-             m_ConstantInt(RHSCst))))
+  if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), m_ConstantInt(LHSCst))) ||
+      !match(RHS, m_ICmp(RHSCC, m_Value(Val2), m_ConstantInt(RHSCst))))
     return 0;
+
+
+  // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
+  if (LHSCst == RHSCst && LHSCC == RHSCC &&
+      LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
+    Value *NewOr = Builder->CreateOr(Val, Val2);
+    return new ICmpInst(LHSCC, NewOr, LHSCst);
+  }
 
   // From here on, we only handle:
   // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
@@ -8539,6 +8579,36 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
     }
   }
 
+  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
+  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
+  // may lead to additional simplifications.
+  if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
+    if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
+      uint32_t BitWidth = ITy->getBitWidth();
+      if (BitWidth > 1) {
+        Value *LHS = ICI->getOperand(0);
+        Value *RHS = ICI->getOperand(1);
+
+        APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
+        APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
+        APInt TypeMask(APInt::getHighBitsSet(BitWidth, BitWidth-1));
+        ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
+        ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
+
+        if (KnownZeroLHS.countLeadingOnes() == BitWidth-1 &&
+            KnownZeroRHS.countLeadingOnes() == BitWidth-1) {
+          if (!DoXform) return ICI;
+
+          Value *Xor = Builder->CreateXor(LHS, RHS);
+          if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
+            Xor = Builder->CreateXor(Xor, ConstantInt::get(ITy, 1));
+          Xor->takeName(ICI);
+          return ReplaceInstUsesWith(CI, Xor);
+        }
+      }
+    }
+  }
+
   return 0;
 }
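The equality-to-xor rewrite in transformZExtICmp is justified by a one-bit truth table: when only the low bit of each operand can be set, A != B coincides with A ^ B, and A == B with (A ^ B) ^ 1. A quick exhaustive check, illustrative and not part of the commit:

    #include <cassert>

    int main() {
      for (unsigned A = 0; A <= 1; ++A)
        for (unsigned B = 0; B <= 1; ++B) {
          assert(unsigned(A != B) == (A ^ B));         // icmp ne -> xor
          assert(unsigned(A == B) == ((A ^ B) ^ 1u));  // icmp eq -> xor, then flip
        }
      return 0;
    }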
@@ -9842,6 +9912,126 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       if (Operand->getIntrinsicID() == Intrinsic::bswap)
         return ReplaceInstUsesWith(CI, Operand->getOperand(1));
     break;
+  case Intrinsic::uadd_with_overflow: {
+    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
+    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+    uint32_t BitWidth = IT->getBitWidth();
+    APInt Mask = APInt::getSignBit(BitWidth);
+    APInt LHSKnownZero(BitWidth, 0);
+    APInt LHSKnownOne(BitWidth, 0);
+    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
+    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
+    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
+
+    if (LHSKnownNegative || LHSKnownPositive) {
+      APInt RHSKnownZero(BitWidth, 0);
+      APInt RHSKnownOne(BitWidth, 0);
+      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
+      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
+      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
+      if (LHSKnownNegative && RHSKnownNegative) {
+        // The sign bit is set in both cases: this MUST overflow.
+        // Create a simple add instruction, and insert it into the struct.
+        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
+        Worklist.Add(Add);
+        Constant *V[] = {
+          UndefValue::get(LHS->getType()), ConstantInt::getTrue(*Context)
+        };
+        Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
+        return InsertValueInst::Create(Struct, Add, 0);
+      }
+
+      if (LHSKnownPositive && RHSKnownPositive) {
+        // The sign bit is clear in both cases: this CANNOT overflow.
+        // Create a simple add instruction, and insert it into the struct.
+        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
+        Worklist.Add(Add);
+        Constant *V[] = {
+          UndefValue::get(LHS->getType()), ConstantInt::getFalse(*Context)
+        };
+        Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
+        return InsertValueInst::Create(Struct, Add, 0);
+      }
+    }
+  }
+  // FALL THROUGH uadd into sadd
+  case Intrinsic::sadd_with_overflow:
+    // Canonicalize constants into the RHS.
+    if (isa<Constant>(II->getOperand(1)) &&
+        !isa<Constant>(II->getOperand(2))) {
+      Value *LHS = II->getOperand(1);
+      II->setOperand(1, II->getOperand(2));
+      II->setOperand(2, LHS);
+      return II;
+    }
+
+    // X + undef -> undef
+    if (isa<UndefValue>(II->getOperand(2)))
+      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
+
+    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
+      // X + 0 -> {X, false}
+      if (RHS->isZero()) {
+        Constant *V[] = {
+          UndefValue::get(II->getOperand(0)->getType()),
+          ConstantInt::getFalse(*Context)
+        };
+        Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
+        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+      }
+    }
+    break;
+  case Intrinsic::usub_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+    // undef - X -> undef
+    // X - undef -> undef
+    if (isa<UndefValue>(II->getOperand(1)) ||
+        isa<UndefValue>(II->getOperand(2)))
+      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
+
+    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
+      // X - 0 -> {X, false}
+      if (RHS->isZero()) {
+        Constant *V[] = {
+          UndefValue::get(II->getOperand(1)->getType()),
+          ConstantInt::getFalse(*Context)
+        };
+        Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
+        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+      }
+    }
+    break;
+  case Intrinsic::umul_with_overflow:
+  case Intrinsic::smul_with_overflow:
+    // Canonicalize constants into the RHS.
+    if (isa<Constant>(II->getOperand(1)) &&
+        !isa<Constant>(II->getOperand(2))) {
+      Value *LHS = II->getOperand(1);
+      II->setOperand(1, II->getOperand(2));
+      II->setOperand(2, LHS);
+      return II;
+    }
+
+    // X * undef -> undef
+    if (isa<UndefValue>(II->getOperand(2)))
+      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
+
+    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
+      // X*0 -> {0, false}
+      if (RHSI->isZero())
+        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
+
+      // X * 1 -> {X, false}
+      if (RHSI->equalsInt(1)) {
+        Constant *V[] = {
+          UndefValue::get(II->getOperand(1)->getType()),
+          ConstantInt::getFalse(*Context)
+        };
+        Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
+        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+      }
+    }
+    break;
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
   case Intrinsic::x86_sse_loadu_ps:
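The sign-bit reasoning behind the uadd_with_overflow folds above is ordinary unsigned arithmetic: if both 32-bit operands are >= 2^31 the sum must wrap, and if both are < 2^31 it cannot. A small demonstration, illustrative only:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Both sign bits set: the true sum is at least 2^32, so it MUST overflow.
      uint32_t A = 0x80000000u, B = 0x80000001u;
      assert(uint64_t(A) + B > UINT32_MAX);
      // Both sign bits clear: the sum is at most 2^32 - 2, so it CANNOT overflow.
      uint32_t C = 0x7fffffffu, D = 0x7fffffffu;
      assert(uint64_t(C) + D <= UINT32_MAX);
      return 0;
    }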
@@ -11282,21 +11472,16 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
 }
 
 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
+  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
+
+  if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
+    return ReplaceInstUsesWith(GEP, V);
+
   Value *PtrOp = GEP.getOperand(0);
-  // Eliminate 'getelementptr %P, i32 0' and 'getelementptr %P', they are noops.
-  if (GEP.getNumOperands() == 1)
-    return ReplaceInstUsesWith(GEP, PtrOp);
 
   if (isa<UndefValue>(GEP.getOperand(0)))
     return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
 
-  bool HasZeroPointerIndex = false;
-  if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
-    HasZeroPointerIndex = C->isNullValue();
-
-  if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
-    return ReplaceInstUsesWith(GEP, PtrOp);
-
   // Eliminate unneeded casts for indices.
   if (TD) {
     bool MadeChange = false;
@@ -11401,6 +11586,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     return 0;
   }
 
+  bool HasZeroPointerIndex = false;
+  if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
+    HasZeroPointerIndex = C->isZero();
+
   // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
   // into     : GEP [10 x i8]* X, i32 0, ...
   //
@@ -11952,12 +12141,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   Value *Val = SI.getOperand(0);
   Value *Ptr = SI.getOperand(1);
 
-  if (isa<UndefValue>(Ptr)) {     // store X, undef -> noop (even if volatile)
-    EraseInstFromFunction(SI);
-    ++NumCombined;
-    return 0;
-  }
-
   // If the RHS is an alloca with a single use, zapify the store, making the
   // alloca dead.
   // If the RHS is an alloca with a two uses, the other one being a
@@ -12920,7 +13103,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       if (LHSMask.size() == Mask.size()) {
         std::vector<unsigned> NewMask;
         for (unsigned i = 0, e = Mask.size(); i != e; ++i)
-          if (Mask[i] >= 2*e)
+          if (Mask[i] >= e)
             NewMask.push_back(2*e);
           else
             NewMask.push_back(LHSMask[Mask[i]]);
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 58641135ede83..1b93f3441e413 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -158,12 +158,18 @@ bool JumpThreading::runOnFunction(Function &F) {
         if (BBI->isTerminator()) {
           // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
           // block, we have to make sure it isn't in the LoopHeaders set.  We
-          // reinsert afterward in the rare case when the block isn't deleted.
+          // reinsert afterward if needed.
          bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
+          BasicBlock *Succ = BI->getSuccessor(0);
 
-          if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
+          if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
             Changed = true;
-          else if (ErasedFromLoopHeaders)
+            // If we deleted BB and BB was the header of a loop, then the
+            // successor is now the header of the loop.
+            BB = Succ;
+          }
+
+          if (ErasedFromLoopHeaders)
             LoopHeaders.insert(BB);
         }
       }
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 104c8739c0e36..5511387c8da40 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -63,15 +63,6 @@ static cl::opt<bool>
 DisablePromotion("disable-licm-promotion", cl::Hidden,
                  cl::desc("Disable memory promotion in LICM pass"));
 
-// This feature is currently disabled by default because CodeGen is not yet
-// capable of rematerializing these constants in PIC mode, so it can lead to
-// degraded performance. Compile test/CodeGen/X86/remat-constant.ll with
-// -relocation-model=pic to see an example of this.
-static cl::opt<bool>
-EnableLICMConstantMotion("enable-licm-constant-variables", cl::Hidden,
-                         cl::desc("Enable hoisting/sinking of constant "
-                                  "global variables"));
-
 namespace {
   struct LICM : public LoopPass {
     static char ID; // Pass identification, replacement for typeid
@@ -383,8 +374,7 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
 
     // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
-    if (EnableLICMConstantMotion &&
-        AA->pointsToConstantMemory(LI->getOperand(0)))
+    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
 
     // Don't hoist loads which have may-aliased stores in loop.
@@ -603,7 +593,7 @@ void LICM::sink(Instruction &I) {
       if (AI) {
         std::vector<AllocaInst*> Allocas;
         Allocas.push_back(AI);
-        PromoteMemToReg(Allocas, *DT, *DF, AI->getContext(), CurAST);
+        PromoteMemToReg(Allocas, *DT, *DF, CurAST);
       }
     }
   }
@@ -779,7 +769,7 @@ void LICM::PromoteValuesInLoop() {
   PromotedAllocas.reserve(PromotedValues.size());
   for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i)
     PromotedAllocas.push_back(PromotedValues[i].first);
-  PromoteMemToReg(PromotedAllocas, *DT, *DF, Preheader->getContext(), CurAST);
+  PromoteMemToReg(PromotedAllocas, *DT, *DF, CurAST);
 }
 
 /// FindPromotableValuesInLoop - Check the current loop for stores to definite
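With the flag gone, hoisting loads from constant memory is unconditional. A hedged C++ sketch (hypothetical names) of the kind of loop this affects: even if the load from Table lands in the same alias set as the stores through Out, pointsToConstantMemory proves it can never be modified, so LICM may hoist it out of the loop.

    extern const int Table[256];  // constant memory

    void Fill(int *Out, int N, int K) {
      for (int i = 0; i != N; ++i)
        Out[i] = Table[K];  // loop-invariant load, now always hoistable
    }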
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index c202a2c41de89..d8c59b1d7421e 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -1869,8 +1869,16 @@ bool IPSCCP::runOnModule(Module &M) {
   for (unsigned i = 0, e = BlocksToErase.size(); i != e; ++i) {
     // If there are any PHI nodes in this successor, drop entries for BB now.
     BasicBlock *DeadBB = BlocksToErase[i];
-    while (!DeadBB->use_empty()) {
-      Instruction *I = cast<Instruction>(DeadBB->use_back());
+    for (Value::use_iterator UI = DeadBB->use_begin(), UE = DeadBB->use_end();
+         UI != UE; ) {
+      // Grab the user and then increment the iterator early, as the user
+      // will be deleted.  Step past all adjacent uses from the same user.
+      Instruction *I = dyn_cast<Instruction>(*UI);
+      do { ++UI; } while (UI != UE && *UI == I);
+
+      // Ignore blockaddress users; BasicBlock's dtor will handle them.
+      if (!I) continue;
+
       bool Folded = ConstantFoldTerminator(I->getParent());
       if (!Folded) {
         // The constant folder may not have been able to fold the terminator
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 2e3b6943bbfd8..ae6ad74d54fde 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -192,7 +192,7 @@ bool SROA::performPromotion(Function &F) {
     if (Allocas.empty()) break;
 
-    PromoteMemToReg(Allocas, DT, DF, F.getContext());
+    PromoteMemToReg(Allocas, DT, DF);
     NumPromoted += Allocas.size();
     Changed = true;
   }
@@ -469,15 +469,41 @@ void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
     case Instruction::GetElementPtr: {
       GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
       bool AreAllZeroIndices = isFirstElt;
-      if (GEP->getNumOperands() > 1) {
-        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
-            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
-          // Using pointer arithmetic to navigate the array.
-          return MarkUnsafe(Info);
-
-        if (AreAllZeroIndices)
-          AreAllZeroIndices = GEP->hasAllZeroIndices();
+      if (GEP->getNumOperands() > 1 &&
+          (!isa<ConstantInt>(GEP->getOperand(1)) ||
+           !cast<ConstantInt>(GEP->getOperand(1))->isZero()))
+        // Using pointer arithmetic to navigate the array.
+        return MarkUnsafe(Info);
+
+      // Verify that any array subscripts are in range.
+      for (gep_type_iterator GEPIt = gep_type_begin(GEP),
+           E = gep_type_end(GEP); GEPIt != E; ++GEPIt) {
+        // Ignore struct elements, no extra checking needed for these.
+        if (isa<StructType>(*GEPIt))
+          continue;
+
+        // This GEP indexes an array.  Verify that this is an in-range
+        // constant integer. Specifically, consider A[0][i]. We cannot know that
+        // the user isn't doing invalid things like allowing i to index an
+        // out-of-range subscript that accesses A[1].  Because of this, we have
+        // to reject SROA of any accesses into structs where any of the
+        // components are variables.
+        ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
+        if (!IdxVal) return MarkUnsafe(Info);
+
+        // Are all indices still zero?
+        AreAllZeroIndices &= IdxVal->isZero();
+
+        if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPIt)) {
+          if (IdxVal->getZExtValue() >= AT->getNumElements())
+            return MarkUnsafe(Info);
+        } else if (const VectorType *VT = dyn_cast<VectorType>(*GEPIt)) {
+          if (IdxVal->getZExtValue() >= VT->getNumElements())
+            return MarkUnsafe(Info);
+        }
       }
+
+      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
       if (Info.isUnsafe) return;
       break;
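The A[0][i] hazard the new subscript check guards against, restated in C++ terms (hypothetical struct; out-of-range indexing is undefined behavior at the source level, but the IR-level check cannot assume a GEP stays in bounds):

    struct Pair { int A[1]; int B[3]; };

    int Peek(int I) {
      Pair P;
      P.B[0] = 42;
      // If P were split into separate scalars, a non-constant subscript on
      // P.A could, under the old check, reach storage belonging to P.B.
      // The new loop rejects the GEP unless I is a constant in [0, 1).
      return P.A[I];
    }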
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 6a8148040d947..e905952c5db76 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -26,7 +26,6 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Constants.h"
 #include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
 #include "llvm/Attributes.h"
 #include "llvm/Support/CFG.h"
@@ -57,7 +56,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {
 
 /// ChangeToUnreachable - Insert an unreachable instruction before the specified
 /// instruction, making it and the rest of the code in the block dead.
-static void ChangeToUnreachable(Instruction *I, LLVMContext &Context) {
+static void ChangeToUnreachable(Instruction *I) {
   BasicBlock *BB = I->getParent();
   // Loop over all of the successors, removing BB's entry from any PHI
   // nodes.
@@ -95,8 +94,7 @@ static void ChangeToCall(InvokeInst *II) {
 }
 
 static bool MarkAliveBlocks(BasicBlock *BB,
-                            SmallPtrSet<BasicBlock*, 128> &Reachable,
-                            LLVMContext &Context) {
+                            SmallPtrSet<BasicBlock*, 128> &Reachable) {
   SmallVector<BasicBlock*, 128> Worklist;
   Worklist.push_back(BB);
@@ -119,7 +117,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
         // though.
         ++BBI;
         if (!isa<UnreachableInst>(BBI)) {
-          ChangeToUnreachable(BBI, Context);
+          ChangeToUnreachable(BBI);
           Changed = true;
         }
         break;
@@ -135,7 +133,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
         if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
-          ChangeToUnreachable(SI, Context);
+          ChangeToUnreachable(SI);
           Changed = true;
           break;
         }
@@ -161,7 +159,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
 /// otherwise.
 static bool RemoveUnreachableBlocksFromFn(Function &F) {
   SmallPtrSet<BasicBlock*, 128> Reachable;
-  bool Changed = MarkAliveBlocks(F.begin(), Reachable, F.getContext());
+  bool Changed = MarkAliveBlocks(F.begin(), Reachable);
 
   // If there are unreachable blocks in the CFG...
   if (Reachable.size() == F.size())
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 611505ef363ae..f9b929c7e838a 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -81,6 +81,11 @@ public:
   Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len,
                     unsigned Align, IRBuilder<> &B);
 
+  /// EmitMemMove - Emit a call to the memmove function to the builder.  This
+  /// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
+  Value *EmitMemMove(Value *Dst, Value *Src, Value *Len,
+                     unsigned Align, IRBuilder<> &B);
+
   /// EmitMemChr - Emit a call to the memchr function.  This assumes that Ptr is
   /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
   Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B);
@@ -160,6 +165,21 @@ Value *LibCallOptimization::EmitMemCpy(Value *Dst, Value *Src, Value *Len,
                     ConstantInt::get(Type::getInt32Ty(*Context), Align));
 }
 
+/// EmitMemMove - Emit a call to the memmove function to the builder.  This
+/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
+Value *LibCallOptimization::EmitMemMove(Value *Dst, Value *Src, Value *Len,
+                                        unsigned Align, IRBuilder<> &B) {
+  Module *M = Caller->getParent();
+  Intrinsic::ID IID = Intrinsic::memmove;
+  const Type *Tys[1];
+  Tys[0] = TD->getIntPtrType(*Context);
+  Value *MemMove = Intrinsic::getDeclaration(M, IID, Tys, 1);
+  Value *D = CastToCStr(Dst, B);
+  Value *S = CastToCStr(Src, B);
+  Value *A = ConstantInt::get(Type::getInt32Ty(*Context), Align);
+  return B.CreateCall4(MemMove, D, S, Len, A);
+}
+
 /// EmitMemChr - Emit a call to the memchr function.  This assumes that Ptr is
 /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
 Value *LibCallOptimization::EmitMemChr(Value *Ptr, Value *Val,
@@ -512,27 +532,6 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
 }
 
 //===----------------------------------------------------------------------===//
-// Miscellaneous LibCall/Intrinsic Optimizations
-//===----------------------------------------------------------------------===//
-
-namespace {
-struct SizeOpt : public LibCallOptimization {
-  virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
-    // TODO: We can do more with this, but delaying to here should be no change
-    // in behavior.
-    ConstantInt *Const = dyn_cast<ConstantInt>(CI->getOperand(2));
-
-    if (!Const) return 0;
-
-    if (Const->getZExtValue() < 2)
-      return Constant::getAllOnesValue(Const->getType());
-    else
-      return ConstantInt::get(Const->getType(), 0);
-  }
-};
-}
-
-//===----------------------------------------------------------------------===//
 // String and Memory LibCall Optimizations
 //===----------------------------------------------------------------------===//
 
@@ -1010,16 +1009,7 @@ struct MemMoveOpt : public LibCallOptimization {
       return 0;
 
     // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
-    Module *M = Caller->getParent();
-    Intrinsic::ID IID = Intrinsic::memmove;
-    const Type *Tys[1];
-    Tys[0] = TD->getIntPtrType(*Context);
-    Value *MemMove = Intrinsic::getDeclaration(M, IID, Tys, 1);
-    Value *Dst = CastToCStr(CI->getOperand(1), B);
-    Value *Src = CastToCStr(CI->getOperand(2), B);
-    Value *Size = CI->getOperand(3);
-    Value *Align = ConstantInt::get(Type::getInt32Ty(*Context), 1);
-    B.CreateCall4(MemMove, Dst, Src, Size, Align);
+    EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3), 1, B);
     return CI->getOperand(1);
   }
 };
@@ -1048,6 +1038,118 @@ struct MemSetOpt : public LibCallOptimization {
 };
 
 //===----------------------------------------------------------------------===//
+// Object Size Checking Optimizations
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------===//
+// 'object size'
+namespace {
+struct SizeOpt : public LibCallOptimization {
+  virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+    // TODO: We can do more with this, but delaying to here should be no change
+    // in behavior.
+    ConstantInt *Const = dyn_cast<ConstantInt>(CI->getOperand(2));
+
+    if (!Const) return 0;
+
+    const Type *Ty = Callee->getFunctionType()->getReturnType();
+
+    if (Const->getZExtValue() < 2)
+      return Constant::getAllOnesValue(Ty);
+    else
+      return ConstantInt::get(Ty, 0);
+  }
+};
+}
+
+//===---------------------------------------===//
+// 'memcpy_chk' Optimizations
+
+struct MemCpyChkOpt : public LibCallOptimization {
+  virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+    // These optimizations require TargetData.
+    if (!TD) return 0;
+
+    const FunctionType *FT = Callee->getFunctionType();
+    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+        !isa<PointerType>(FT->getParamType(0)) ||
+        !isa<PointerType>(FT->getParamType(1)) ||
+        !isa<IntegerType>(FT->getParamType(3)) ||
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
+      return 0;
+
+    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
+    if (!SizeCI)
+      return 0;
+    if (SizeCI->isAllOnesValue()) {
+      EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3), 1, B);
+      return CI->getOperand(1);
+    }
+
+    return 0;
+  }
+};
+
+//===---------------------------------------===//
+// 'memset_chk' Optimizations
+
+struct MemSetChkOpt : public LibCallOptimization {
+  virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+    // These optimizations require TargetData.
+    if (!TD) return 0;
+
+    const FunctionType *FT = Callee->getFunctionType();
+    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+        !isa<PointerType>(FT->getParamType(0)) ||
+        !isa<IntegerType>(FT->getParamType(1)) ||
+        !isa<IntegerType>(FT->getParamType(3)) ||
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
+      return 0;
+
+    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
+    if (!SizeCI)
+      return 0;
+    if (SizeCI->isAllOnesValue()) {
+      Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context),
+                                   false);
+      EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B);
+      return CI->getOperand(1);
+    }
+
+    return 0;
+  }
+};
+
+//===---------------------------------------===//
+// 'memmove_chk' Optimizations
+
+struct MemMoveChkOpt : public LibCallOptimization {
+  virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+    // These optimizations require TargetData.
+    if (!TD) return 0;
+
+    const FunctionType *FT = Callee->getFunctionType();
+    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+        !isa<PointerType>(FT->getParamType(0)) ||
+        !isa<PointerType>(FT->getParamType(1)) ||
+        !isa<IntegerType>(FT->getParamType(3)) ||
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
+      return 0;
+
+    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
+    if (!SizeCI)
+      return 0;
+    if (SizeCI->isAllOnesValue()) {
+      EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
+                  1, B);
+      return CI->getOperand(1);
+    }
+
+    return 0;
+  }
+};
+
+//===----------------------------------------------------------------------===//
 // Math Library Optimizations
 //===----------------------------------------------------------------------===//
 
@@ -1356,7 +1458,7 @@ struct PrintFOpt : public LibCallOptimization {
     if (FormatStr == "%c" && CI->getNumOperands() > 2 &&
         isa<IntegerType>(CI->getOperand(2)->getType())) {
       Value *Res = EmitPutChar(CI->getOperand(2), B);
-
+
       if (CI->use_empty()) return CI;
       return B.CreateIntCast(Res, CI->getType(), true);
     }
@@ -1586,7 +1688,10 @@ namespace {
     // Formatting and IO Optimizations
     SPrintFOpt SPrintF; PrintFOpt PrintF;
     FWriteOpt FWrite; FPutsOpt FPuts; FPrintFOpt FPrintF;
+
+    // Object Size Checking
     SizeOpt ObjectSize;
+    MemCpyChkOpt MemCpyChk; MemSetChkOpt MemSetChk; MemMoveChkOpt MemMoveChk;
 
     bool Modified;  // This is only used by doInitialization.
   public:
@@ -1692,9 +1797,13 @@ void SimplifyLibCalls::InitOptimizations() {
   Optimizations["fwrite"] = &FWrite;
   Optimizations["fputs"] = &FPuts;
   Optimizations["fprintf"] = &FPrintF;
-
-  // Miscellaneous
-  Optimizations["llvm.objectsize"] = &ObjectSize;
+
+  // Object Size Checking
+  Optimizations["llvm.objectsize.i32"] = &ObjectSize;
+  Optimizations["llvm.objectsize.i64"] = &ObjectSize;
+  Optimizations["__memcpy_chk"] = &MemCpyChk;
+  Optimizations["__memset_chk"] = &MemSetChk;
+  Optimizations["__memmove_chk"] = &MemMoveChk;
 }
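For readers unfamiliar with the _chk family: these entry points come from glibc-style _FORTIFY_SOURCE, where the compiler emits roughly __memcpy_chk(dst, src, n, __builtin_object_size(dst, 0)) -- an assumption about the usual lowering, not something this patch spells out. The GCC/Clang builtin __builtin_object_size reports "unknown" as (size_t)-1 for types 0 and 1 and as 0 for types 2 and 3, matching the two constants SizeOpt returns for llvm.objectsize; an all-ones bound is also exactly the case the _chk optimizers fold back into the plain mem* intrinsics. A C++ sketch:

    #include <cstddef>
    #include <cstring>

    size_t Copy(char *Dst, const char *Src, size_t N) {
      // Dst points to an object of unknown size, so the bound below is
      // (size_t)-1, and a fortified __memcpy_chk(Dst, Src, N, Bound) call
      // degenerates to a plain memcpy -- what MemCpyChkOpt now emits.
      size_t Bound = __builtin_object_size(Dst, 0);
      memcpy(Dst, Src, N);
      return Bound;
    }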
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index 44a2c1f851819..690972dc558b0 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -477,8 +477,13 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
   SmallVector<BasicBlock*, 8> OuterLoopPreds;
   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
     if (PN->getIncomingValue(i) != PN ||
-        !L->contains(PN->getIncomingBlock(i)))
+        !L->contains(PN->getIncomingBlock(i))) {
+      // We can't split indirectbr edges.
+      if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
+        return 0;
+
       OuterLoopPreds.push_back(PN->getIncomingBlock(i));
+    }
 
   BasicBlock *Header = L->getHeader();
   BasicBlock *NewBB = SplitBlockPredecessors(Header, &OuterLoopPreds[0],
diff --git a/lib/Transforms/Utils/Mem2Reg.cpp b/lib/Transforms/Utils/Mem2Reg.cpp
index 941660436b46c..99203b662120e 100644
--- a/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/lib/Transforms/Utils/Mem2Reg.cpp
@@ -73,7 +73,7 @@ bool PromotePass::runOnFunction(Function &F) {
     if (Allocas.empty()) break;
 
-    PromoteMemToReg(Allocas, DT, DF, F.getContext());
+    PromoteMemToReg(Allocas, DT, DF);
     NumPromoted += Allocas.size();
     Changed = true;
   }
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index de6ad1dde580b..e25f9e2a999a0 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -23,7 +23,6 @@
 #include "llvm/Function.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/AliasSetTracker.h"
 #include "llvm/ADT/DenseMap.h"
@@ -180,8 +179,6 @@ namespace {
     ///
     AliasSetTracker *AST;
 
-    LLVMContext &Context;
-
     /// AllocaLookup - Reverse mapping of Allocas.
     ///
     std::map<AllocaInst*, unsigned> AllocaLookup;
@@ -212,9 +209,8 @@ namespace {
     DenseMap<const BasicBlock*, unsigned> BBNumPreds;
   public:
     PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
-                   DominanceFrontier &df, AliasSetTracker *ast,
-                   LLVMContext &C)
-      : Allocas(A), DT(dt), DF(df), AST(ast), Context(C) {}
+                   DominanceFrontier &df, AliasSetTracker *ast)
+      : Allocas(A), DT(dt), DF(df), AST(ast) {}
 
     void run();
 
@@ -1003,9 +999,9 @@ NextIteration:
 ///
 void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                            DominatorTree &DT, DominanceFrontier &DF,
-                           LLVMContext &Context, AliasSetTracker *AST) {
+                           AliasSetTracker *AST) {
   // If there is nothing to do, bail out...
   if (Allocas.empty()) return;
-  PromoteMem2Reg(Allocas, DT, DF, AST, Context).run();
+  PromoteMem2Reg(Allocas, DT, DF, AST).run();
 }
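Finally, the shape of a caller after the LLVMContext parameter removal, as Mem2Reg.cpp itself now does it -- a hedged sketch using the era's headers (isAllocaPromotable is the standard gate declared alongside PromoteMemToReg; DT/DF would come from getAnalysis<> in a real pass):

    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"
    #include "llvm/Analysis/Dominators.h"
    #include "llvm/Transforms/Utils/PromoteMemToReg.h"
    #include <vector>
    using namespace llvm;

    static void PromoteEntryBlockAllocas(BasicBlock &Entry, DominatorTree &DT,
                                         DominanceFrontier &DF) {
      std::vector<AllocaInst*> Allocas;
      for (BasicBlock::iterator I = Entry.begin(), E = Entry.end(); I != E; ++I)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
          if (isAllocaPromotable(AI))   // only safe, address-not-taken allocas
            Allocas.push_back(AI);
      if (!Allocas.empty())
        PromoteMemToReg(Allocas, DT, DF);  // no LLVMContext argument anymore
    }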