Diffstat (limited to 'contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp')
 -rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp | 258
 1 file changed, 64 insertions(+), 194 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 5363a851fc27..401f1ee5a55d 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -22,11 +22,8 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
@@ -276,7 +273,9 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
}
// If we haven't found this binop, insert it.
- Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
+ // TODO: Use the Builder, which will make CreateBinOp below fold with
+ // InstSimplifyFolder.
+ Instruction *BO = Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
BO->setDebugLoc(Loc);
if (Flags & SCEV::FlagNUW)
BO->setHasNoUnsignedWrap();
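The InsertBinop change above sidesteps the builder's folder: BinaryOperator::Create always produces a fresh Instruction, so the debug location and the nuw/nsw flags set just below have something concrete to attach to, whereas Builder.CreateBinOp may fold and return a Constant (the TODO records the intent to route this through an InstSimplifyFolder-backed builder later). A minimal sketch of the pattern, with a hypothetical helper name:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/InstrTypes.h"

    using namespace llvm;

    // Always materialize the binop as an Instruction so flags and debug info
    // can be set on it afterwards; a plain CreateBinOp call could constant-
    // fold and hand back a non-Instruction Value instead.
    static Instruction *emitBinOpAsInstruction(IRBuilder<> &Builder,
                                               Instruction::BinaryOps Opcode,
                                               Value *LHS, Value *RHS) {
      return Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
    }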
@@ -591,7 +590,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
if (isa<DbgInfoIntrinsic>(IP))
ScanLimit++;
if (IP->getOpcode() == Instruction::GetElementPtr &&
- IP->getOperand(0) == V && IP->getOperand(1) == Idx)
+ IP->getOperand(0) == V && IP->getOperand(1) == Idx &&
+ cast<GEPOperator>(&*IP)->getSourceElementType() ==
+ Type::getInt8Ty(Ty->getContext()))
return &*IP;
if (IP == BlockBegin) break;
}
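The extra condition in the reuse scan above only accepts an existing GEP when its source element type is i8, i.e. when it performs the same byte-wise offsetting the expander is about to emit; matching the pointer and index operands alone is not enough once the element type is no longer implied by the pointer type. A rough sketch of the predicate, as a hypothetical standalone helper:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Operator.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    // Does this instruction already compute "V + Idx" as an i8 (byte-wise)
    // GEP that the expander could reuse instead of emitting a new one?
    static bool isReusableByteGEP(Instruction *IP, Value *V, Value *Idx) {
      if (IP->getOpcode() != Instruction::GetElementPtr)
        return false;
      auto *GEP = cast<GEPOperator>(IP);
      return GEP->getOperand(0) == V && GEP->getOperand(1) == Idx &&
             GEP->getSourceElementType() == Type::getInt8Ty(IP->getContext());
    }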
@@ -1633,7 +1634,6 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
NewS = Ext;
const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
- //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
// Truncate the result down to the original type, if needed.
const SCEV *T = SE.getTruncateOrNoop(V, Ty);
@@ -1671,154 +1671,49 @@ Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
return Builder.CreateSExt(V, Ty);
}
-Value *SCEVExpander::expandSMaxExpr(const SCEVNAryExpr *S) {
- Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- Type *Ty = LHS->getType();
- for (int i = S->getNumOperands()-2; i >= 0; --i) {
- // In the case of mixed integer and pointer types, do the
- // rest of the comparisons as integer.
- Type *OpTy = S->getOperand(i)->getType();
- if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
- Ty = SE.getEffectiveSCEVType(Ty);
- LHS = InsertNoopCastOfTo(LHS, Ty);
- }
- Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
- Value *Sel;
- if (Ty->isIntegerTy())
- Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS},
- /*FMFSource=*/nullptr, "smax");
- else {
- Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
- Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
- }
- LHS = Sel;
- }
- // In the case of mixed integer and pointer types, cast the
- // final result back to the pointer type.
- if (LHS->getType() != S->getType())
- LHS = InsertNoopCastOfTo(LHS, S->getType());
- return LHS;
-}
-
-Value *SCEVExpander::expandUMaxExpr(const SCEVNAryExpr *S) {
- Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- Type *Ty = LHS->getType();
- for (int i = S->getNumOperands()-2; i >= 0; --i) {
- // In the case of mixed integer and pointer types, do the
- // rest of the comparisons as integer.
- Type *OpTy = S->getOperand(i)->getType();
- if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
- Ty = SE.getEffectiveSCEVType(Ty);
- LHS = InsertNoopCastOfTo(LHS, Ty);
- }
- Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
- Value *Sel;
- if (Ty->isIntegerTy())
- Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS},
- /*FMFSource=*/nullptr, "umax");
- else {
- Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
- Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
- }
- LHS = Sel;
- }
- // In the case of mixed integer and pointer types, cast the
- // final result back to the pointer type.
- if (LHS->getType() != S->getType())
- LHS = InsertNoopCastOfTo(LHS, S->getType());
- return LHS;
-}
-
-Value *SCEVExpander::expandSMinExpr(const SCEVNAryExpr *S) {
- Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
- Type *Ty = LHS->getType();
- for (int i = S->getNumOperands() - 2; i >= 0; --i) {
- // In the case of mixed integer and pointer types, do the
- // rest of the comparisons as integer.
- Type *OpTy = S->getOperand(i)->getType();
- if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
- Ty = SE.getEffectiveSCEVType(Ty);
- LHS = InsertNoopCastOfTo(LHS, Ty);
- }
- Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
- Value *Sel;
- if (Ty->isIntegerTy())
- Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS},
- /*FMFSource=*/nullptr, "smin");
- else {
- Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
- Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
- }
- LHS = Sel;
- }
- // In the case of mixed integer and pointer types, cast the
- // final result back to the pointer type.
- if (LHS->getType() != S->getType())
- LHS = InsertNoopCastOfTo(LHS, S->getType());
- return LHS;
-}
-
-Value *SCEVExpander::expandUMinExpr(const SCEVNAryExpr *S) {
+Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
+ Intrinsic::ID IntrinID, Twine Name,
+ bool IsSequential) {
Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
Type *Ty = LHS->getType();
+ if (IsSequential)
+ LHS = Builder.CreateFreeze(LHS);
for (int i = S->getNumOperands() - 2; i >= 0; --i) {
- // In the case of mixed integer and pointer types, do the
- // rest of the comparisons as integer.
- Type *OpTy = S->getOperand(i)->getType();
- if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
- Ty = SE.getEffectiveSCEVType(Ty);
- LHS = InsertNoopCastOfTo(LHS, Ty);
- }
Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
+ if (IsSequential && i != 0)
+ RHS = Builder.CreateFreeze(RHS);
Value *Sel;
if (Ty->isIntegerTy())
- Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
- /*FMFSource=*/nullptr, "umin");
+ Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
+ /*FMFSource=*/nullptr, Name);
else {
- Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
- Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
+ Value *ICmp =
+ Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
+ Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
}
LHS = Sel;
}
- // In the case of mixed integer and pointer types, cast the
- // final result back to the pointer type.
- if (LHS->getType() != S->getType())
- LHS = InsertNoopCastOfTo(LHS, S->getType());
return LHS;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
- return expandSMaxExpr(S);
+ return expandMinMaxExpr(S, Intrinsic::smax, "smax");
}
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
- return expandUMaxExpr(S);
+ return expandMinMaxExpr(S, Intrinsic::umax, "umax");
}
Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
- return expandSMinExpr(S);
+ return expandMinMaxExpr(S, Intrinsic::smin, "smin");
}
Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
- return expandUMinExpr(S);
+ return expandMinMaxExpr(S, Intrinsic::umin, "umin");
}
Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
- SmallVector<Value *> Ops;
- for (const SCEV *Op : S->operands())
- Ops.emplace_back(expand(Op));
-
- Value *SaturationPoint =
- MinMaxIntrinsic::getSaturationPoint(Intrinsic::umin, S->getType());
-
- SmallVector<Value *> OpIsZero;
- for (Value *Op : ArrayRef<Value *>(Ops).drop_back())
- OpIsZero.emplace_back(Builder.CreateICmpEQ(Op, SaturationPoint));
-
- Value *AnyOpIsZero = Builder.CreateLogicalOr(OpIsZero);
-
- Value *NaiveUMin = expandUMinExpr(S);
- return Builder.CreateSelect(AnyOpIsZero, SaturationPoint, NaiveUMin);
+ return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
}
Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
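The four near-identical expand{S,U}{Max,Min}Expr bodies above collapse into a single expandMinMaxExpr parameterized by intrinsic ID, and the sequential umin form is lowered in the same loop: every operand except the first is frozen so that poison in a later operand cannot bypass an earlier operand that already saturates to zero. A simplified, integer-only sketch of that chain (helper name and left-to-right operand order are assumptions; the patch itself iterates the SCEV operands right to left and keeps an icmp+select fallback for pointer-typed operands via MinMaxIntrinsic::getPredicate):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"

    using namespace llvm;

    // Ops holds already-expanded integer values of one type, in sequential
    // evaluation order. For the umin_seq lowering, each operand after the
    // first is frozen before it feeds the intrinsic chain.
    static Value *emitMinMaxChain(IRBuilder<> &Builder, ArrayRef<Value *> Ops,
                                  Intrinsic::ID IID, bool IsSequential) {
      Value *Acc = Ops.front();
      for (Value *Op : Ops.drop_front()) {
        if (IsSequential)
          Op = Builder.CreateFreeze(Op);
        Acc = Builder.CreateIntrinsic(IID, {Acc->getType()}, {Acc, Op});
      }
      return Acc;
    }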
@@ -1868,35 +1763,33 @@ Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
return V;
}
-ScalarEvolution::ValueOffsetPair
-SCEVExpander::FindValueInExprValueMap(const SCEV *S,
- const Instruction *InsertPt) {
- auto *Set = SE.getSCEVValues(S);
+Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
+ const Instruction *InsertPt) {
// If the expansion is not in CanonicalMode, and the SCEV contains any
// sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
- if (CanonicalMode || !SE.containsAddRecurrence(S)) {
- // If S is scConstant, it may be worse to reuse an existing Value.
- if (S->getSCEVType() != scConstant && Set) {
- // Choose a Value from the set which dominates the InsertPt.
- // InsertPt should be inside the Value's parent loop so as not to break
- // the LCSSA form.
- for (auto const &VOPair : *Set) {
- Value *V = VOPair.first;
- ConstantInt *Offset = VOPair.second;
- Instruction *EntInst = dyn_cast_or_null<Instruction>(V);
- if (!EntInst)
- continue;
+ if (!CanonicalMode && SE.containsAddRecurrence(S))
+ return nullptr;
- assert(EntInst->getFunction() == InsertPt->getFunction());
- if (S->getType() == V->getType() &&
- SE.DT.dominates(EntInst, InsertPt) &&
- (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
- SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
- return {V, Offset};
- }
- }
+ // If S is a constant, it may be worse to reuse an existing Value.
+ if (isa<SCEVConstant>(S))
+ return nullptr;
+
+ // Choose a Value from the set which dominates the InsertPt.
+ // InsertPt should be inside the Value's parent loop so as not to break
+ // the LCSSA form.
+ for (Value *V : SE.getSCEVValues(S)) {
+ Instruction *EntInst = dyn_cast<Instruction>(V);
+ if (!EntInst)
+ continue;
+
+ assert(EntInst->getFunction() == InsertPt->getFunction());
+ if (S->getType() == V->getType() &&
+ SE.DT.dominates(EntInst, InsertPt) &&
+ (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
+ SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
+ return V;
}
- return {nullptr, nullptr};
+ return nullptr;
}
// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
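FindValueInExprValueMap now returns a plain Value * rather than a ScalarEvolution::ValueOffsetPair, and the inverted early exits spell out when reuse is allowed at all: never in literal (non-canonical) mode when the SCEV contains a recurrence, never for constants, and otherwise only for a cached instruction that dominates the insertion point without breaking LCSSA. A compressed sketch of that last filter, as an assumed free function rather than the member itself:

    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Instruction.h"

    using namespace llvm;

    // A cached instruction may stand in for the expansion only if it
    // dominates InsertPt and InsertPt stays inside the instruction's parent
    // loop, so reusing it cannot break LCSSA form.
    static bool canReuseCachedValue(const Instruction *EntInst,
                                    const Instruction *InsertPt,
                                    const DominatorTree &DT,
                                    const LoopInfo &LI) {
      if (!DT.dominates(EntInst, InsertPt))
        return false;
      const Loop *L = LI.getLoopFor(EntInst->getParent());
      return !L || L->contains(InsertPt);
    }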
@@ -1965,9 +1858,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
Builder.SetInsertPoint(InsertPt);
// Expand the expression into instructions.
- ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
- Value *V = VO.first;
-
+ Value *V = FindValueInExprValueMap(S, InsertPt);
if (!V)
V = visit(S);
else {
@@ -1978,21 +1869,6 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (auto *I = dyn_cast<Instruction>(V))
if (I->hasPoisonGeneratingFlags() && !programUndefinedIfPoison(I))
I->dropPoisonGeneratingFlags();
-
- if (VO.second) {
- if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
- int64_t Offset = VO.second->getSExtValue();
- ConstantInt *Idx =
- ConstantInt::getSigned(VO.second->getType(), -Offset);
- unsigned AS = Vty->getAddressSpace();
- V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
- V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
- "uglygep");
- V = Builder.CreateBitCast(V, Vty);
- } else {
- V = Builder.CreateSub(V, VO.second);
- }
- }
}
// Remember the expanded value for this SCEV at this location.
//
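With offsets gone from the value cache, the reuse path in expand() above no longer needs the bitcast/GEP/sub fix-up that used to re-apply a stored offset; the only remaining adjustment when a cached instruction is reused is dropping poison-generating flags that cannot be justified at the new use, restated here as a hedged standalone helper:

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Instruction.h"

    using namespace llvm;

    // If the reused instruction carries flags such as nuw/nsw that are not
    // guaranteed to hold for every new user, strip them rather than risk
    // introducing poison at the reuse site.
    static void stripUnjustifiedPoisonFlags(Value *V) {
      if (auto *I = dyn_cast<Instruction>(V))
        if (I->hasPoisonGeneratingFlags() && !programUndefinedIfPoison(I))
          I->dropPoisonGeneratingFlags();
    }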
@@ -2058,7 +1934,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// so narrow phis can reuse them.
for (PHINode *Phi : Phis) {
auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
- if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
+ if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
return V;
if (!SE.isSCEVable(PN->getType()))
return nullptr;
@@ -2174,9 +2050,9 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
return NumElim;
}
-Optional<ScalarEvolution::ValueOffsetPair>
-SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
- Loop *L) {
+Value *SCEVExpander::getRelatedExistingExpansion(const SCEV *S,
+ const Instruction *At,
+ Loop *L) {
using namespace llvm::PatternMatch;
SmallVector<BasicBlock *, 4> ExitingBlocks;
@@ -2193,25 +2069,17 @@ SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
continue;
if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
- return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
+ return LHS;
if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
- return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
+ return RHS;
}
// Use expand's logic which is used for reusing a previous Value in
// ExprValueMap. Note that we don't currently model the cost of
// needing to drop poison generating flags on the instruction if we
// want to reuse it. We effectively assume that has zero cost.
- ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
- if (VO.first)
- return VO;
-
- // There is potential to make this significantly smarter, but this simple
- // heuristic already gets some interesting cases.
-
- // Can not find suitable value.
- return None;
+ return FindValueInExprValueMap(S, At);
}
template<typename T> static InstructionCost costAndCollectOperands(
@@ -2469,8 +2337,8 @@ Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
switch (Pred->getKind()) {
case SCEVPredicate::P_Union:
return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
- case SCEVPredicate::P_Equal:
- return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
+ case SCEVPredicate::P_Compare:
+ return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
case SCEVPredicate::P_Wrap: {
auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
return expandWrapPredicate(AddRecPred, IP);
@@ -2479,15 +2347,16 @@ Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
llvm_unreachable("Unknown SCEV predicate type");
}
-Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
- Instruction *IP) {
+Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
+ Instruction *IP) {
Value *Expr0 =
expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
Value *Expr1 =
expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
Builder.SetInsertPoint(IP);
- auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
+ auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
+ auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
return I;
}
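expandComparePredicate generalizes the old equality-only expansion: the emitted value still means "predicate violated", but the comparison is now the inverse of whatever predicate the SCEVComparePredicate carries instead of a hard-coded icmp ne. A small sketch of the inversion step, as a hypothetical helper:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Build the "check failed" condition for an assumed runtime predicate:
    // if LHS <Pred> RHS must hold, the guard tests its inverse.
    static Value *emitPredicateViolationCheck(IRBuilder<> &Builder,
                                              ICmpInst::Predicate Pred,
                                              Value *LHS, Value *RHS) {
      return Builder.CreateICmp(ICmpInst::getInversePredicate(Pred), LHS, RHS,
                                "ident.check");
    }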
@@ -2496,7 +2365,8 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
assert(AR->isAffine() && "Cannot generate RT check for "
"non-affine expression");
- SCEVUnionPredicate Pred;
+ // FIXME: It is highly suspicious that we're ignoring the predicates here.
+ SmallVector<const SCEVPredicate *, 4> Pred;
const SCEV *ExitCount =
SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
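The predicate container passed to getPredicatedBackedgeTakenCount changes from a SCEVUnionPredicate to a SmallVector of SCEVPredicate pointers, matching the newer ScalarEvolution interface; as the added FIXME notes, the collected predicates are still ignored by this overflow check. A minimal call-site sketch under that interface, with an assumed wrapper name:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"

    using namespace llvm;

    // Query the predicated trip count; Preds receives the predicates that
    // must hold for the returned count to be valid (currently left unused
    // by the overflow check, per the FIXME above).
    static const SCEV *getGuardedExitCount(ScalarEvolution &SE, const Loop *L) {
      SmallVector<const SCEVPredicate *, 4> Preds;
      return SE.getPredicatedBackedgeTakenCount(L, Preds);
    }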
@@ -2710,10 +2580,10 @@ namespace {
struct SCEVFindUnsafe {
ScalarEvolution &SE;
bool CanonicalMode;
- bool IsUnsafe;
+ bool IsUnsafe = false;
SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
- : SE(SE), CanonicalMode(CanonicalMode), IsUnsafe(false) {}
+ : SE(SE), CanonicalMode(CanonicalMode) {}
bool follow(const SCEV *S) {
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {