Diffstat (limited to 'unittests/Analysis/ScalarEvolutionTest.cpp')
-rw-r--r--  unittests/Analysis/ScalarEvolutionTest.cpp  118
1 file changed, 108 insertions, 10 deletions
diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp
index f4370842edb5..df9fd4b5ec33 100644
--- a/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -51,13 +51,13 @@ protected:
     return ScalarEvolution(F, TLI, *AC, *DT, *LI);
   }
 
-  void runWithFunctionAndSE(
+  void runWithSE(
       Module &M, StringRef FuncName,
-      function_ref<void(Function &F, ScalarEvolution &SE)> Test) {
+      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
     auto *F = M.getFunction(FuncName);
     ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
     ScalarEvolution SE = buildSE(*F);
-    Test(*F, SE);
+    Test(*F, *LI, SE);
   }
 };
 
@@ -306,9 +306,11 @@ TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
   // %bitcast2 = bitcast i8* %select to i32*
   // br i1 undef, label %loop, label %exit
 
+  const DataLayout &DL = F->getParent()->getDataLayout();
   BranchInst *Br = BranchInst::Create(
       LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
-  AllocaInst *Alloca = new AllocaInst(I32Ty, "alloca", Br);
+  AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
+                                      "alloca", Br);
   ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
   GetElementPtrInst *Gep0 =
       GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
@@ -417,7 +419,7 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
   assert(M && "Could not parse module?");
   assert(!verifyModule(*M) && "Must have been well formed!");
 
-  runWithFunctionAndSE(*M, "f_1", [&](Function &F, ScalarEvolution &SE) {
+  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
     auto *IV0 = getInstructionByName(F, "iv0");
     auto *IV0Inc = getInstructionByName(F, "iv0.inc");
 
@@ -458,11 +460,12 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
   };
 
   for (StringRef FuncName : {"f_2", "f_3", "f_4"})
-    runWithFunctionAndSE(*M, FuncName, [&](Function &F, ScalarEvolution &SE) {
-      CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
-                               SE.getSCEV(getInstructionByName(F, "y")),
-                               SE.getSCEV(getInstructionByName(F, "z")));
-    });
+    runWithSE(
+        *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+          CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
+                                   SE.getSCEV(getInstructionByName(F, "y")),
+                                   SE.getSCEV(getInstructionByName(F, "z")));
+        });
 }
 
 TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
@@ -568,5 +571,100 @@ TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
   EXPECT_NE(A, B);
 }
 
+TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
+  Type *Ty32 = Type::getInt32Ty(Context);
+  Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+
+  Argument *A1 = &*F->arg_begin();
+  Argument *A2 = &*(std::next(F->arg_begin()));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+
+  Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
+  Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
+  Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
+  Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
+  Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+  // FIXME: The size of this is arbitrary and doesn't seem to change the
+  // result, but SCEV will do quadratic work for these so a large number here
+  // will be extremely slow. We should revisit what and how this is testing
+  // SCEV.
+  for (int i = 0; i < 10; i++) {
+    Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
+    Add1 = Add2;
+    Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+  }
+
+  ReturnInst::Create(Context, nullptr, EntryBB);
+  ScalarEvolution SE = buildSE(*F);
+  EXPECT_NE(nullptr, SE.getSCEV(Mul1));
+}
+
+static Instruction &GetInstByName(Function &F, StringRef Name) {
+  for (auto &I : instructions(F))
+    if (I.getName() == Name)
+      return I;
+  llvm_unreachable("Could not find instructions!");
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "@var_0 = external global i32, align 4"
+      "@var_1 = external global i32, align 4"
+      "@var_2 = external global i32, align 4"
+      " "
+      "declare i32 @unknown(i32, i32, i32)"
+      " "
+      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+      " local_unnamed_addr { "
+      "entry: "
+      " br label %loop.ph "
+      " "
+      "loop.ph: "
+      " br label %loop "
+      " "
+      "loop: "
+      " %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
+      " %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
+      " %iv0.inc = add i32 %iv0, 1 "
+      " %iv1.inc = add i32 %iv1, 3 "
+      " br i1 undef, label %for.end.loopexit, label %loop "
+      " "
+      "for.end.loopexit: "
+      " ret void "
+      "} "
+      ,
+      Err, C);
+
+  assert(M && "Could not parse module?");
+  assert(!verifyModule(*M) && "Must have been well formed!");
+
+  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+    auto &I0 = GetInstByName(F, "iv0");
+    auto &I1 = *I0.getNextNode();
+
+    auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
+    PostIncLoopSet Loops;
+    Loops.insert(S0->getLoop());
+    auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
+    auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
+    EXPECT_EQ(S0, D0) << *S0 << " " << *D0;
+
+    auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
+    Loops.clear();
+    Loops.insert(S1->getLoop());
+    auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
+    auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
+    EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
+  });
+}
+
 } // end anonymous namespace
 } // end namespace llvm