Diffstat (limited to 'unittests/Analysis')
-rw-r--r--  unittests/Analysis/BlockFrequencyInfoTest.cpp  |   8
-rw-r--r--  unittests/Analysis/CMakeLists.txt              |   8
-rw-r--r--  unittests/Analysis/LazyCallGraphTest.cpp       | 622
-rw-r--r--  unittests/Analysis/LoopInfoTest.cpp            | 158
-rw-r--r--  unittests/Analysis/MemorySSA.cpp               | 865
-rw-r--r--  unittests/Analysis/ProfileSummaryInfoTest.cpp  | 198
-rw-r--r--  unittests/Analysis/ScalarEvolutionTest.cpp     | 118
-rw-r--r--  unittests/Analysis/TargetLibraryInfoTest.cpp   | 481
-rw-r--r--  unittests/Analysis/UnrollAnalyzer.cpp          |  11
-rw-r--r--  unittests/Analysis/ValueTrackingTest.cpp       |  21
10 files changed, 2123 insertions(+), 367 deletions(-)
diff --git a/unittests/Analysis/BlockFrequencyInfoTest.cpp b/unittests/Analysis/BlockFrequencyInfoTest.cpp
index b3b0fcfb049b..c5c9d4dea055 100644
--- a/unittests/Analysis/BlockFrequencyInfoTest.cpp
+++ b/unittests/Analysis/BlockFrequencyInfoTest.cpp
@@ -80,6 +80,14 @@ TEST_F(BlockFrequencyInfoTest, Basic) {
EXPECT_EQ(BFI.getBlockProfileCount(BB3).getValue(), UINT64_C(100));
EXPECT_EQ(BFI.getBlockProfileCount(BB1).getValue(), 100 * BB1Freq / BB0Freq);
EXPECT_EQ(BFI.getBlockProfileCount(BB2).getValue(), 100 * BB2Freq / BB0Freq);
+
+ // Scale the frequencies of BB0, BB1 and BB2 by a factor of two.
+ SmallPtrSet<BasicBlock *, 4> BlocksToScale({BB1, BB2});
+ BFI.setBlockFreqAndScale(&BB0, BB0Freq * 2, BlocksToScale);
+ EXPECT_EQ(BFI.getBlockFreq(&BB0).getFrequency(), 2 * BB0Freq);
+ EXPECT_EQ(BFI.getBlockFreq(BB1).getFrequency(), 2 * BB1Freq);
+ EXPECT_EQ(BFI.getBlockFreq(BB2).getFrequency(), 2 * BB2Freq);
+ EXPECT_EQ(BFI.getBlockFreq(BB3).getFrequency(), BB3Freq);
}
} // end anonymous namespace
diff --git a/unittests/Analysis/CMakeLists.txt b/unittests/Analysis/CMakeLists.txt
index ff4c17ee3b6b..40d5ea5f5ad7 100644
--- a/unittests/Analysis/CMakeLists.txt
+++ b/unittests/Analysis/CMakeLists.txt
@@ -9,13 +9,17 @@ add_llvm_unittest(AnalysisTests
AliasAnalysisTest.cpp
BlockFrequencyInfoTest.cpp
BranchProbabilityInfoTest.cpp
- CallGraphTest.cpp
CFGTest.cpp
CGSCCPassManagerTest.cpp
+ CallGraphTest.cpp
LazyCallGraphTest.cpp
+ LoopInfoTest.cpp
MemoryBuiltinsTest.cpp
+ MemorySSA.cpp
+ ProfileSummaryInfoTest.cpp
ScalarEvolutionTest.cpp
TBAATest.cpp
- ValueTrackingTest.cpp
+ TargetLibraryInfoTest.cpp
UnrollAnalyzer.cpp
+ ValueTrackingTest.cpp
)
diff --git a/unittests/Analysis/LazyCallGraphTest.cpp b/unittests/Analysis/LazyCallGraphTest.cpp
index 5bb9dec3449f..6955beb37109 100644
--- a/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/unittests/Analysis/LazyCallGraphTest.cpp
@@ -225,29 +225,29 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
// the IR, and everything in our module is an entry node, so just directly
// build variables for each node.
auto I = CG.begin();
- LazyCallGraph::Node &A1 = (I++)->getNode(CG);
+ LazyCallGraph::Node &A1 = (I++)->getNode();
EXPECT_EQ("a1", A1.getFunction().getName());
- LazyCallGraph::Node &A2 = (I++)->getNode(CG);
+ LazyCallGraph::Node &A2 = (I++)->getNode();
EXPECT_EQ("a2", A2.getFunction().getName());
- LazyCallGraph::Node &A3 = (I++)->getNode(CG);
+ LazyCallGraph::Node &A3 = (I++)->getNode();
EXPECT_EQ("a3", A3.getFunction().getName());
- LazyCallGraph::Node &B1 = (I++)->getNode(CG);
+ LazyCallGraph::Node &B1 = (I++)->getNode();
EXPECT_EQ("b1", B1.getFunction().getName());
- LazyCallGraph::Node &B2 = (I++)->getNode(CG);
+ LazyCallGraph::Node &B2 = (I++)->getNode();
EXPECT_EQ("b2", B2.getFunction().getName());
- LazyCallGraph::Node &B3 = (I++)->getNode(CG);
+ LazyCallGraph::Node &B3 = (I++)->getNode();
EXPECT_EQ("b3", B3.getFunction().getName());
- LazyCallGraph::Node &C1 = (I++)->getNode(CG);
+ LazyCallGraph::Node &C1 = (I++)->getNode();
EXPECT_EQ("c1", C1.getFunction().getName());
- LazyCallGraph::Node &C2 = (I++)->getNode(CG);
+ LazyCallGraph::Node &C2 = (I++)->getNode();
EXPECT_EQ("c2", C2.getFunction().getName());
- LazyCallGraph::Node &C3 = (I++)->getNode(CG);
+ LazyCallGraph::Node &C3 = (I++)->getNode();
EXPECT_EQ("c3", C3.getFunction().getName());
- LazyCallGraph::Node &D1 = (I++)->getNode(CG);
+ LazyCallGraph::Node &D1 = (I++)->getNode();
EXPECT_EQ("d1", D1.getFunction().getName());
- LazyCallGraph::Node &D2 = (I++)->getNode(CG);
+ LazyCallGraph::Node &D2 = (I++)->getNode();
EXPECT_EQ("d2", D2.getFunction().getName());
- LazyCallGraph::Node &D3 = (I++)->getNode(CG);
+ LazyCallGraph::Node &D3 = (I++)->getNode();
EXPECT_EQ("d3", D3.getFunction().getName());
EXPECT_EQ(CG.end(), I);
@@ -255,7 +255,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
// independent of order.
std::vector<std::string> Nodes;
- for (LazyCallGraph::Edge &E : A1)
+ for (LazyCallGraph::Edge &E : A1.populate())
Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("a2", Nodes[0]);
@@ -263,43 +263,53 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_EQ("c3", Nodes[2]);
Nodes.clear();
- EXPECT_EQ(A2.end(), std::next(A2.begin()));
- EXPECT_EQ("a3", A2.begin()->getFunction().getName());
- EXPECT_EQ(A3.end(), std::next(A3.begin()));
- EXPECT_EQ("a1", A3.begin()->getFunction().getName());
+ A2.populate();
+ EXPECT_EQ(A2->end(), std::next(A2->begin()));
+ EXPECT_EQ("a3", A2->begin()->getFunction().getName());
+ A3.populate();
+ EXPECT_EQ(A3->end(), std::next(A3->begin()));
+ EXPECT_EQ("a1", A3->begin()->getFunction().getName());
- for (LazyCallGraph::Edge &E : B1)
+ for (LazyCallGraph::Edge &E : B1.populate())
Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("b2", Nodes[0]);
EXPECT_EQ("d3", Nodes[1]);
Nodes.clear();
- EXPECT_EQ(B2.end(), std::next(B2.begin()));
- EXPECT_EQ("b3", B2.begin()->getFunction().getName());
- EXPECT_EQ(B3.end(), std::next(B3.begin()));
- EXPECT_EQ("b1", B3.begin()->getFunction().getName());
+ B2.populate();
+ EXPECT_EQ(B2->end(), std::next(B2->begin()));
+ EXPECT_EQ("b3", B2->begin()->getFunction().getName());
+ B3.populate();
+ EXPECT_EQ(B3->end(), std::next(B3->begin()));
+ EXPECT_EQ("b1", B3->begin()->getFunction().getName());
- for (LazyCallGraph::Edge &E : C1)
+ for (LazyCallGraph::Edge &E : C1.populate())
Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("c2", Nodes[0]);
EXPECT_EQ("d2", Nodes[1]);
Nodes.clear();
- EXPECT_EQ(C2.end(), std::next(C2.begin()));
- EXPECT_EQ("c3", C2.begin()->getFunction().getName());
- EXPECT_EQ(C3.end(), std::next(C3.begin()));
- EXPECT_EQ("c1", C3.begin()->getFunction().getName());
-
- EXPECT_EQ(D1.end(), std::next(D1.begin()));
- EXPECT_EQ("d2", D1.begin()->getFunction().getName());
- EXPECT_EQ(D2.end(), std::next(D2.begin()));
- EXPECT_EQ("d3", D2.begin()->getFunction().getName());
- EXPECT_EQ(D3.end(), std::next(D3.begin()));
- EXPECT_EQ("d1", D3.begin()->getFunction().getName());
+ C2.populate();
+ EXPECT_EQ(C2->end(), std::next(C2->begin()));
+ EXPECT_EQ("c3", C2->begin()->getFunction().getName());
+ C3.populate();
+ EXPECT_EQ(C3->end(), std::next(C3->begin()));
+ EXPECT_EQ("c1", C3->begin()->getFunction().getName());
+
+ D1.populate();
+ EXPECT_EQ(D1->end(), std::next(D1->begin()));
+ EXPECT_EQ("d2", D1->begin()->getFunction().getName());
+ D2.populate();
+ EXPECT_EQ(D2->end(), std::next(D2->begin()));
+ EXPECT_EQ("d3", D2->begin()->getFunction().getName());
+ D3.populate();
+ EXPECT_EQ(D3->end(), std::next(D3->begin()));
+ EXPECT_EQ("d1", D3->begin()->getFunction().getName());
// Now let's look at the RefSCCs and SCCs.
+ CG.buildRefSCCs();
auto J = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &D = *J++;
@@ -401,32 +411,35 @@ TEST(LazyCallGraphTest, BasicGraphMutation) {
LazyCallGraph::Node &A = CG.get(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = CG.get(lookupFunction(*M, "b"));
- EXPECT_EQ(2, std::distance(A.begin(), A.end()));
- EXPECT_EQ(0, std::distance(B.begin(), B.end()));
-
- CG.insertEdge(B, lookupFunction(*M, "c"), LazyCallGraph::Edge::Call);
- EXPECT_EQ(1, std::distance(B.begin(), B.end()));
- LazyCallGraph::Node &C = B.begin()->getNode(CG);
- EXPECT_EQ(0, std::distance(C.begin(), C.end()));
-
- CG.insertEdge(C, B.getFunction(), LazyCallGraph::Edge::Call);
- EXPECT_EQ(1, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&B, C.begin()->getNode());
-
- CG.insertEdge(C, C.getFunction(), LazyCallGraph::Edge::Call);
- EXPECT_EQ(2, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&B, C.begin()->getNode());
- EXPECT_EQ(&C, std::next(C.begin())->getNode());
-
- CG.removeEdge(C, B.getFunction());
- EXPECT_EQ(1, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&C, C.begin()->getNode());
-
- CG.removeEdge(C, C.getFunction());
- EXPECT_EQ(0, std::distance(C.begin(), C.end()));
-
- CG.removeEdge(B, C.getFunction());
- EXPECT_EQ(0, std::distance(B.begin(), B.end()));
+ A.populate();
+ EXPECT_EQ(2, std::distance(A->begin(), A->end()));
+ B.populate();
+ EXPECT_EQ(0, std::distance(B->begin(), B->end()));
+
+ LazyCallGraph::Node &C = CG.get(lookupFunction(*M, "c"));
+ C.populate();
+ CG.insertEdge(B, C, LazyCallGraph::Edge::Call);
+ EXPECT_EQ(1, std::distance(B->begin(), B->end()));
+ EXPECT_EQ(0, std::distance(C->begin(), C->end()));
+
+ CG.insertEdge(C, B, LazyCallGraph::Edge::Call);
+ EXPECT_EQ(1, std::distance(C->begin(), C->end()));
+ EXPECT_EQ(&B, &C->begin()->getNode());
+
+ CG.insertEdge(C, C, LazyCallGraph::Edge::Call);
+ EXPECT_EQ(2, std::distance(C->begin(), C->end()));
+ EXPECT_EQ(&B, &C->begin()->getNode());
+ EXPECT_EQ(&C, &std::next(C->begin())->getNode());
+
+ CG.removeEdge(C, B);
+ EXPECT_EQ(1, std::distance(C->begin(), C->end()));
+ EXPECT_EQ(&C, &C->begin()->getNode());
+
+ CG.removeEdge(C, C);
+ EXPECT_EQ(0, std::distance(C->begin(), C->end()));
+
+ CG.removeEdge(B, C);
+ EXPECT_EQ(0, std::distance(B->begin(), B->end()));
}
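
Taken together, the hunks above migrate the tests to LazyCallGraph's split lookup/populate API: CG.get() fetches a node without building its edges, populate() materializes them, and the node then dereferences to its edge sequence, whose edges hand back Node references rather than pointers. A condensed sketch of the new idiom (illustrative only; assumes a LazyCallGraph CG and a Function &F already in the module):

  LazyCallGraph::Node &N = CG.get(F); // lookup only; no edges built yet
  N.populate();                       // materialize N's outgoing edges
  for (LazyCallGraph::Edge &E : *N)   // the node dereferences to its edges
    (void)E.getNode();                // getNode() now returns Node&, not Node*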
TEST(LazyCallGraphTest, InnerSCCFormation) {
@@ -436,14 +449,18 @@ TEST(LazyCallGraphTest, InnerSCCFormation) {
// Now mutate the graph to connect every node into a single RefSCC to ensure
// that our inner SCC formation handles the rest.
- CG.insertEdge(lookupFunction(*M, "d1"), lookupFunction(*M, "a1"),
- LazyCallGraph::Edge::Ref);
+ LazyCallGraph::Node &D1 = CG.get(lookupFunction(*M, "d1"));
+ LazyCallGraph::Node &A1 = CG.get(lookupFunction(*M, "a1"));
+ A1.populate();
+ D1.populate();
+ CG.insertEdge(D1, A1, LazyCallGraph::Edge::Ref);
// Build vectors and sort them for the rest of the assertions to make them
// independent of order.
std::vector<std::string> Nodes;
// We should build a single RefSCC for the entire graph.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -528,6 +545,7 @@ TEST(LazyCallGraphTest, MultiArmSCC) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -578,6 +596,7 @@ TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -610,13 +629,13 @@ TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
EXPECT_TRUE(DRC.isChildOf(CRC));
EXPECT_TRUE(DC.isChildOf(CC));
- EXPECT_EQ(2, std::distance(A.begin(), A.end()));
+ EXPECT_EQ(2, std::distance(A->begin(), A->end()));
ARC.insertOutgoingEdge(A, D, LazyCallGraph::Edge::Call);
- EXPECT_EQ(3, std::distance(A.begin(), A.end()));
- const LazyCallGraph::Edge &NewE = A[D];
+ EXPECT_EQ(3, std::distance(A->begin(), A->end()));
+ const LazyCallGraph::Edge &NewE = (*A)[D];
EXPECT_TRUE(NewE);
EXPECT_TRUE(NewE.isCall());
- EXPECT_EQ(&D, NewE.getNode());
+ EXPECT_EQ(&D, &NewE.getNode());
// Only the parent and child tests should have changed. The rest of the graph
// remains the same.
@@ -680,7 +699,7 @@ TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
ARC.removeOutgoingEdge(A, D);
- EXPECT_EQ(2, std::distance(A.begin(), A.end()));
+ EXPECT_EQ(2, std::distance(A->begin(), A->end()));
// Now the parent and child tests fail again but the rest remains the same.
EXPECT_FALSE(ARC.isParentOf(DRC));
@@ -723,6 +742,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -750,7 +770,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
- ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
+ ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
// Add an edge to make the graph:
//
@@ -767,10 +787,10 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
// a3--a2 |
auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
// Make sure we connected the nodes.
- for (LazyCallGraph::Edge E : D2) {
- if (E.getNode() == &D3)
+ for (LazyCallGraph::Edge E : *D2) {
+ if (&E.getNode() == &D3)
continue;
- EXPECT_EQ(&C2, E.getNode());
+ EXPECT_EQ(&C2, &E.getNode());
}
// And marked the D ref-SCC as no longer valid.
EXPECT_EQ(1u, MergedRCs.size());
@@ -805,102 +825,6 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
EXPECT_EQ(++I, E);
}
-TEST(LazyCallGraphTest, IncomingEdgeInsertionMidTraversal) {
- LLVMContext Context;
- // This is the same fundamental test as the previous, but we perform it
- // having only partially walked the RefSCCs of the graph.
- std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
- LazyCallGraph CG(*M);
-
- // Walk the RefSCCs until we find the one containing 'c1'.
- auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
- ASSERT_NE(I, E);
- LazyCallGraph::RefSCC &DRC = *I;
- ASSERT_NE(&DRC, nullptr);
- ++I;
- ASSERT_NE(I, E);
- LazyCallGraph::RefSCC &CRC = *I;
- ASSERT_NE(&CRC, nullptr);
-
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a1")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a2")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a3")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b1")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b2")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b3")));
- LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
- LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
- LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
- LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
- LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
- LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C1));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D1));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
- ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
-
- auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
- // Make sure we connected the nodes.
- for (LazyCallGraph::Edge E : D2) {
- if (E.getNode() == &D3)
- continue;
- EXPECT_EQ(&C2, E.getNode());
- }
- // And marked the D ref-SCC as no longer valid.
- EXPECT_EQ(1u, MergedRCs.size());
- EXPECT_EQ(&DRC, MergedRCs[0]);
-
- // Make sure we have the correct nodes in the RefSCCs.
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
-
- // Verify that the post-order walk reflects the updated but still incomplete
- // structure.
- auto J = CG.postorder_ref_scc_begin();
- EXPECT_NE(J, E);
- EXPECT_EQ(&CRC, &*J) << "Actual RefSCC: " << *J;
- EXPECT_EQ(I, J);
-
- // Check that we can form the last two RefSCCs now, and even that we can do
- // it with alternating iterators.
- ++J;
- EXPECT_NE(J, E);
- LazyCallGraph::RefSCC &BRC = *J;
- EXPECT_NE(&BRC, nullptr);
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b1"))));
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b2"))));
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b3"))));
- EXPECT_TRUE(BRC.isParentOf(CRC));
- ++I;
- EXPECT_EQ(J, I);
- EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
-
- // Increment I this time to form the new RefSCC, flopping back to the first
- // iterator.
- ++I;
- EXPECT_NE(I, E);
- LazyCallGraph::RefSCC &ARC = *I;
- EXPECT_NE(&ARC, nullptr);
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a1"))));
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a2"))));
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a3"))));
- EXPECT_TRUE(ARC.isParentOf(CRC));
- ++J;
- EXPECT_EQ(I, J);
- EXPECT_EQ(&ARC, &*J) << "Actual RefSCC: " << *J;
- ++I;
- EXPECT_EQ(E, I);
- ++J;
- EXPECT_EQ(E, J);
-}
-
TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
LLVMContext Context;
// Another variation of the above test but with all the edges switched to
@@ -910,6 +834,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -937,7 +862,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
- ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
+ ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
// Add an edge to make the graph:
//
@@ -954,10 +879,10 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
// a3--a2 |
auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
// Make sure we connected the nodes.
- for (LazyCallGraph::Edge E : D2) {
- if (E.getNode() == &D3)
+ for (LazyCallGraph::Edge E : *D2) {
+ if (&E.getNode() == &D3)
continue;
- EXPECT_EQ(&C2, E.getNode());
+ EXPECT_EQ(&C2, &E.getNode());
}
// And marked the D ref-SCC as no longer valid.
EXPECT_EQ(1u, MergedRCs.size());
@@ -1016,6 +941,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeCallCycle) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -1035,8 +961,8 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeCallCycle) {
// Connect the top to the bottom forming a large RefSCC made up mostly of calls.
auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
// Make sure we connected the nodes.
- EXPECT_NE(D.begin(), D.end());
- EXPECT_EQ(&A, D.begin()->getNode());
+ EXPECT_NE(D->begin(), D->end());
+ EXPECT_EQ(&A, &D->begin()->getNode());
// Check that we have the dead RCs, but ignore the order.
EXPECT_EQ(3u, MergedRCs.size());
@@ -1092,6 +1018,7 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeRefCycle) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -1108,8 +1035,8 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeRefCycle) {
// references.
auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
// Make sure we connected the nodes.
- EXPECT_NE(D.begin(), D.end());
- EXPECT_EQ(&A, D.begin()->getNode());
+ EXPECT_NE(D->begin(), D->end());
+ EXPECT_EQ(&A, &D->begin()->getNode());
// Check that we have the dead RCs, but ignore the order.
EXPECT_EQ(3u, MergedRCs.size());
@@ -1153,6 +1080,7 @@ TEST(LazyCallGraphTest, InlineAndDeleteFunction) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
dbgs() << "Formed RefSCC: " << RC << "\n";
@@ -1180,7 +1108,7 @@ TEST(LazyCallGraphTest, InlineAndDeleteFunction) {
ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
- ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
+ ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
// Delete d2 from the graph, as if it had been inlined.
//
@@ -1276,177 +1204,6 @@ TEST(LazyCallGraphTest, InlineAndDeleteFunction) {
EXPECT_EQ(++I, E);
}
-TEST(LazyCallGraphTest, InlineAndDeleteFunctionMidTraversal) {
- LLVMContext Context;
- // This is the same fundamental test as the previous, but we perform it
- // having only partially walked the RefSCCs of the graph.
- //
- // The ascii diagram is repeated here for easy reference.
- //
- // d1 |
- // / \ |
- // d3--d2 |
- // / \ |
- // b1 c1 |
- // / \ / \ |
- // b3--b2 c3--c2 |
- // \ / |
- // a1 |
- // / \ |
- // a3--a2 |
- //
- std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
- LazyCallGraph CG(*M);
-
- // Walk the RefSCCs until we find the one containing 'c1'.
- auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
- ASSERT_NE(I, E);
- LazyCallGraph::RefSCC &DRC = *I;
- ASSERT_NE(&DRC, nullptr);
- ++I;
- ASSERT_NE(I, E);
- LazyCallGraph::RefSCC &CRC = *I;
- ASSERT_NE(&CRC, nullptr);
-
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a1")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a2")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a3")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b1")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b2")));
- ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b3")));
- LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
- LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
- LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
- LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
- LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
- LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C1));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
- ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D1));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
- ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
- ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
-
- // Delete d2 from the graph, as if it had been inlined.
- //
- // d1 |
- // / / |
- // d3--. |
- // / \ |
- // b1 c1 |
- // / \ / \ |
- // b3--b2 c3--c2 |
- // \ / |
- // a1 |
- // / \ |
- // a3--a2 |
-
- Function &D2F = D2.getFunction();
- CallInst *C1Call = nullptr, *D1Call = nullptr;
- for (User *U : D2F.users()) {
- CallInst *CI = dyn_cast<CallInst>(U);
- ASSERT_TRUE(CI) << "Expected a call: " << *U;
- if (CI->getParent()->getParent() == &C1.getFunction()) {
- ASSERT_EQ(nullptr, C1Call) << "Found too many C1 calls: " << *CI;
- C1Call = CI;
- } else if (CI->getParent()->getParent() == &D1.getFunction()) {
- ASSERT_EQ(nullptr, D1Call) << "Found too many D1 calls: " << *CI;
- D1Call = CI;
- } else {
- FAIL() << "Found an unexpected call instruction: " << *CI;
- }
- }
- ASSERT_NE(C1Call, nullptr);
- ASSERT_NE(D1Call, nullptr);
- ASSERT_EQ(&D2F, C1Call->getCalledFunction());
- ASSERT_EQ(&D2F, D1Call->getCalledFunction());
- C1Call->setCalledFunction(&D3.getFunction());
- D1Call->setCalledFunction(&D3.getFunction());
- ASSERT_EQ(0u, D2F.getNumUses());
-
- // Insert new edges first.
- CRC.insertTrivialCallEdge(C1, D3);
- DRC.insertTrivialCallEdge(D1, D3);
-
- // Then remove the old ones.
- LazyCallGraph::SCC &DC = *CG.lookupSCC(D2);
- auto NewCs = DRC.switchInternalEdgeToRef(D1, D2);
- EXPECT_EQ(&DC, CG.lookupSCC(D2));
- EXPECT_EQ(NewCs.end(), std::next(NewCs.begin()));
- LazyCallGraph::SCC &NewDC = *NewCs.begin();
- EXPECT_EQ(&NewDC, CG.lookupSCC(D1));
- EXPECT_EQ(&NewDC, CG.lookupSCC(D3));
- auto NewRCs = DRC.removeInternalRefEdge(D1, D2);
- EXPECT_EQ(&DRC, CG.lookupRefSCC(D2));
- EXPECT_EQ(NewRCs.end(), std::next(NewRCs.begin()));
- LazyCallGraph::RefSCC &NewDRC = **NewRCs.begin();
- EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
- EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
- EXPECT_FALSE(NewDRC.isParentOf(DRC));
- EXPECT_TRUE(CRC.isParentOf(DRC));
- EXPECT_TRUE(CRC.isParentOf(NewDRC));
- EXPECT_TRUE(DRC.isParentOf(NewDRC));
- CRC.removeOutgoingEdge(C1, D2);
- EXPECT_FALSE(CRC.isParentOf(DRC));
- EXPECT_TRUE(CRC.isParentOf(NewDRC));
- EXPECT_TRUE(DRC.isParentOf(NewDRC));
-
- // Now that we've updated the call graph, D2 is dead, so remove it.
- CG.removeDeadFunction(D2F);
-
- // Check that the graph still looks the same.
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
- EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
- EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
- EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
- EXPECT_TRUE(CRC.isParentOf(NewDRC));
-
- // Verify that the post-order walk reflects the updated but still incomplete
- // structure.
- auto J = CG.postorder_ref_scc_begin();
- EXPECT_NE(J, E);
- EXPECT_EQ(&NewDRC, &*J) << "Actual RefSCC: " << *J;
- ++J;
- EXPECT_NE(J, E);
- EXPECT_EQ(&CRC, &*J) << "Actual RefSCC: " << *J;
- EXPECT_EQ(I, J);
-
- // Check that we can form the last two RefSCCs now, and even that we can do
- // it with alternating iterators.
- ++J;
- EXPECT_NE(J, E);
- LazyCallGraph::RefSCC &BRC = *J;
- EXPECT_NE(&BRC, nullptr);
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b1"))));
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b2"))));
- EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b3"))));
- EXPECT_TRUE(BRC.isParentOf(NewDRC));
- ++I;
- EXPECT_EQ(J, I);
- EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
-
- // Increment I this time to form the new RefSCC, flopping back to the first
- // iterator.
- ++I;
- EXPECT_NE(I, E);
- LazyCallGraph::RefSCC &ARC = *I;
- EXPECT_NE(&ARC, nullptr);
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a1"))));
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a2"))));
- EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a3"))));
- EXPECT_TRUE(ARC.isParentOf(BRC));
- EXPECT_TRUE(ARC.isParentOf(CRC));
- ++J;
- EXPECT_EQ(I, J);
- EXPECT_EQ(&ARC, &*J) << "Actual RefSCC: " << *J;
- ++I;
- EXPECT_EQ(E, I);
- ++J;
- EXPECT_EQ(E, J);
-}
-
TEST(LazyCallGraphTest, InternalEdgeMutation) {
LLVMContext Context;
std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
@@ -1467,6 +1224,7 @@ TEST(LazyCallGraphTest, InternalEdgeMutation) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -1484,7 +1242,7 @@ TEST(LazyCallGraphTest, InternalEdgeMutation) {
// Insert an edge from 'a' to 'c'. Nothing changes about the graph.
RC.insertInternalRefEdge(A, C);
- EXPECT_EQ(2, std::distance(A.begin(), A.end()));
+ EXPECT_EQ(2, std::distance(A->begin(), A->end()));
EXPECT_EQ(&RC, CG.lookupRefSCC(A));
EXPECT_EQ(&RC, CG.lookupRefSCC(B));
EXPECT_EQ(&RC, CG.lookupRefSCC(C));
@@ -1559,6 +1317,7 @@ TEST(LazyCallGraphTest, InternalEdgeRemoval) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
LazyCallGraph::RefSCC &RC = *I;
EXPECT_EQ(E, std::next(I));
@@ -1633,6 +1392,7 @@ TEST(LazyCallGraphTest, InternalNoOpEdgeRemoval) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
LazyCallGraph::RefSCC &RC = *I;
EXPECT_EQ(E, std::next(I));
@@ -1709,6 +1469,7 @@ TEST(LazyCallGraphTest, InternalCallEdgeToRef) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -1801,6 +1562,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -1913,6 +1675,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -2043,6 +1806,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
@@ -2122,6 +1886,7 @@ TEST(LazyCallGraphTest, HandleBlockAddress) {
"}\n");
LazyCallGraph CG(*M);
+ CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &FRC = *I++;
LazyCallGraph::RefSCC &GRC = *I++;
@@ -2134,4 +1899,165 @@ TEST(LazyCallGraphTest, HandleBlockAddress) {
EXPECT_TRUE(GRC.isParentOf(FRC));
}
+TEST(LazyCallGraphTest, ReplaceNodeFunction) {
+ LLVMContext Context;
+ // A graph with several different kinds of edges pointing at a particular
+ // function.
+ std::unique_ptr<Module> M =
+ parseAssembly(Context,
+ "define void @a(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @b(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " call void @d(i8** %ptr)"
+ " ret void\n"
+ "}\n"
+ "define void @c(i8** %ptr) {\n"
+ "entry:\n"
+ " call void @d(i8** %ptr)"
+ " call void @d(i8** %ptr)"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @d(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " call void @c(i8** %ptr)"
+ " call void @d(i8** %ptr)"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
+
+ // Force the graph to be fully expanded.
+ CG.buildRefSCCs();
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC1 = *I++;
+ LazyCallGraph::RefSCC &RC2 = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ ASSERT_EQ(2, RC1.size());
+ LazyCallGraph::SCC &C1 = RC1[0];
+ LazyCallGraph::SCC &C2 = RC1[1];
+
+ LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
+ LazyCallGraph::Node &DN = *CG.lookup(lookupFunction(*M, "d"));
+ EXPECT_EQ(&C1, CG.lookupSCC(DN));
+ EXPECT_EQ(&C1, CG.lookupSCC(CN));
+ EXPECT_EQ(&C2, CG.lookupSCC(BN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
+ EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
+
+ // Now we need to build a new function 'e' with the same signature as 'd'.
+ Function &D = DN.getFunction();
+ Function &E = *Function::Create(D.getFunctionType(), D.getLinkage(), "e");
+ D.getParent()->getFunctionList().insert(D.getIterator(), &E);
+
+ // Change each use of 'd' to use 'e'. This is particularly easy as they have
+ // the same type.
+ D.replaceAllUsesWith(&E);
+
+ // Splice the body of the old function into the new one.
+ E.getBasicBlockList().splice(E.begin(), D.getBasicBlockList());
+ // And fix up the one argument.
+ D.arg_begin()->replaceAllUsesWith(&*E.arg_begin());
+ E.arg_begin()->takeName(&*D.arg_begin());
+
+ // Now replace the function in the graph.
+ RC1.replaceNodeFunction(DN, E);
+
+ EXPECT_EQ(&E, &DN.getFunction());
+ EXPECT_EQ(&DN, &(*CN)[DN].getNode());
+ EXPECT_EQ(&DN, &(*BN)[DN].getNode());
+}
+
+TEST(LazyCallGraphTest, RemoveFunctionWithSpurriousRef) {
+ LLVMContext Context;
+ // A graph with a couple of RefSCCs.
+ std::unique_ptr<Module> M =
+ parseAssembly(Context,
+ "define void @a(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @b(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @c(i8** %ptr) {\n"
+ "entry:\n"
+ " call void @d(i8** %ptr)"
+ " ret void\n"
+ "}\n"
+ "define void @d(i8** %ptr) {\n"
+ "entry:\n"
+ " call void @c(i8** %ptr)"
+ " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @dead() {\n"
+ "entry:\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
+
+ // Insert spurious ref edges.
+ LazyCallGraph::Node &AN = CG.get(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &BN = CG.get(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &CN = CG.get(lookupFunction(*M, "c"));
+ LazyCallGraph::Node &DN = CG.get(lookupFunction(*M, "d"));
+ LazyCallGraph::Node &DeadN = CG.get(lookupFunction(*M, "dead"));
+ AN.populate();
+ BN.populate();
+ CN.populate();
+ DN.populate();
+ DeadN.populate();
+ CG.insertEdge(AN, DeadN, LazyCallGraph::Edge::Ref);
+ CG.insertEdge(BN, DeadN, LazyCallGraph::Edge::Ref);
+ CG.insertEdge(CN, DeadN, LazyCallGraph::Edge::Ref);
+ CG.insertEdge(DN, DeadN, LazyCallGraph::Edge::Ref);
+
+ // Force the graph to be fully expanded.
+ CG.buildRefSCCs();
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &DeadRC = *I++;
+ LazyCallGraph::RefSCC &RC1 = *I++;
+ LazyCallGraph::RefSCC &RC2 = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ ASSERT_EQ(2, RC1.size());
+ LazyCallGraph::SCC &C1 = RC1[0];
+ LazyCallGraph::SCC &C2 = RC1[1];
+
+ EXPECT_EQ(&DeadRC, CG.lookupRefSCC(DeadN));
+ EXPECT_EQ(&C1, CG.lookupSCC(DN));
+ EXPECT_EQ(&C1, CG.lookupSCC(CN));
+ EXPECT_EQ(&C2, CG.lookupSCC(BN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
+ EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
+ EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
+
+ // Now delete 'dead'. There are no uses of this function but there are
+ // spurious references.
+ CG.removeDeadFunction(DeadN.getFunction());
+
+ // The only observable change should be that the RefSCC is gone from the
+ // postorder sequence.
+ I = CG.postorder_ref_scc_begin();
+ EXPECT_EQ(&RC1, &*I++);
+ EXPECT_EQ(&RC2, &*I++);
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+}
}
diff --git a/unittests/Analysis/LoopInfoTest.cpp b/unittests/Analysis/LoopInfoTest.cpp
new file mode 100644
index 000000000000..647ce8a3c1ba
--- /dev/null
+++ b/unittests/Analysis/LoopInfoTest.cpp
@@ -0,0 +1,158 @@
+//===- LoopInfoTest.cpp - LoopInfo unit tests -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+/// Build the loop info for the function and run the Test.
+static void
+runWithLoopInfo(Module &M, StringRef FuncName,
+ function_ref<void(Function &F, LoopInfo &LI)> Test) {
+ auto *F = M.getFunction(FuncName);
+ ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
+ // Compute the dominator tree and the loop info for the function.
+ DominatorTree DT(*F);
+ LoopInfo LI(DT);
+ Test(*F, LI);
+}
+
+static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
+ const char *ModuleStr) {
+ SMDiagnostic Err;
+ return parseAssemblyString(ModuleStr, Err, Context);
+}
+
+// This tests that for a loop with a single latch, we get the loop id from
+// its only latch, even when the loop is not in simplified form.
+TEST(LoopInfoTest, LoopWithSingleLatch) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "define void @foo(i32 %n) {\n"
+ "entry:\n"
+ " br i1 undef, label %for.cond, label %for.end\n"
+ "for.cond:\n"
+ " %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]\n"
+ " %cmp = icmp slt i32 %i.0, %n\n"
+ " br i1 %cmp, label %for.inc, label %for.end\n"
+ "for.inc:\n"
+ " %inc = add nsw i32 %i.0, 1\n"
+ " br label %for.cond, !llvm.loop !0\n"
+ "for.end:\n"
+ " ret void\n"
+ "}\n"
+ "!0 = distinct !{!0, !1}\n"
+ "!1 = !{!\"llvm.loop.distribute.enable\", i1 true}\n";
+
+ // Parse the module.
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+
+ runWithLoopInfo(*M, "foo", [&](Function &F, LoopInfo &LI) {
+ Function::iterator FI = F.begin();
+ // First basic block is entry - skip it.
+ BasicBlock *Header = &*(++FI);
+ assert(Header->getName() == "for.cond");
+ Loop *L = LI.getLoopFor(Header);
+
+ // This loop is not in simplified form.
+ EXPECT_FALSE(L->isLoopSimplifyForm());
+
+ // Analyze the loop metadata id.
+ bool loopIDFoundAndSet = false;
+ // Try to get and set the metadata id for the loop.
+ if (MDNode *D = L->getLoopID()) {
+ L->setLoopID(D);
+ loopIDFoundAndSet = true;
+ }
+
+ // We must have successfully found and set the loop id in the
+ // only latch the loop has.
+ EXPECT_TRUE(loopIDFoundAndSet);
+ });
+}
+
+TEST(LoopInfoTest, PreorderTraversals) {
+ const char *ModuleStr = "define void @f() {\n"
+ "entry:\n"
+ " br label %loop.0\n"
+ "loop.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.1\n"
+ "loop.0.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.0.1\n"
+ "loop.0.1:\n"
+ " br i1 undef, label %loop.0.1, label %loop.0.2\n"
+ "loop.0.2:\n"
+ " br i1 undef, label %loop.0.2, label %loop.0\n"
+ "loop.1:\n"
+ " br i1 undef, label %loop.1.0, label %end\n"
+ "loop.1.0:\n"
+ " br i1 undef, label %loop.1.0, label %loop.1.1\n"
+ "loop.1.1:\n"
+ " br i1 undef, label %loop.1.1, label %loop.1.2\n"
+ "loop.1.2:\n"
+ " br i1 undef, label %loop.1.2, label %loop.1\n"
+ "end:\n"
+ " ret void\n"
+ "}\n";
+ // Parse the module.
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+ Function &F = *M->begin();
+
+ DominatorTree DT(F);
+ LoopInfo LI;
+ LI.analyze(DT);
+
+ Function::iterator I = F.begin();
+ ASSERT_EQ("entry", I->getName());
+ ++I;
+ Loop &L_0 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.0", L_0.getHeader()->getName());
+ Loop &L_0_0 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.0.0", L_0_0.getHeader()->getName());
+ Loop &L_0_1 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.0.1", L_0_1.getHeader()->getName());
+ Loop &L_0_2 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.0.2", L_0_2.getHeader()->getName());
+ Loop &L_1 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.1", L_1.getHeader()->getName());
+ Loop &L_1_0 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.1.0", L_1_0.getHeader()->getName());
+ Loop &L_1_1 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.1.1", L_1_1.getHeader()->getName());
+ Loop &L_1_2 = *LI.getLoopFor(&*I++);
+ ASSERT_EQ("loop.1.2", L_1_2.getHeader()->getName());
+
+ auto Preorder = LI.getLoopsInPreorder();
+ ASSERT_EQ(8u, Preorder.size());
+ EXPECT_EQ(&L_0, Preorder[0]);
+ EXPECT_EQ(&L_0_0, Preorder[1]);
+ EXPECT_EQ(&L_0_1, Preorder[2]);
+ EXPECT_EQ(&L_0_2, Preorder[3]);
+ EXPECT_EQ(&L_1, Preorder[4]);
+ EXPECT_EQ(&L_1_0, Preorder[5]);
+ EXPECT_EQ(&L_1_1, Preorder[6]);
+ EXPECT_EQ(&L_1_2, Preorder[7]);
+
+ auto ReverseSiblingPreorder = LI.getLoopsInReverseSiblingPreorder();
+ ASSERT_EQ(8u, ReverseSiblingPreorder.size());
+ EXPECT_EQ(&L_1, ReverseSiblingPreorder[0]);
+ EXPECT_EQ(&L_1_2, ReverseSiblingPreorder[1]);
+ EXPECT_EQ(&L_1_1, ReverseSiblingPreorder[2]);
+ EXPECT_EQ(&L_1_0, ReverseSiblingPreorder[3]);
+ EXPECT_EQ(&L_0, ReverseSiblingPreorder[4]);
+ EXPECT_EQ(&L_0_2, ReverseSiblingPreorder[5]);
+ EXPECT_EQ(&L_0_1, ReverseSiblingPreorder[6]);
+ EXPECT_EQ(&L_0_0, ReverseSiblingPreorder[7]);
+}
diff --git a/unittests/Analysis/MemorySSA.cpp b/unittests/Analysis/MemorySSA.cpp
new file mode 100644
index 000000000000..08b0e830a9b2
--- /dev/null
+++ b/unittests/Analysis/MemorySSA.cpp
@@ -0,0 +1,865 @@
+//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
+
+/// There's a lot of common setup between these tests. This fixture helps reduce
+/// that. Tests should mock up a function, store it in F, and then call
+/// setupAnalyses().
+class MemorySSATest : public testing::Test {
+protected:
+ // N.B. Many of these members depend on each other (e.g. the Module depends on
+ // the Context, etc.). So, order matters here (and in TestAnalyses).
+ LLVMContext C;
+ Module M;
+ IRBuilder<> B;
+ DataLayout DL;
+ TargetLibraryInfoImpl TLII;
+ TargetLibraryInfo TLI;
+ Function *F;
+
+ // Things that we need to build after the function is created.
+ struct TestAnalyses {
+ DominatorTree DT;
+ AssumptionCache AC;
+ AAResults AA;
+ BasicAAResult BAA;
+ // We need to defer MSSA construction until AA is *entirely* set up, which
+ // requires calling addAAResult. Hence, we just use a pointer here.
+ std::unique_ptr<MemorySSA> MSSA;
+ MemorySSAWalker *Walker;
+
+ TestAnalyses(MemorySSATest &Test)
+ : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
+ BAA(Test.DL, Test.TLI, AC, &DT) {
+ AA.addAAResult(BAA);
+ MSSA = make_unique<MemorySSA>(*Test.F, &AA, &DT);
+ Walker = MSSA->getWalker();
+ }
+ };
+
+ std::unique_ptr<TestAnalyses> Analyses;
+
+ void setupAnalyses() {
+ assert(F);
+ Analyses.reset(new TestAnalyses(*this));
+ }
+
+public:
+ MemorySSATest()
+ : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
+};
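
Every test below follows the pattern the fixture comment describes: mock up a function into F with the IRBuilder, call setupAnalyses(), then work through Analyses->MSSA. A minimal sketch of that skeleton (hypothetical test name, trivial body, illustrative only):

  TEST_F(MemorySSATest, Skeleton) { // hypothetical name, not in this commit
    F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                         GlobalValue::ExternalLinkage, "F", &M);
    B.SetInsertPoint(BasicBlock::Create(C, "", F));
    B.CreateRetVoid();
    setupAnalyses();                   // DT, AA and MemorySSA become valid
    Analyses->MSSA->verifyMemorySSA(); // holds trivially: no memory accesses
  }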
+
+TEST_F(MemorySSATest, CreateALoad) {
+ // We create a diamond where there is a store on one side, and then after
+ // building MemorySSA, create a load after the merge point, and use it to test
+ // updating by creating an access for the load.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ Argument *PointerArg = &*F->arg_begin();
+ B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+ // Add the load
+ B.SetInsertPoint(Merge);
+ LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+ // MemoryPHI should already exist.
+ MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+ EXPECT_NE(MP, nullptr);
+
+ // Create the load memory access
+ MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ LoadInst, MP, Merge, MemorySSA::Beginning));
+ MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+ EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
+ // We create a diamond, then build MemorySSA with no memory accesses, and
+ // incrementally update it by inserting a store in the entry, a load in the
+ // merge point, then a store in the branch, another load in the merge point,
+ // and then a second store in the entry.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left, Left->begin());
+ Argument *PointerArg = &*F->arg_begin();
+ B.SetInsertPoint(Left);
+ B.CreateBr(Merge);
+ B.SetInsertPoint(Right);
+ B.CreateBr(Merge);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+ // Add the store
+ B.SetInsertPoint(Entry, Entry->begin());
+ StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
+ EntryStore, nullptr, Entry, MemorySSA::Beginning);
+ Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
+
+ // Add the load
+ B.SetInsertPoint(Merge, Merge->begin());
+ LoadInst *FirstLoad = B.CreateLoad(PointerArg);
+
+ // MemoryPHI should not already exist.
+ MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+ EXPECT_EQ(MP, nullptr);
+
+ // Create the load memory access
+ MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ FirstLoad, nullptr, Merge, MemorySSA::Beginning));
+ Updater.insertUse(FirstLoadAccess);
+ // Should just have a load using the entry access, because it should discover
+ // the phi is trivial
+ EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
+
+ // Create a store on the left
+ // Add the store
+ B.SetInsertPoint(Left, Left->begin());
+ StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
+ MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
+ LeftStore, nullptr, Left, MemorySSA::Beginning);
+ Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
+ // We don't touch existing loads, so we need to create a new one to get a phi
+ // Add the second load
+ B.SetInsertPoint(Merge, Merge->begin());
+ LoadInst *SecondLoad = B.CreateLoad(PointerArg);
+
+ // MemoryPHI should not already exist.
+ MP = MSSA.getMemoryAccess(Merge);
+ EXPECT_EQ(MP, nullptr);
+
+ // Create the load memory access
+ MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ SecondLoad, nullptr, Merge, MemorySSA::Beginning));
+ Updater.insertUse(SecondLoadAccess);
+ // Now the load should be a phi of the entry store and the left store
+ MemoryPhi *MergePhi =
+ dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
+ EXPECT_NE(MergePhi, nullptr);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
+ // Now create a store below the existing one in the entry
+ B.SetInsertPoint(Entry, --Entry->end());
+ StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
+ SecondEntryStore, nullptr, Entry, MemorySSA::End);
+ // Insert it twice just to test renaming
+ Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
+ EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
+ Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
+ EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
+ // and make sure the phi below it got updated, despite being blocks away
+ MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
+ EXPECT_NE(MergePhi, nullptr);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, CreateALoadUpdater) {
+ // We create a diamond, then build memoryssa with no memory accesses, and
+ // incrementally update it by inserting a store in one of the branches, and a
+ // load in the merge point
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left, Left->begin());
+ Argument *PointerArg = &*F->arg_begin();
+ B.SetInsertPoint(Left);
+ B.CreateBr(Merge);
+ B.SetInsertPoint(Right);
+ B.CreateBr(Merge);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+ B.SetInsertPoint(Left, Left->begin());
+ // Add the store
+ StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
+ MemoryAccess *StoreAccess =
+ Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
+ Updater.insertDef(cast<MemoryDef>(StoreAccess));
+
+ // Add the load
+ B.SetInsertPoint(Merge, Merge->begin());
+ LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+ // MemoryPHI should not already exist.
+ MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+ EXPECT_EQ(MP, nullptr);
+
+ // Create the load memory access
+ MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ LoadInst, nullptr, Merge, MemorySSA::Beginning));
+ Updater.insertUse(LoadAccess);
+ MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+ EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+ MSSA.verifyMemorySSA();
+}
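
The two updater tests above follow the same general recipe for incremental updates: create the IR instruction first, mirror it in MemorySSA with createMemoryAccessInBB, then hand the result to insertDef or insertUse so defining accesses and MemoryPhis get recomputed. A condensed restatement (illustrative only; assumes Updater, a basic block BB, and freshly created instructions SI and LI):

  // New store: create the MemoryDef, then let the updater wire it in.
  MemoryAccess *DefAcc = Updater.createMemoryAccessInBB(
      SI, /*Definition=*/nullptr, BB, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(DefAcc));

  // New load: same shape, inserted as a use.
  MemoryUse *UseAcc = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LI, /*Definition=*/nullptr, BB, MemorySSA::Beginning));
  Updater.insertUse(UseAcc);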
+
+TEST_F(MemorySSATest, MoveAStore) {
+ // We create a diamond where there is a store in the entry, a store on one
+ // side, and a load at the end. After building MemorySSA, we test updating by
+ // moving the store from the side block to the entry block. This destroys the
+ // old access.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ B.CreateLoad(PointerArg);
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+ // Move the store
+ SideStore->moveBefore(Entry->getTerminator());
+ MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+ MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+ MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
+ SideStore, EntryStoreAccess, EntryStoreAccess);
+ EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
+ Updater.removeMemoryAccess(SideStoreAccess);
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreUpdater) {
+ // We create a diamond where there is a store in the entry, a store on one
+ // side, and a load at the end. After building MemorySSA, we test updating by
+ // moving the store from the side block to the entry block. This destroys the
+ // old access.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ auto *MergeLoad = B.CreateLoad(PointerArg);
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Move the store
+ SideStore->moveBefore(Entry->getTerminator());
+ auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+ auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+ auto *NewStoreAccess = Updater.createMemoryAccessAfter(
+ SideStore, EntryStoreAccess, EntryStoreAccess);
+ // Before, the load will point to a phi of the EntryStore and SideStore.
+ auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+ EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+ MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+ EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+ Updater.removeMemoryAccess(SideStoreAccess);
+ Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
+ // After it's a phi of the new side store access.
+ EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
+ // We create a diamond where there is a store in the entry, a store on one
+ // side, and a load at the end. After building MemorySSA, we test updating by
+ // moving the store from the side block to the entry block. This does not
+ // destroy the old access.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ auto *MergeLoad = B.CreateLoad(PointerArg);
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Move the store
+ auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+ auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+ // Before, the load will point to a phi of the EntryStore and SideStore.
+ auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+ EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+ MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+ EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+ SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
+ Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
+ // After, it's a phi of the side store.
+ EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
+
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreAllAround) {
+ // We create a diamond where there is a store in the entry, a store on one
+ // side, and a load at the end. After building MemorySSA, we test updating by
+ // moving the store from the side block to the entry block, then to the other
+ // side block, then to before the load. This does not destroy the old access.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ auto *MergeLoad = B.CreateLoad(PointerArg);
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Move the store
+ auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+ auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+ // Before, the load will point to a phi of the EntryStore and SideStore.
+ auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+ EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+ MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+ EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+ // Move the store before the entry store
+ SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
+ Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
+ // After, it's a phi of the entry store.
+ EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+ MSSA.verifyMemorySSA();
+ // Now move the store to the right branch
+ SideStore->moveBefore(*Right, Right->begin());
+ Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
+ MSSA.verifyMemorySSA();
+ EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
+ // Now move it before the load
+ SideStore->moveBefore(MergeLoad);
+ Updater.moveBefore(SideStoreAccess, LoadAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+ EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+ MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, RemoveAPhi) {
+ // We create a diamond where there is a store on one side, and then a load
+ // after the merge point. This enables us to test a bunch of different
+ // removal cases.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Before, the load will be a use of a phi<store, liveonentry>.
+ MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
+ MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
+ MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+ EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+ // Kill the store
+ Updater.removeMemoryAccess(StoreAccess);
+ MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
+ // Verify the phi ended up as liveonentry, liveonentry
+ for (auto &Op : MP->incoming_values())
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
+ // Replace the phi uses with the live on entry def
+ MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
+ // Verify the load is now defined by liveOnEntryDef
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+ // Remove the PHI
+ Updater.removeMemoryAccess(MP);
+ MSSA.verifyMemorySSA();
+}
+
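+// Note the ordering in RemoveAPhi above: the phi's remaining uses are
+// rerouted (via replaceAllUsesWith) before removeMemoryAccess deletes it, so
+// no use is left dangling.
+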
+TEST_F(MemorySSATest, RemoveMemoryAccess) {
+ // We create a diamond where there is a store on one side, and then a load
+ // after the merge point. This enables us to test a bunch of different
+ // removal cases.
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ BasicBlock *Entry(BasicBlock::Create(C, "", F));
+ BasicBlock *Left(BasicBlock::Create(C, "", F));
+ BasicBlock *Right(BasicBlock::Create(C, "", F));
+ BasicBlock *Merge(BasicBlock::Create(C, "", F));
+ B.SetInsertPoint(Entry);
+ B.CreateCondBr(B.getTrue(), Left, Right);
+ B.SetInsertPoint(Left);
+ Argument *PointerArg = &*F->arg_begin();
+ StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
+ BranchInst::Create(Merge, Left);
+ BranchInst::Create(Merge, Right);
+ B.SetInsertPoint(Merge);
+ LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Before, the load will be a use of a phi<store, liveonentry>. It should be
+ // the same after.
+ MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
+ MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
+ MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+ EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+ // The load is currently clobbered by one of the phi arguments, so the walker
+ // should determine the clobbering access as the phi.
+ EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
+ Updater.removeMemoryAccess(StoreAccess);
+ MSSA.verifyMemorySSA();
+ // After the removeaccess, let's see if we got the right accesses
+ // The load should still point to the phi ...
+ EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
+ // but we should now get live on entry for the clobbering definition of the
+ // load, since it will walk past the phi node since every argument is the
+ // same.
+ // XXX: This currently requires either removing the phi or resetting optimized
+ // on the load
+
+ EXPECT_FALSE(
+ MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
+ // If we reset optimized, we get live on entry.
+ LoadAccess->resetOptimized();
+ EXPECT_TRUE(
+ MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
+ // The phi should now be a two entry phi with two live on entry defs.
+ for (const auto &Op : DefiningAccess->operands()) {
+ MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
+ }
+
+ // Now we try to remove the single valued phi
+ Updater.removeMemoryAccess(DefiningAccess);
+ MSSA.verifyMemorySSA();
+ // Now the load should be a load of live on entry.
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+}
+
+// We had a bug with caching where the walker would report that MemoryDef#3's
+// clobber (below) was MemoryDef#1.
+//
+// define void @F(i8*) {
+// %A = alloca i8, i8 1
+// ; 1 = MemoryDef(liveOnEntry)
+// store i8 0, i8* %A
+// ; 2 = MemoryDef(1)
+// store i8 1, i8* %A
+// ; 3 = MemoryDef(2)
+// store i8 2, i8* %A
+// }
+TEST_F(MemorySSATest, TestTripleStore) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+ StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
+ StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+
+ unsigned I = 0;
+ for (StoreInst *V : {S1, S2, S3}) {
+ // Everything should be clobbered by its defining access
+ MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
+ MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
+ EXPECT_EQ(DefiningAccess, WalkerClobber)
+ << "Store " << I << " doesn't have the correct clobbering access";
+    // The message streamed into EXPECT_EQ is evaluated only on failure, so an
+    // increment of I inside it would be skipped on success; increment it here
+    // instead.
+ ++I;
+ }
+}
+
+// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
+// walker was caching the initial node it walked. This was fine (albeit
+// mostly redundant) unless the initial node being walked is a clobber for the
+// query. In that case, we'd cache that the node clobbered itself.
+TEST_F(MemorySSATest, TestStoreAndLoad) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+ Instruction *LI = B.CreateLoad(Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+
+ MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
+ EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
+}
+
+// Another bug (related to the above two fixes): It was noted that, given the
+// following code:
+// ; 1 = MemoryDef(liveOnEntry)
+// store i8 0, i8* %1
+//
+// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
+// hand back the store (correctly). A later call to
+// getClobberingMemoryAccess(const Instruction*) would also hand back the store
+// (incorrectly; it should return liveOnEntry).
+//
+// This test checks that repeated calls to either function return what they're
+// meant to.
+TEST_F(MemorySSATest, TestStoreDoubleQuery) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+
+ MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
+ MemoryAccess *Clobber =
+ Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+ MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+
+ EXPECT_EQ(Clobber, StoreAccess);
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
+
+ // Try again (with entries in the cache already) for good measure...
+ Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+ LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+ EXPECT_EQ(Clobber, StoreAccess);
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
+}
+
+// Bug: During phi optimization, the walker wouldn't cache the proper result
+// for the farthest-walked BB.
+//
+// Specifically, it would assume that whatever we walked to was a clobber.
+// "Whatever we walked to" isn't a clobber if we hit a cache entry.
+//
+// ...So, we need a test case that looks like:
+// A
+// / \
+// B |
+// \ /
+// C
+//
+// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
+// The walk must determine that the blocker exists by using cache entries *while
+// walking* 'B'.
+TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "A", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Constant *One = ConstantInt::get(Int8, 1);
+ Constant *Zero = ConstantInt::get(Int8, 0);
+ Value *AllocA = B.CreateAlloca(Int8, One, "a");
+ Value *AllocB = B.CreateAlloca(Int8, One, "b");
+ BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
+ BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
+
+ B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
+
+ B.SetInsertPoint(IfThen);
+ Instruction *FirstStore = B.CreateStore(Zero, AllocA);
+ B.CreateStore(Zero, AllocB);
+ Instruction *ALoad0 = B.CreateLoad(AllocA, "");
+ Instruction *BStore = B.CreateStore(Zero, AllocB);
+  // This store to A exists only so that use optimization, when MSSA is built,
+  // doesn't point the load below at FirstStore; we remove it afterwards to
+  // keep the test case simple-ish.
+ Instruction *KillStore = B.CreateStore(Zero, AllocA);
+ Instruction *ALoad = B.CreateLoad(AllocA, "");
+ B.CreateBr(IfEnd);
+
+ B.SetInsertPoint(IfEnd);
+ Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+ MemorySSAUpdater Updater(&MSSA);
+
+ // Kill `KillStore`; it exists solely so that the load after it won't be
+ // optimized to FirstStore.
+ Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
+ KillStore->eraseFromParent();
+ auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
+ EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
+
+ // Populate the cache for the store to AllocB directly after FirstStore. It
+  // should point to something in block B (so something in C can't be optimized
+ // to it).
+ MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
+ EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
+
+ // If the bug exists, this will introduce a bad cache entry for %a on BStore.
+ // It will point to the store to %b after FirstStore. This only happens during
+ // phi optimization.
+ MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
+ MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
+ EXPECT_EQ(BottomClobber, Phi);
+
+ // This query will first check the cache for {%a, BStore}. It should point to
+ // FirstStore, not to the store after FirstStore.
+ MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
+ EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
+}
+
+// Test that our walker properly handles loads with invariant-load metadata.
+// It's a bit hacky, since we add the metadata *after* building MSSA.
+// Otherwise, the use optimizer would optimize the load for us, which isn't
+// what we want.
+// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
+TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Constant *One = ConstantInt::get(Int8, 1);
+ Value *AllocA = B.CreateAlloca(Int8, One, "");
+
+ Instruction *Store = B.CreateStore(One, AllocA);
+ Instruction *Load = B.CreateLoad(AllocA);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+
+ auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
+ auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
+ EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
+
+ // ...At the time of writing, no cache should exist for LoadMA. Be a bit
+ // flexible to future changes.
+ Walker->invalidateInfo(LoadMA);
+ Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
+
+ MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
+ EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
+}
+
+// Test loads get reoptimized properly by the walker.
+TEST_F(MemorySSATest, WalkerReopt) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
+ Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+ Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
+ Instruction *LIA = B.CreateLoad(AllocaA);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker *Walker = Analyses->Walker;
+ MemorySSAUpdater Updater(&MSSA);
+
+ MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
+ MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
+ EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
+ Updater.removeMemoryAccess(LoadAccess);
+
+ // Create the load memory access pointing to an unoptimized place.
+ MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
+  // This should cause it to be optimized
+ EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
+ EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
+}
+
+// Test out MemorySSAUpdater::moveBefore
+TEST_F(MemorySSATest, MoveAboveMemoryDef) {
+ F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+ Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
+
+ StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
+ StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
+ LoadInst *LoadB = B.CreateLoad(B_);
+ StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
+ StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
+ StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
+ LoadInst *LoadC = B.CreateLoad(C);
+
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAWalker &Walker = *Analyses->Walker;
+
+ MemorySSAUpdater Updater(&MSSA);
+ StoreC->moveBefore(StoreB);
+ Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
+ cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
+
+ MSSA.verifyMemorySSA();
+
+ EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
+ MSSA.getMemoryAccess(StoreC));
+ EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
+ MSSA.getMemoryAccess(StoreA0));
+ EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
+ MSSA.getMemoryAccess(StoreA1));
+ EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
+ MSSA.getMemoryAccess(StoreB));
+ EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
+ MSSA.getMemoryAccess(StoreC));
+
+ // exercise block numbering
+ EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
+ MSSA.getMemoryAccess(StoreB)));
+ EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
+ MSSA.getMemoryAccess(StoreA2)));
+}
+
+TEST_F(MemorySSATest, Irreducible) {
+ // Create the equivalent of
+ // x = something
+ // if (...)
+ // goto second_loop_entry
+ // while (...) {
+ // second_loop_entry:
+ // }
+ // use(x)
+
+ SmallVector<PHINode *, 8> Inserted;
+ IRBuilder<> B(C);
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+
+ // Make blocks
+ BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
+ BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
+ BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
+ BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
+ B.SetInsertPoint(IfBB);
+ B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
+ B.SetInsertPoint(LoopStartBB);
+ B.CreateBr(LoopMainBB);
+ B.SetInsertPoint(LoopMainBB);
+ B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
+ B.SetInsertPoint(AfterLoopBB);
+ Argument *FirstArg = &*F->arg_begin();
+ setupAnalyses();
+ MemorySSA &MSSA = *Analyses->MSSA;
+ MemorySSAUpdater Updater(&MSSA);
+  // Create the load memory access
+ LoadInst *LoadInst = B.CreateLoad(FirstArg);
+ MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+ LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
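+  // insertUse hooks the new access into the SSA form and computes its
+  // defining access; verifyMemorySSA then checks the result on this
+  // irreducible CFG.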
+ Updater.insertUse(LoadAccess);
+ MSSA.verifyMemorySSA();
+}
diff --git a/unittests/Analysis/ProfileSummaryInfoTest.cpp b/unittests/Analysis/ProfileSummaryInfoTest.cpp
new file mode 100644
index 000000000000..0b4b1de28053
--- /dev/null
+++ b/unittests/Analysis/ProfileSummaryInfoTest.cpp
@@ -0,0 +1,198 @@
+//===- ProfileSummaryInfoTest.cpp - ProfileSummaryInfo unit tests ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class ProfileSummaryInfoTest : public testing::Test {
+protected:
+ LLVMContext C;
+ std::unique_ptr<BranchProbabilityInfo> BPI;
+ std::unique_ptr<DominatorTree> DT;
+ std::unique_ptr<LoopInfo> LI;
+
+ ProfileSummaryInfo buildPSI(Module *M) {
+ return ProfileSummaryInfo(*M);
+ }
+ BlockFrequencyInfo buildBFI(Function &F) {
+ DT.reset(new DominatorTree(F));
+ LI.reset(new LoopInfo(*DT));
+ BPI.reset(new BranchProbabilityInfo(F, *LI));
+ return BlockFrequencyInfo(F, *BPI, *LI);
+ }
+ std::unique_ptr<Module> makeLLVMModule(const char *ProfKind = nullptr) {
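+    // The IR strings below are formatv templates: a literal '{' is escaped
+    // as "{{" and "{0}" is a placeholder, filled with the summary metadata
+    // (or, inside the summary string, with the profile kind).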
+ const char *ModuleString =
+ "define i32 @g(i32 %x) !prof !21 {{\n"
+ " ret i32 0\n"
+ "}\n"
+ "define i32 @h(i32 %x) !prof !22 {{\n"
+ " ret i32 0\n"
+ "}\n"
+ "define i32 @f(i32 %x) !prof !20 {{\n"
+ "bb0:\n"
+ " %y1 = icmp eq i32 %x, 0 \n"
+ " br i1 %y1, label %bb1, label %bb2, !prof !23 \n"
+ "bb1:\n"
+ " %z1 = call i32 @g(i32 %x)\n"
+ " br label %bb3\n"
+ "bb2:\n"
+ " %z2 = call i32 @h(i32 %x)\n"
+ " br label %bb3\n"
+ "bb3:\n"
+ " %y2 = phi i32 [0, %bb1], [1, %bb2] \n"
+ " ret i32 %y2\n"
+ "}\n"
+ "!20 = !{{!\"function_entry_count\", i64 400}\n"
+ "!21 = !{{!\"function_entry_count\", i64 1}\n"
+ "!22 = !{{!\"function_entry_count\", i64 100}\n"
+ "!23 = !{{!\"branch_weights\", i32 64, i32 4}\n"
+ "{0}";
+ const char *SummaryString = "!llvm.module.flags = !{{!1}"
+ "!1 = !{{i32 1, !\"ProfileSummary\", !2}"
+ "!2 = !{{!3, !4, !5, !6, !7, !8, !9, !10}"
+ "!3 = !{{!\"ProfileFormat\", !\"{0}\"}"
+ "!4 = !{{!\"TotalCount\", i64 10000}"
+ "!5 = !{{!\"MaxCount\", i64 10}"
+ "!6 = !{{!\"MaxInternalCount\", i64 1}"
+ "!7 = !{{!\"MaxFunctionCount\", i64 1000}"
+ "!8 = !{{!\"NumCounts\", i64 3}"
+ "!9 = !{{!\"NumFunctions\", i64 3}"
+ "!10 = !{{!\"DetailedSummary\", !11}"
+ "!11 = !{{!12, !13, !14}"
+ "!12 = !{{i32 10000, i64 1000, i32 1}"
+ "!13 = !{{i32 999000, i64 300, i32 3}"
+ "!14 = !{{i32 999999, i64 5, i32 10}";
+ SMDiagnostic Err;
+ if (ProfKind)
+ return parseAssemblyString(
+ formatv(ModuleString, formatv(SummaryString, ProfKind).str()).str(),
+ Err, C);
+ else
+ return parseAssemblyString(formatv(ModuleString, "").str(), Err, C);
+ }
+};
+
+TEST_F(ProfileSummaryInfoTest, TestNoProfile) {
+ auto M = makeLLVMModule(/*ProfKind=*/nullptr);
+ Function *F = M->getFunction("f");
+
+ ProfileSummaryInfo PSI = buildPSI(M.get());
+ // In the absence of profiles, is{Hot|Cold}X methods should always return
+ // false.
+ EXPECT_FALSE(PSI.isHotCount(1000));
+ EXPECT_FALSE(PSI.isHotCount(0));
+ EXPECT_FALSE(PSI.isColdCount(1000));
+ EXPECT_FALSE(PSI.isColdCount(0));
+
+ EXPECT_FALSE(PSI.isFunctionEntryHot(F));
+ EXPECT_FALSE(PSI.isFunctionEntryCold(F));
+
+ BasicBlock &BB0 = F->getEntryBlock();
+ BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+
+ BlockFrequencyInfo BFI = buildBFI(*F);
+ EXPECT_FALSE(PSI.isHotBB(&BB0, &BFI));
+ EXPECT_FALSE(PSI.isColdBB(&BB0, &BFI));
+
+ CallSite CS1(BB1->getFirstNonPHI());
+ EXPECT_FALSE(PSI.isHotCallSite(CS1, &BFI));
+ EXPECT_FALSE(PSI.isColdCallSite(CS1, &BFI));
+}
+
+TEST_F(ProfileSummaryInfoTest, TestCommon) {
+ auto M = makeLLVMModule("InstrProf");
+ Function *F = M->getFunction("f");
+ Function *G = M->getFunction("g");
+ Function *H = M->getFunction("h");
+
+ ProfileSummaryInfo PSI = buildPSI(M.get());
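+  // With the detailed summary above, the hot threshold resolves to a minimum
+  // count of 300 and the cold threshold to 5 (assuming the default hot/cold
+  // cutoffs), so 400 is hot, 2 is cold, and 100 is neither.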
+ EXPECT_TRUE(PSI.isHotCount(400));
+ EXPECT_TRUE(PSI.isColdCount(2));
+ EXPECT_FALSE(PSI.isColdCount(100));
+ EXPECT_FALSE(PSI.isHotCount(100));
+
+ EXPECT_TRUE(PSI.isFunctionEntryHot(F));
+ EXPECT_FALSE(PSI.isFunctionEntryHot(G));
+ EXPECT_FALSE(PSI.isFunctionEntryHot(H));
+}
+
+TEST_F(ProfileSummaryInfoTest, InstrProf) {
+ auto M = makeLLVMModule("InstrProf");
+ Function *F = M->getFunction("f");
+ ProfileSummaryInfo PSI = buildPSI(M.get());
+
+ BasicBlock &BB0 = F->getEntryBlock();
+ BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+ BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+ BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+ BlockFrequencyInfo BFI = buildBFI(*F);
+ EXPECT_TRUE(PSI.isHotBB(&BB0, &BFI));
+ EXPECT_TRUE(PSI.isHotBB(BB1, &BFI));
+ EXPECT_FALSE(PSI.isHotBB(BB2, &BFI));
+ EXPECT_TRUE(PSI.isHotBB(BB3, &BFI));
+
+ CallSite CS1(BB1->getFirstNonPHI());
+ auto *CI2 = BB2->getFirstNonPHI();
+ CallSite CS2(CI2);
+
+ EXPECT_TRUE(PSI.isHotCallSite(CS1, &BFI));
+ EXPECT_FALSE(PSI.isHotCallSite(CS2, &BFI));
+}
+
+TEST_F(ProfileSummaryInfoTest, SampleProf) {
+ auto M = makeLLVMModule("SampleProfile");
+ Function *F = M->getFunction("f");
+ ProfileSummaryInfo PSI = buildPSI(M.get());
+
+ BasicBlock &BB0 = F->getEntryBlock();
+ BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+ BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+ BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+ BlockFrequencyInfo BFI = buildBFI(*F);
+ EXPECT_TRUE(PSI.isHotBB(&BB0, &BFI));
+ EXPECT_TRUE(PSI.isHotBB(BB1, &BFI));
+ EXPECT_FALSE(PSI.isHotBB(BB2, &BFI));
+ EXPECT_TRUE(PSI.isHotBB(BB3, &BFI));
+
+ CallSite CS1(BB1->getFirstNonPHI());
+ auto *CI2 = BB2->getFirstNonPHI();
+ CallSite CS2(CI2);
+
+ EXPECT_TRUE(PSI.isHotCallSite(CS1, &BFI));
+ EXPECT_FALSE(PSI.isHotCallSite(CS2, &BFI));
+
+ // Test that CS2 is considered hot when it gets an MD_prof metadata with
+ // weights that exceed the hot count threshold.
+ MDBuilder MDB(M->getContext());
+ CI2->setMetadata(llvm::LLVMContext::MD_prof, MDB.createBranchWeights({400}));
+ EXPECT_TRUE(PSI.isHotCallSite(CS2, &BFI));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp
index f4370842edb5..df9fd4b5ec33 100644
--- a/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -51,13 +51,13 @@ protected:
return ScalarEvolution(F, TLI, *AC, *DT, *LI);
}
- void runWithFunctionAndSE(
+ void runWithSE(
Module &M, StringRef FuncName,
- function_ref<void(Function &F, ScalarEvolution &SE)> Test) {
+ function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
auto *F = M.getFunction(FuncName);
ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
ScalarEvolution SE = buildSE(*F);
- Test(*F, SE);
+ Test(*F, *LI, SE);
}
};
@@ -306,9 +306,11 @@ TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
// %bitcast2 = bitcast i8* %select to i32*
// br i1 undef, label %loop, label %exit
+ const DataLayout &DL = F->getParent()->getDataLayout();
BranchInst *Br = BranchInst::Create(
LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
- AllocaInst *Alloca = new AllocaInst(I32Ty, "alloca", Br);
+ AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
+ "alloca", Br);
ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
GetElementPtrInst *Gep0 =
GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
@@ -417,7 +419,7 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
assert(M && "Could not parse module?");
assert(!verifyModule(*M) && "Must have been well formed!");
- runWithFunctionAndSE(*M, "f_1", [&](Function &F, ScalarEvolution &SE) {
+ runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
auto *IV0 = getInstructionByName(F, "iv0");
auto *IV0Inc = getInstructionByName(F, "iv0.inc");
@@ -458,11 +460,12 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
};
for (StringRef FuncName : {"f_2", "f_3", "f_4"})
- runWithFunctionAndSE(*M, FuncName, [&](Function &F, ScalarEvolution &SE) {
- CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
- SE.getSCEV(getInstructionByName(F, "y")),
- SE.getSCEV(getInstructionByName(F, "z")));
- });
+ runWithSE(
+ *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+ CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
+ SE.getSCEV(getInstructionByName(F, "y")),
+ SE.getSCEV(getInstructionByName(F, "z")));
+ });
}
TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
@@ -568,5 +571,100 @@ TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
EXPECT_NE(A, B);
}
+TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
+ Type *Ty32 = Type::getInt32Ty(Context);
+ Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
+ Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+
+ Argument *A1 = &*F->arg_begin();
+ Argument *A2 = &*(std::next(F->arg_begin()));
+ BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+
+ Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
+ Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
+ Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
+ Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
+ Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+ // FIXME: The size of this is arbitrary and doesn't seem to change the
+ // result, but SCEV will do quadratic work for these so a large number here
+ // will be extremely slow. We should revisit what and how this is testing
+ // SCEV.
+ for (int i = 0; i < 10; i++) {
+ Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
+ Add1 = Add2;
+ Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+ }
+
+ ReturnInst::Create(Context, nullptr, EntryBB);
+ ScalarEvolution SE = buildSE(*F);
+ EXPECT_NE(nullptr, SE.getSCEV(Mul1));
+}
+
+static Instruction &GetInstByName(Function &F, StringRef Name) {
+ for (auto &I : instructions(F))
+ if (I.getName() == Name)
+ return I;
+  llvm_unreachable("Could not find instruction!");
+}
+
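+// normalizeForPostIncUse rewrites an add rec for post-increment use in the
+// given loops, and denormalizeForPostIncUse inverts it; the test below
+// checks that the round trip is the identity.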
+TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
+ LLVMContext C;
+ SMDiagnostic Err;
+ std::unique_ptr<Module> M = parseAssemblyString(
+ "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+ " "
+ "@var_0 = external global i32, align 4"
+ "@var_1 = external global i32, align 4"
+ "@var_2 = external global i32, align 4"
+ " "
+ "declare i32 @unknown(i32, i32, i32)"
+ " "
+ "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+ " local_unnamed_addr { "
+ "entry: "
+ " br label %loop.ph "
+ " "
+ "loop.ph: "
+ " br label %loop "
+ " "
+ "loop: "
+ " %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
+ " %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
+ " %iv0.inc = add i32 %iv0, 1 "
+ " %iv1.inc = add i32 %iv1, 3 "
+ " br i1 undef, label %for.end.loopexit, label %loop "
+ " "
+ "for.end.loopexit: "
+ " ret void "
+ "} "
+ ,
+ Err, C);
+
+ assert(M && "Could not parse module?");
+ assert(!verifyModule(*M) && "Must have been well formed!");
+
+ runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+ auto &I0 = GetInstByName(F, "iv0");
+ auto &I1 = *I0.getNextNode();
+
+ auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
+ PostIncLoopSet Loops;
+ Loops.insert(S0->getLoop());
+ auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
+ auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
+ EXPECT_EQ(S0, D0) << *S0 << " " << *D0;
+
+ auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
+ Loops.clear();
+ Loops.insert(S1->getLoop());
+ auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
+ auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
+ EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
+ });
+}
+
} // end anonymous namespace
} // end namespace llvm
diff --git a/unittests/Analysis/TargetLibraryInfoTest.cpp b/unittests/Analysis/TargetLibraryInfoTest.cpp
new file mode 100644
index 000000000000..598429c968aa
--- /dev/null
+++ b/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -0,0 +1,481 @@
+//===--- TargetLibraryInfoTest.cpp - TLI/LibFunc unit tests ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+class TargetLibraryInfoTest : public testing::Test {
+protected:
+ LLVMContext Context;
+ TargetLibraryInfoImpl TLII;
+ TargetLibraryInfo TLI;
+
+ std::unique_ptr<Module> M;
+
+ TargetLibraryInfoTest() : TLI(TLII) {}
+
+ void parseAssembly(const char *Assembly) {
+ SMDiagnostic Error;
+ M = parseAssemblyString(Assembly, Error, Context);
+
+ std::string errMsg;
+ raw_string_ostream os(errMsg);
+ Error.print("", os);
+
+ if (!M)
+ report_fatal_error(os.str());
+ }
+
+ ::testing::AssertionResult isLibFunc(const Function *FDecl,
+ LibFunc ExpectedLF) {
+ StringRef ExpectedLFName = TLI.getName(ExpectedLF);
+
+ if (!FDecl)
+ return ::testing::AssertionFailure() << ExpectedLFName << " not found";
+
+ LibFunc F;
+ if (!TLI.getLibFunc(*FDecl, F))
+ return ::testing::AssertionFailure() << ExpectedLFName << " invalid";
+
+ return ::testing::AssertionSuccess() << ExpectedLFName << " is LibFunc";
+ }
+};
+
+} // end anonymous namespace
+
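+// Typical client usage of the interface under test (a sketch; Callee stands
+// in for some Function* at hand): look the callee up, then check
+// availability before transforming calls to it.
+//   LibFunc TheLibFunc;
+//   if (TLI.getLibFunc(*Callee, TheLibFunc) && TLI.has(TheLibFunc)) {
+//     // Safe to treat Callee as the recognized library function.
+//   }
+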
+// Check that we don't accept egregiously incorrect prototypes.
+TEST_F(TargetLibraryInfoTest, InvalidProto) {
+ parseAssembly("%foo = type { %foo }\n");
+
+ auto *StructTy = M->getTypeByName("foo");
+ auto *InvalidFTy = FunctionType::get(StructTy, /*isVarArg=*/false);
+
+ for (unsigned FI = 0; FI != LibFunc::NumLibFuncs; ++FI) {
+ LibFunc LF = (LibFunc)FI;
+ auto *F = cast<Function>(
+ M->getOrInsertFunction(TLI.getName(LF), InvalidFTy));
+ EXPECT_FALSE(isLibFunc(F, LF));
+ }
+}
+
+// Check that we do accept known-correct prototypes.
+TEST_F(TargetLibraryInfoTest, ValidProto) {
+ parseAssembly(
+ // These functions use a 64-bit size_t; use the appropriate datalayout.
+ "target datalayout = \"p:64:64:64\"\n"
+
+ // Struct pointers are replaced with an opaque pointer.
+ "%struct = type opaque\n"
+
+ // These functions were extracted as-is from the OS X headers.
+ "declare double @__cospi(double)\n"
+ "declare float @__cospif(float)\n"
+ "declare { double, double } @__sincospi_stret(double)\n"
+ "declare <2 x float> @__sincospif_stret(float)\n"
+ "declare double @__sinpi(double)\n"
+ "declare float @__sinpif(float)\n"
+ "declare i32 @abs(i32)\n"
+ "declare i32 @access(i8*, i32)\n"
+ "declare double @acos(double)\n"
+ "declare float @acosf(float)\n"
+ "declare double @acosh(double)\n"
+ "declare float @acoshf(float)\n"
+ "declare x86_fp80 @acoshl(x86_fp80)\n"
+ "declare x86_fp80 @acosl(x86_fp80)\n"
+ "declare double @asin(double)\n"
+ "declare float @asinf(float)\n"
+ "declare double @asinh(double)\n"
+ "declare float @asinhf(float)\n"
+ "declare x86_fp80 @asinhl(x86_fp80)\n"
+ "declare x86_fp80 @asinl(x86_fp80)\n"
+ "declare double @atan(double)\n"
+ "declare double @atan2(double, double)\n"
+ "declare float @atan2f(float, float)\n"
+ "declare x86_fp80 @atan2l(x86_fp80, x86_fp80)\n"
+ "declare float @atanf(float)\n"
+ "declare double @atanh(double)\n"
+ "declare float @atanhf(float)\n"
+ "declare x86_fp80 @atanhl(x86_fp80)\n"
+ "declare x86_fp80 @atanl(x86_fp80)\n"
+ "declare double @atof(i8*)\n"
+ "declare i32 @atoi(i8*)\n"
+ "declare i64 @atol(i8*)\n"
+ "declare i64 @atoll(i8*)\n"
+ "declare i32 @bcmp(i8*, i8*, i64)\n"
+ "declare void @bcopy(i8*, i8*, i64)\n"
+ "declare void @bzero(i8*, i64)\n"
+ "declare i8* @calloc(i64, i64)\n"
+ "declare double @cbrt(double)\n"
+ "declare float @cbrtf(float)\n"
+ "declare x86_fp80 @cbrtl(x86_fp80)\n"
+ "declare double @ceil(double)\n"
+ "declare float @ceilf(float)\n"
+ "declare x86_fp80 @ceill(x86_fp80)\n"
+ "declare i32 @chown(i8*, i32, i32)\n"
+ "declare void @clearerr(%struct*)\n"
+ "declare double @copysign(double, double)\n"
+ "declare float @copysignf(float, float)\n"
+ "declare x86_fp80 @copysignl(x86_fp80, x86_fp80)\n"
+ "declare double @cos(double)\n"
+ "declare float @cosf(float)\n"
+ "declare double @cosh(double)\n"
+ "declare float @coshf(float)\n"
+ "declare x86_fp80 @coshl(x86_fp80)\n"
+ "declare x86_fp80 @cosl(x86_fp80)\n"
+ "declare i8* @ctermid(i8*)\n"
+ "declare double @exp(double)\n"
+ "declare double @exp2(double)\n"
+ "declare float @exp2f(float)\n"
+ "declare x86_fp80 @exp2l(x86_fp80)\n"
+ "declare float @expf(float)\n"
+ "declare x86_fp80 @expl(x86_fp80)\n"
+ "declare double @expm1(double)\n"
+ "declare float @expm1f(float)\n"
+ "declare x86_fp80 @expm1l(x86_fp80)\n"
+ "declare double @fabs(double)\n"
+ "declare float @fabsf(float)\n"
+ "declare x86_fp80 @fabsl(x86_fp80)\n"
+ "declare i32 @fclose(%struct*)\n"
+ "declare i32 @feof(%struct*)\n"
+ "declare i32 @ferror(%struct*)\n"
+ "declare i32 @fflush(%struct*)\n"
+ "declare i32 @ffs(i32)\n"
+ "declare i32 @ffsl(i64)\n"
+ "declare i32 @ffsll(i64)\n"
+ "declare i32 @fgetc(%struct*)\n"
+ "declare i32 @fgetpos(%struct*, i64*)\n"
+ "declare i8* @fgets(i8*, i32, %struct*)\n"
+ "declare i32 @fileno(%struct*)\n"
+ "declare void @flockfile(%struct*)\n"
+ "declare double @floor(double)\n"
+ "declare float @floorf(float)\n"
+ "declare x86_fp80 @floorl(x86_fp80)\n"
+ "declare i32 @fls(i32)\n"
+ "declare i32 @flsl(i64)\n"
+ "declare i32 @flsll(i64)\n"
+ "declare double @fmax(double, double)\n"
+ "declare float @fmaxf(float, float)\n"
+ "declare x86_fp80 @fmaxl(x86_fp80, x86_fp80)\n"
+ "declare double @fmin(double, double)\n"
+ "declare float @fminf(float, float)\n"
+ "declare x86_fp80 @fminl(x86_fp80, x86_fp80)\n"
+ "declare double @fmod(double, double)\n"
+ "declare float @fmodf(float, float)\n"
+ "declare x86_fp80 @fmodl(x86_fp80, x86_fp80)\n"
+ "declare i32 @fprintf(%struct*, i8*, ...)\n"
+ "declare i32 @fputc(i32, %struct*)\n"
+ "declare i64 @fread(i8*, i64, i64, %struct*)\n"
+ "declare void @free(i8*)\n"
+ "declare double @frexp(double, i32*)\n"
+ "declare float @frexpf(float, i32*)\n"
+ "declare x86_fp80 @frexpl(x86_fp80, i32*)\n"
+ "declare i32 @fscanf(%struct*, i8*, ...)\n"
+ "declare i32 @fseek(%struct*, i64, i32)\n"
+ "declare i32 @fseeko(%struct*, i64, i32)\n"
+ "declare i32 @fsetpos(%struct*, i64*)\n"
+ "declare i32 @fstatvfs(i32, %struct*)\n"
+ "declare i64 @ftell(%struct*)\n"
+ "declare i64 @ftello(%struct*)\n"
+ "declare i32 @ftrylockfile(%struct*)\n"
+ "declare void @funlockfile(%struct*)\n"
+ "declare i32 @getc(%struct*)\n"
+ "declare i32 @getc_unlocked(%struct*)\n"
+ "declare i32 @getchar()\n"
+ "declare i8* @getenv(i8*)\n"
+ "declare i32 @getitimer(i32, %struct*)\n"
+ "declare i32 @getlogin_r(i8*, i64)\n"
+ "declare %struct* @getpwnam(i8*)\n"
+ "declare i8* @gets(i8*)\n"
+ "declare i32 @gettimeofday(%struct*, i8*)\n"
+ "declare i32 @_Z7isasciii(i32)\n"
+ "declare i32 @_Z7isdigiti(i32)\n"
+ "declare i64 @labs(i64)\n"
+ "declare double @ldexp(double, i32)\n"
+ "declare float @ldexpf(float, i32)\n"
+ "declare x86_fp80 @ldexpl(x86_fp80, i32)\n"
+ "declare i64 @llabs(i64)\n"
+ "declare double @log(double)\n"
+ "declare double @log10(double)\n"
+ "declare float @log10f(float)\n"
+ "declare x86_fp80 @log10l(x86_fp80)\n"
+ "declare double @log1p(double)\n"
+ "declare float @log1pf(float)\n"
+ "declare x86_fp80 @log1pl(x86_fp80)\n"
+ "declare double @log2(double)\n"
+ "declare float @log2f(float)\n"
+ "declare x86_fp80 @log2l(x86_fp80)\n"
+ "declare double @logb(double)\n"
+ "declare float @logbf(float)\n"
+ "declare x86_fp80 @logbl(x86_fp80)\n"
+ "declare float @logf(float)\n"
+ "declare x86_fp80 @logl(x86_fp80)\n"
+ "declare i8* @malloc(i64)\n"
+ "declare i8* @memccpy(i8*, i8*, i32, i64)\n"
+ "declare i8* @memchr(i8*, i32, i64)\n"
+ "declare i32 @memcmp(i8*, i8*, i64)\n"
+ "declare i8* @memcpy(i8*, i8*, i64)\n"
+ "declare i8* @memmove(i8*, i8*, i64)\n"
+ "declare i8* @memset(i8*, i32, i64)\n"
+ "declare void @memset_pattern16(i8*, i8*, i64)\n"
+ "declare i32 @mkdir(i8*, i16)\n"
+ "declare double @modf(double, double*)\n"
+ "declare float @modff(float, float*)\n"
+ "declare x86_fp80 @modfl(x86_fp80, x86_fp80*)\n"
+ "declare double @nearbyint(double)\n"
+ "declare float @nearbyintf(float)\n"
+ "declare x86_fp80 @nearbyintl(x86_fp80)\n"
+ "declare i32 @pclose(%struct*)\n"
+ "declare void @perror(i8*)\n"
+ "declare i32 @posix_memalign(i8**, i64, i64)\n"
+ "declare double @pow(double, double)\n"
+ "declare float @powf(float, float)\n"
+ "declare x86_fp80 @powl(x86_fp80, x86_fp80)\n"
+ "declare i32 @printf(i8*, ...)\n"
+ "declare i32 @putc(i32, %struct*)\n"
+ "declare i32 @putchar(i32)\n"
+ "declare i32 @puts(i8*)\n"
+ "declare void @qsort(i8*, i64, i64, i32 (i8*, i8*)*)\n"
+ "declare i64 @readlink(i8*, i8*, i64)\n"
+ "declare i8* @realloc(i8*, i64)\n"
+ "declare i8* @reallocf(i8*, i64)\n"
+ "declare i32 @remove(i8*)\n"
+ "declare i32 @rename(i8*, i8*)\n"
+ "declare void @rewind(%struct*)\n"
+ "declare double @rint(double)\n"
+ "declare float @rintf(float)\n"
+ "declare x86_fp80 @rintl(x86_fp80)\n"
+ "declare i32 @rmdir(i8*)\n"
+ "declare double @round(double)\n"
+ "declare float @roundf(float)\n"
+ "declare x86_fp80 @roundl(x86_fp80)\n"
+ "declare i32 @scanf(i8*, ...)\n"
+ "declare void @setbuf(%struct*, i8*)\n"
+ "declare i32 @setitimer(i32, %struct*, %struct*)\n"
+ "declare i32 @setvbuf(%struct*, i8*, i32, i64)\n"
+ "declare double @sin(double)\n"
+ "declare float @sinf(float)\n"
+ "declare double @sinh(double)\n"
+ "declare float @sinhf(float)\n"
+ "declare x86_fp80 @sinhl(x86_fp80)\n"
+ "declare x86_fp80 @sinl(x86_fp80)\n"
+ "declare i32 @snprintf(i8*, i64, i8*, ...)\n"
+ "declare i32 @sprintf(i8*, i8*, ...)\n"
+ "declare double @sqrt(double)\n"
+ "declare float @sqrtf(float)\n"
+ "declare x86_fp80 @sqrtl(x86_fp80)\n"
+ "declare i32 @sscanf(i8*, i8*, ...)\n"
+ "declare i32 @statvfs(i8*, %struct*)\n"
+ "declare i8* @stpcpy(i8*, i8*)\n"
+ "declare i8* @stpncpy(i8*, i8*, i64)\n"
+ "declare i32 @strcasecmp(i8*, i8*)\n"
+ "declare i8* @strcat(i8*, i8*)\n"
+ "declare i8* @strchr(i8*, i32)\n"
+ "declare i32 @strcmp(i8*, i8*)\n"
+ "declare i32 @strcoll(i8*, i8*)\n"
+ "declare i8* @strcpy(i8*, i8*)\n"
+ "declare i64 @strcspn(i8*, i8*)\n"
+ "declare i8* @strdup(i8*)\n"
+ "declare i64 @strlen(i8*)\n"
+ "declare i32 @strncasecmp(i8*, i8*, i64)\n"
+ "declare i8* @strncat(i8*, i8*, i64)\n"
+ "declare i32 @strncmp(i8*, i8*, i64)\n"
+ "declare i8* @strncpy(i8*, i8*, i64)\n"
+ "declare i8* @strndup(i8*, i64)\n"
+ "declare i64 @strnlen(i8*, i64)\n"
+ "declare i8* @strpbrk(i8*, i8*)\n"
+ "declare i8* @strrchr(i8*, i32)\n"
+ "declare i64 @strspn(i8*, i8*)\n"
+ "declare i8* @strstr(i8*, i8*)\n"
+ "declare i8* @strtok(i8*, i8*)\n"
+ "declare i8* @strtok_r(i8*, i8*, i8**)\n"
+ "declare i64 @strtol(i8*, i8**, i32)\n"
+ "declare x86_fp80 @strtold(i8*, i8**)\n"
+ "declare i64 @strtoll(i8*, i8**, i32)\n"
+ "declare i64 @strtoul(i8*, i8**, i32)\n"
+ "declare i64 @strtoull(i8*, i8**, i32)\n"
+ "declare i64 @strxfrm(i8*, i8*, i64)\n"
+ "declare double @tan(double)\n"
+ "declare float @tanf(float)\n"
+ "declare double @tanh(double)\n"
+ "declare float @tanhf(float)\n"
+ "declare x86_fp80 @tanhl(x86_fp80)\n"
+ "declare x86_fp80 @tanl(x86_fp80)\n"
+ "declare i64 @times(%struct*)\n"
+ "declare %struct* @tmpfile()\n"
+ "declare i32 @_Z7toasciii(i32)\n"
+ "declare double @trunc(double)\n"
+ "declare float @truncf(float)\n"
+ "declare x86_fp80 @truncl(x86_fp80)\n"
+ "declare i32 @uname(%struct*)\n"
+ "declare i32 @ungetc(i32, %struct*)\n"
+ "declare i32 @unlink(i8*)\n"
+ "declare i32 @utime(i8*, %struct*)\n"
+ "declare i32 @utimes(i8*, %struct*)\n"
+ "declare i8* @valloc(i64)\n"
+ "declare i32 @vfprintf(%struct*, i8*, %struct*)\n"
+ "declare i32 @vfscanf(%struct*, i8*, %struct*)\n"
+ "declare i32 @vprintf(i8*, %struct*)\n"
+ "declare i32 @vscanf(i8*, %struct*)\n"
+ "declare i32 @vsnprintf(i8*, i64, i8*, %struct*)\n"
+ "declare i32 @vsprintf(i8*, i8*, %struct*)\n"
+ "declare i32 @vsscanf(i8*, i8*, %struct*)\n"
+
+ // These functions were also extracted from the OS X headers, but they are
+ // available with a special name on darwin.
+ // This test uses the default TLI name instead.
+ "declare i32 @chmod(i8*, i16)\n"
+ "declare i32 @closedir(%struct*)\n"
+ "declare %struct* @fdopen(i32, i8*)\n"
+ "declare %struct* @fopen(i8*, i8*)\n"
+ "declare i32 @fputs(i8*, %struct*)\n"
+ "declare i32 @fstat(i32, %struct*)\n"
+ "declare i64 @fwrite(i8*, i64, i64, %struct*)\n"
+ "declare i32 @lchown(i8*, i32, i32)\n"
+ "declare i32 @lstat(i8*, %struct*)\n"
+ "declare i64 @mktime(%struct*)\n"
+ "declare i32 @open(i8*, i32, ...)\n"
+ "declare %struct* @opendir(i8*)\n"
+ "declare %struct* @popen(i8*, i8*)\n"
+ "declare i64 @pread(i32, i8*, i64, i64)\n"
+ "declare i64 @pwrite(i32, i8*, i64, i64)\n"
+ "declare i64 @read(i32, i8*, i64)\n"
+ "declare i8* @realpath(i8*, i8*)\n"
+ "declare i32 @stat(i8*, %struct*)\n"
+ "declare double @strtod(i8*, i8**)\n"
+ "declare float @strtof(i8*, i8**)\n"
+ "declare i32 @system(i8*)\n"
+ "declare i32 @unsetenv(i8*)\n"
+ "declare i64 @write(i32, i8*, i64)\n"
+
+ // These functions are available on Linux but not Darwin; they only differ
+ // from their non-64 counterparts in the struct type.
+ // Use the same prototype as the non-64 variant.
+ "declare %struct* @fopen64(i8*, i8*)\n"
+ "declare i32 @fstat64(i32, %struct*)\n"
+ "declare i32 @fstatvfs64(i32, %struct*)\n"
+ "declare i32 @lstat64(i8*, %struct*)\n"
+ "declare i32 @open64(i8*, i32, ...)\n"
+ "declare i32 @stat64(i8*, %struct*)\n"
+ "declare i32 @statvfs64(i8*, %struct*)\n"
+ "declare %struct* @tmpfile64()\n"
+
+ // These functions are also -64 variants, but do differ in the type of the
+ // off_t (vs off64_t) parameter. The non-64 variants declared above used
+ // a 64-bit off_t, so, in practice, they are also equivalent.
+ "declare i32 @fseeko64(%struct*, i64, i32)\n"
+ "declare i64 @ftello64(%struct*)\n"
+
+ "declare void @_ZdaPv(i8*)\n"
+ "declare void @_ZdaPvRKSt9nothrow_t(i8*, %struct*)\n"
+ "declare void @_ZdaPvj(i8*, i32)\n"
+ "declare void @_ZdaPvm(i8*, i64)\n"
+ "declare void @_ZdlPv(i8*)\n"
+ "declare void @_ZdlPvRKSt9nothrow_t(i8*, %struct*)\n"
+ "declare void @_ZdlPvj(i8*, i32)\n"
+ "declare void @_ZdlPvm(i8*, i64)\n"
+ "declare i8* @_Znaj(i32)\n"
+ "declare i8* @_ZnajRKSt9nothrow_t(i32, %struct*)\n"
+ "declare i8* @_Znam(i64)\n"
+ "declare i8* @_ZnamRKSt9nothrow_t(i64, %struct*)\n"
+ "declare i8* @_Znwj(i32)\n"
+ "declare i8* @_ZnwjRKSt9nothrow_t(i32, %struct*)\n"
+ "declare i8* @_Znwm(i64)\n"
+ "declare i8* @_ZnwmRKSt9nothrow_t(i64, %struct*)\n"
+
+ "declare void @\"??3@YAXPEAX@Z\"(i8*)\n"
+ "declare void @\"??3@YAXPEAXAEBUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+ "declare void @\"??3@YAXPEAX_K@Z\"(i8*, i64)\n"
+ "declare void @\"??_V@YAXPEAX@Z\"(i8*)\n"
+ "declare void @\"??_V@YAXPEAXAEBUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+ "declare void @\"??_V@YAXPEAX_K@Z\"(i8*, i64)\n"
+ "declare i8* @\"??2@YAPAXI@Z\"(i32)\n"
+ "declare i8* @\"??2@YAPAXIABUnothrow_t@std@@@Z\"(i32, %struct*)\n"
+ "declare i8* @\"??2@YAPEAX_K@Z\"(i64)\n"
+ "declare i8* @\"??2@YAPEAX_KAEBUnothrow_t@std@@@Z\"(i64, %struct*)\n"
+ "declare i8* @\"??_U@YAPAXI@Z\"(i32)\n"
+ "declare i8* @\"??_U@YAPAXIABUnothrow_t@std@@@Z\"(i32, %struct*)\n"
+ "declare i8* @\"??_U@YAPEAX_K@Z\"(i64)\n"
+ "declare i8* @\"??_U@YAPEAX_KAEBUnothrow_t@std@@@Z\"(i64, %struct*)\n"
+
+ "declare void @\"??3@YAXPAX@Z\"(i8*)\n"
+ "declare void @\"??3@YAXPAXABUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+ "declare void @\"??3@YAXPAXI@Z\"(i8*, i32)\n"
+ "declare void @\"??_V@YAXPAX@Z\"(i8*)\n"
+ "declare void @\"??_V@YAXPAXABUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+ "declare void @\"??_V@YAXPAXI@Z\"(i8*, i32)\n"
+
+ // These other functions were derived from the .def C declaration.
+ "declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)\n"
+ "declare void @__cxa_guard_abort(%struct*)\n"
+ "declare i32 @__cxa_guard_acquire(%struct*)\n"
+ "declare void @__cxa_guard_release(%struct*)\n"
+
+ "declare i32 @__nvvm_reflect(i8*)\n"
+
+ "declare i8* @__memcpy_chk(i8*, i8*, i64, i64)\n"
+ "declare i8* @__memmove_chk(i8*, i8*, i64, i64)\n"
+ "declare i8* @__memset_chk(i8*, i32, i64, i64)\n"
+ "declare i8* @__stpcpy_chk(i8*, i8*, i64)\n"
+ "declare i8* @__stpncpy_chk(i8*, i8*, i64, i64)\n"
+ "declare i8* @__strcpy_chk(i8*, i8*, i64)\n"
+ "declare i8* @__strncpy_chk(i8*, i8*, i64, i64)\n"
+
+ "declare i8* @memalign(i64, i64)\n"
+ "declare i8* @mempcpy(i8*, i8*, i64)\n"
+ "declare i8* @memrchr(i8*, i32, i64)\n"
+
+ // These are similar to the FILE* fgetc/fputc.
+ "declare i32 @_IO_getc(%struct*)\n"
+ "declare i32 @_IO_putc(i32, %struct*)\n"
+
+ "declare i32 @__isoc99_scanf(i8*, ...)\n"
+ "declare i32 @__isoc99_sscanf(i8*, i8*, ...)\n"
+ "declare i8* @__strdup(i8*)\n"
+ "declare i8* @__strndup(i8*, i64)\n"
+ "declare i8* @__strtok_r(i8*, i8*, i8**)\n"
+
+ "declare double @__sqrt_finite(double)\n"
+ "declare float @__sqrtf_finite(float)\n"
+ "declare x86_fp80 @__sqrtl_finite(x86_fp80)\n"
+ "declare double @exp10(double)\n"
+ "declare float @exp10f(float)\n"
+ "declare x86_fp80 @exp10l(x86_fp80)\n"
+
+ // These printf variants have the same prototype as the non-'i' versions.
+ "declare i32 @fiprintf(%struct*, i8*, ...)\n"
+ "declare i32 @iprintf(i8*, ...)\n"
+ "declare i32 @siprintf(i8*, i8*, ...)\n"
+
+ "declare i32 @htonl(i32)\n"
+ "declare i16 @htons(i16)\n"
+ "declare i32 @ntohl(i32)\n"
+ "declare i16 @ntohs(i16)\n"
+
+ "declare i32 @isascii(i32)\n"
+ "declare i32 @isdigit(i32)\n"
+ "declare i32 @toascii(i32)\n"
+ );
+
+ for (unsigned FI = 0; FI != LibFunc::NumLibFuncs; ++FI) {
+ LibFunc LF = (LibFunc)FI;
+ // Make sure everything is available; we're not testing target defaults.
+ TLII.setAvailable(LF);
+ Function *F = M->getFunction(TLI.getName(LF));
+ EXPECT_TRUE(isLibFunc(F, LF));
+ }
+}
diff --git a/unittests/Analysis/UnrollAnalyzer.cpp b/unittests/Analysis/UnrollAnalyzer.cpp
index 6d11ab1f2f1b..d6a7bd360b93 100644
--- a/unittests/Analysis/UnrollAnalyzer.cpp
+++ b/unittests/Analysis/UnrollAnalyzer.cpp
@@ -61,7 +61,6 @@ struct UnrollAnalyzerTest : public FunctionPass {
char UnrollAnalyzerTest::ID = 0;
std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
- UnrollAnalyzerTest *P,
const char *ModuleStr) {
SMDiagnostic Err;
return parseAssemblyString(ModuleStr, Err, Context);
@@ -87,7 +86,7 @@ TEST(UnrollAnalyzerTest, BasicSimplifications) {
"}\n";
UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
LLVMContext Context;
- std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
legacy::PassManager Passes;
Passes.add(P);
Passes.run(*M);
@@ -150,7 +149,7 @@ TEST(UnrollAnalyzerTest, OuterLoopSimplification) {
UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
LLVMContext Context;
- std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
legacy::PassManager Passes;
Passes.add(P);
Passes.run(*M);
@@ -195,7 +194,7 @@ TEST(UnrollAnalyzerTest, CmpSimplifications) {
"}\n";
UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
LLVMContext Context;
- std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
legacy::PassManager Passes;
Passes.add(P);
Passes.run(*M);
@@ -242,7 +241,7 @@ TEST(UnrollAnalyzerTest, PtrCmpSimplifications) {
"}\n";
UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
LLVMContext Context;
- std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
legacy::PassManager Passes;
Passes.add(P);
Passes.run(*M);
@@ -288,7 +287,7 @@ TEST(UnrollAnalyzerTest, CastSimplifications) {
UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
LLVMContext Context;
- std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
legacy::PassManager Passes;
Passes.add(P);
Passes.run(*M);
diff --git a/unittests/Analysis/ValueTrackingTest.cpp b/unittests/Analysis/ValueTrackingTest.cpp
index ba0d30d59b66..3c8ecfbe1ee2 100644
--- a/unittests/Analysis/ValueTrackingTest.cpp
+++ b/unittests/Analysis/ValueTrackingTest.cpp
@@ -219,7 +219,7 @@ TEST(ValueTracking, GuaranteedToTransferExecutionToSuccessor) {
assert(F && "Bad assembly?");
auto &BB = F->getEntryBlock();
- ArrayRef<bool> ExpectedAnswers = {
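+  // A plain array avoids binding an ArrayRef to a temporary initializer
+  // list, which would dangle past this statement.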
+ bool ExpectedAnswers[] = {
true, // call void @nounwind_readonly(i32* %p)
true, // call void @nounwind_argmemonly(i32* %p)
false, // call void @throws_but_readonly(i32* %p)
@@ -239,3 +239,22 @@ TEST(ValueTracking, GuaranteedToTransferExecutionToSuccessor) {
Index++;
}
}
+
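+// PR32045: the shift amount here (-1, i.e. not smaller than the bit width)
+// makes the ashr result undefined, so ComputeNumSignBits should give the
+// conservative answer of 1.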
+TEST(ValueTracking, ComputeNumSignBits_PR32045) {
+ StringRef Assembly = "define i32 @f(i32 %a) { "
+ " %val = ashr i32 %a, -1 "
+ " ret i32 %val "
+ "} ";
+
+ LLVMContext Context;
+ SMDiagnostic Error;
+ auto M = parseAssemblyString(Assembly, Error, Context);
+ assert(M && "Bad assembly?");
+
+ auto *F = M->getFunction("f");
+ assert(F && "Bad assembly?");
+
+ auto *RVal =
+ cast<ReturnInst>(F->getEntryBlock().getTerminator())->getOperand(0);
+ EXPECT_EQ(ComputeNumSignBits(RVal, M->getDataLayout()), 1u);
+}