author     Dimitry Andric <dim@FreeBSD.org>   2018-06-27 19:14:09 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2018-06-27 19:14:09 +0000
commit     eb1edd4d5902fdc561fd68fa70400fbd11127998
tree       0b10ccde4b5d3acf243966db54f4f3afef10cf93 /lib/Transforms/Scalar
parent     2ed8710148a921286717212737771dd31c518fb7
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/CallSiteSplitting.cpp   77
-rw-r--r--  lib/Transforms/Scalar/DivRemPairs.cpp         11
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp       17
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                57
4 files changed, 155 insertions, 7 deletions
diff --git a/lib/Transforms/Scalar/CallSiteSplitting.cpp b/lib/Transforms/Scalar/CallSiteSplitting.cpp
index 4edea7cc3c825..7488cd5af8bed 100644
--- a/lib/Transforms/Scalar/CallSiteSplitting.cpp
+++ b/lib/Transforms/Scalar/CallSiteSplitting.cpp
@@ -201,6 +201,46 @@ static bool canSplitCallSite(CallSite CS) {
return CallSiteBB->canSplitPredecessors();
}
+static Instruction *cloneInstForMustTail(Instruction *I, Instruction *Before,
+ Value *V) {
+ Instruction *Copy = I->clone();
+ Copy->setName(I->getName());
+ Copy->insertBefore(Before);
+ if (V)
+ Copy->setOperand(0, V);
+ return Copy;
+}
+
+/// Copy the mandatory `musttail` return sequence that follows the original
+/// `CI`, and link it up to the `NewCI` value instead:
+///
+/// * (optional) `bitcast NewCI to ...`
+/// * `ret bitcast or NewCI`
+///
+/// Insert this sequence right before `SplitBB`'s terminator, which will be
+/// cleaned up later in `splitCallSite` below.
+static void copyMustTailReturn(BasicBlock *SplitBB, Instruction *CI,
+ Instruction *NewCI) {
+ bool IsVoid = SplitBB->getParent()->getReturnType()->isVoidTy();
+ auto II = std::next(CI->getIterator());
+
+ BitCastInst *BCI = dyn_cast<BitCastInst>(&*II);
+ if (BCI)
+ ++II;
+
+ ReturnInst *RI = dyn_cast<ReturnInst>(&*II);
+ assert(RI && "`musttail` call must be followed by `ret` instruction");
+
+ TerminatorInst *TI = SplitBB->getTerminator();
+ Value *V = NewCI;
+ if (BCI)
+ V = cloneInstForMustTail(BCI, TI, V);
+ cloneInstForMustTail(RI, TI, IsVoid ? nullptr : V);
+
+ // FIXME: remove TI here, `DuplicateInstructionsInSplitBetween` has a bug
+ // that prevents doing this now.
+}
+
/// Return true if the CS is split into its new predecessors which are directly
/// hooked to each of its original predecessors pointed by PredBB1 and PredBB2.
/// CallInst1 and CallInst2 will be the new call-sites placed in the new
@@ -245,6 +285,7 @@ static void splitCallSite(CallSite CS, BasicBlock *PredBB1, BasicBlock *PredBB2,
Instruction *CallInst1, Instruction *CallInst2) {
Instruction *Instr = CS.getInstruction();
BasicBlock *TailBB = Instr->getParent();
+ bool IsMustTailCall = CS.isMustTailCall();
assert(Instr == (TailBB->getFirstNonPHIOrDbg()) && "Unexpected call-site");
BasicBlock *SplitBlock1 =
@@ -276,9 +317,14 @@ static void splitCallSite(CallSite CS, BasicBlock *PredBB1, BasicBlock *PredBB2,
++ArgNo;
}
}
+ // Clone and place bitcast and return instructions before `TI`
+ if (IsMustTailCall) {
+ copyMustTailReturn(SplitBlock1, CS.getInstruction(), CallInst1);
+ copyMustTailReturn(SplitBlock2, CS.getInstruction(), CallInst2);
+ }
// Replace users of the original call with a PHI merging the split call-sites.
- if (Instr->getNumUses()) {
+ if (!IsMustTailCall && Instr->getNumUses()) {
PHINode *PN = PHINode::Create(Instr->getType(), 2, "phi.call",
TailBB->getFirstNonPHI());
PN->addIncoming(CallInst1, SplitBlock1);
@@ -290,8 +336,25 @@ static void splitCallSite(CallSite CS, BasicBlock *PredBB1, BasicBlock *PredBB2,
<< "\n");
DEBUG(dbgs() << " " << *CallInst2 << " in " << SplitBlock2->getName()
<< "\n");
- Instr->eraseFromParent();
+
NumCallSiteSplit++;
+
+ // FIXME: remove TI in `copyMustTailReturn`
+ if (IsMustTailCall) {
+ // Remove superfluous `br` terminators from the end of the Split blocks.
+ // NOTE: Removing a terminator removes the SplitBlock from the TailBB's
+ // predecessors. Therefore we must get the complete list of Splits before
+ // attempting removal.
+ SmallVector<BasicBlock *, 2> Splits(predecessors(TailBB));
+ assert(Splits.size() == 2 && "Expected exactly 2 splits!");
+ for (unsigned i = 0; i < Splits.size(); i++)
+ Splits[i]->getTerminator()->eraseFromParent();
+
+ // Erase the tail block once done with musttail patching
+ TailBB->eraseFromParent();
+ return;
+ }
+ Instr->eraseFromParent();
}
// Return true if the call-site has an argument which is a PHI with only
@@ -369,7 +432,17 @@ static bool doCallSiteSplitting(Function &F, TargetLibraryInfo &TLI) {
Function *Callee = CS.getCalledFunction();
if (!Callee || Callee->isDeclaration())
continue;
+
+ // Successful musttail call-site splits result in erased CI and erased BB.
+ // Check if such a path is possible before attempting the split.
+ bool IsMustTail = CS.isMustTailCall();
+
Changed |= tryToSplitCallSite(CS);
+
+ // There are no interesting instructions after this. The call site
+ // itself might have been erased during splitting.
+ if (IsMustTail)
+ break;
}
}
return Changed;
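For readers less familiar with the invariant being preserved above: a `musttail` call must be immediately followed by the enclosing function's return (optionally through a bitcast), and that return must hand back the call's own result. This is why the split call-sites cannot be rejoined through a `phi.call` merge; instead each split block receives its own cloned return and the original tail block is erased. A minimal source-level sketch of the fused call/return, assuming a Clang new enough to support the `[[clang::musttail]]` statement attribute (the function names are illustrative, not taken from this patch):

    int callee(int x) { return x + 1; }

    int caller(int x) {
      // The call and the return are fused: no instruction may execute between
      // them, and the return must yield exactly the call's result.
      [[clang::musttail]] return callee(x);
    }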
diff --git a/lib/Transforms/Scalar/DivRemPairs.cpp b/lib/Transforms/Scalar/DivRemPairs.cpp
index e383af89a3845..e1bc590c5c9ae 100644
--- a/lib/Transforms/Scalar/DivRemPairs.cpp
+++ b/lib/Transforms/Scalar/DivRemPairs.cpp
@@ -13,6 +13,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DivRemPairs.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -48,7 +50,10 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
// Insert all divide and remainder instructions into maps keyed by their
// operands and opcode (signed or unsigned).
- DenseMap<DivRemMapKey, Instruction *> DivMap, RemMap;
+ DenseMap<DivRemMapKey, Instruction *> DivMap;
+ // Use a MapVector for RemMap so that instructions are moved/inserted in a
+ // deterministic order.
+ MapVector<DivRemMapKey, Instruction *> RemMap;
for (auto &BB : F) {
for (auto &I : BB) {
if (I.getOpcode() == Instruction::SDiv)
@@ -67,14 +72,14 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
// rare than division.
for (auto &RemPair : RemMap) {
// Find the matching division instruction from the division map.
- Instruction *DivInst = DivMap[RemPair.getFirst()];
+ Instruction *DivInst = DivMap[RemPair.first];
if (!DivInst)
continue;
// We have a matching pair of div/rem instructions. If one dominates the
// other, hoist and/or replace one.
NumPairs++;
- Instruction *RemInst = RemPair.getSecond();
+ Instruction *RemInst = RemPair.second;
bool IsSigned = DivInst->getOpcode() == Instruction::SDiv;
bool HasDivRemOp = TTI.hasDivRemOp(DivInst->getType(), IsSigned);
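The switch above from `DenseMap` to `MapVector` for `RemMap` is purely about iteration order: `DenseMap` iterates in hash order, which can differ from run to run, whereas `MapVector` remembers insertion order, so the rem/div rewrites are applied deterministically. A small standalone sketch of the underlying idea (illustrative only; this is not LLVM's `MapVector` implementation):

    #include <cstddef>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // An insertion-ordered map: a hash map from key to slot index, backed by a
    // vector that preserves the order in which keys were first inserted.
    template <typename K, typename V> class InsertionOrderedMap {
      std::unordered_map<K, std::size_t> Index;
      std::vector<std::pair<K, V>> Data;

    public:
      V &operator[](const K &Key) {
        auto It = Index.find(Key);
        if (It == Index.end()) {
          Index.emplace(Key, Data.size());
          Data.emplace_back(Key, V());
          return Data.back().second;
        }
        return Data[It->second].second;
      }
      // Iteration visits entries in insertion order, not hash order.
      auto begin() { return Data.begin(); }
      auto end() { return Data.end(); }
    };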
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 141c9938bf8be..2f1645433fb87 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1454,6 +1454,9 @@ FindMostPopularDest(BasicBlock *BB,
if (PredToDest.second)
DestPopularity[PredToDest.second]++;
+ if (DestPopularity.empty())
+ return nullptr;
+
// Find the most popular dest.
DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
BasicBlock *MostPopularDest = DPI->first;
@@ -1629,8 +1632,20 @@ bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
// threadable destination (the common case) we can avoid this.
BasicBlock *MostPopularDest = OnlyDest;
- if (MostPopularDest == MultipleDestSentinel)
+ if (MostPopularDest == MultipleDestSentinel) {
+ // Remove any loop headers from the Dest list. ThreadEdge conservatively
+ // won't process them, but there might be other destinations that are
+ // eligible and that we still want to process.
+ erase_if(PredToDestList,
+ [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
+ return LoopHeaders.count(PredToDest.second) != 0;
+ });
+
+ if (PredToDestList.empty())
+ return false;
+
MostPopularDest = FindMostPopularDest(BB, PredToDestList);
+ }
// Now that we know what the most popular destination is, factor all
// predecessors that will jump to it into a single predecessor.
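The `erase_if` call added above is the familiar erase/remove filtering pattern: discard every (predecessor, destination) pair whose destination is a loop header, and only give up on the block if no threadable destination remains. A self-contained sketch using plain standard-library types (the names are illustrative, not the pass's real data structures):

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    using Edge = std::pair<std::string, std::string>; // (predecessor, destination)

    // Drop edges whose destination is a known loop header; report whether any
    // eligible destination survives the filtering.
    bool hasThreadableDest(std::vector<Edge> &PredToDest,
                           const std::unordered_set<std::string> &LoopHeaders) {
      PredToDest.erase(std::remove_if(PredToDest.begin(), PredToDest.end(),
                                      [&](const Edge &E) {
                                        return LoopHeaders.count(E.second) != 0;
                                      }),
                       PredToDest.end());
      return !PredToDest.empty();
    }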
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 9dc550ceaeca7..3e12649ddedca 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -223,6 +223,10 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
/// represented here for efficient lookup.
SmallPtrSet<Function *, 16> MRVFunctionsTracked;
+ /// MustTailCallees - Each function here is the callee of a non-removable
+ /// musttail call site.
+ SmallPtrSet<Function *, 16> MustTailCallees;
+
/// TrackingIncomingArguments - This is the set of functions for whose
/// arguments we make optimistic assumptions about and try to prove as
/// constants.
@@ -289,6 +293,18 @@ public:
TrackedRetVals.insert(std::make_pair(F, LatticeVal()));
}
+ /// AddMustTailCallee - Record that the SCCP solver found this function to be
+ /// called from a non-removable musttail call site.
+ void AddMustTailCallee(Function *F) {
+ MustTailCallees.insert(F);
+ }
+
+ /// Returns true if the given function is called from non-removable musttail
+ /// call site.
+ bool isMustTailCallee(Function *F) {
+ return MustTailCallees.count(F);
+ }
+
void AddArgumentTrackedFunction(Function *F) {
TrackingIncomingArguments.insert(F);
}
@@ -358,6 +374,12 @@ public:
return MRVFunctionsTracked;
}
+ /// getMustTailCallees - Get the set of functions which are called
+ /// from non-removable musttail call sites.
+ const SmallPtrSet<Function *, 16> getMustTailCallees() {
+ return MustTailCallees;
+ }
+
/// markOverdefined - Mark the specified value overdefined. This
/// works with both scalars and structs.
void markOverdefined(Value *V) {
@@ -1672,6 +1694,23 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
IV.isConstant() ? IV.getConstant() : UndefValue::get(V->getType());
}
assert(Const && "Constant is nullptr here!");
+
+ // Replacing a `musttail` call with a constant breaks the `musttail` invariant
+ // unless the call itself can be removed.
+ CallInst *CI = dyn_cast<CallInst>(V);
+ if (CI && CI->isMustTailCall() && !isInstructionTriviallyDead(CI)) {
+ CallSite CS(CI);
+ Function *F = CS.getCalledFunction();
+
+ // Don't zap returns of the callee
+ if (F)
+ Solver.AddMustTailCallee(F);
+
+ DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI
+ << " as a constant\n");
+ return false;
+ }
+
DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
// Replaces all of the uses of a variable with uses of the constant.
@@ -1802,10 +1841,26 @@ static void findReturnsToZap(Function &F,
if (!Solver.isArgumentTrackedFunction(&F))
return;
- for (BasicBlock &BB : F)
+ // There is a non-removable musttail call site of this function. Zapping
+ // returns is not allowed.
+ if (Solver.isMustTailCallee(&F)) {
+ DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
+ << " because a musttail call of it is present\n");
+ return;
+ }
+
+ for (BasicBlock &BB : F) {
+ if (CallInst *CI = BB.getTerminatingMustTailCall()) {
+ DEBUG(dbgs() << "Can't zap return of the block due to present "
+ << "musttail call : " << *CI << "\n");
+ (void)CI;
+ return;
+ }
+
if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
if (!isa<UndefValue>(RI->getOperand(0)))
ReturnsToZap.push_back(RI);
+ }
}
static bool runIPSCCP(Module &M, const DataLayout &DL,
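Taken together, the SCCP changes keep interprocedural constant propagation from breaking the `musttail` contract: the result of a live `musttail` call is never folded to a constant (the following `ret` must return the call's own value), and return-zapping is skipped both for functions that are callees of non-removable `musttail` call sites and for blocks that themselves end in such a call. A hedged source-level illustration, again assuming Clang's `[[clang::musttail]]` attribute and hypothetical function names:

    int identity(int x) { return x; }

    int forward(int x) {
      // Lowers to a musttail call whose result feeds the ret directly. Even if
      // the solver could prove a constant result, rewriting the returned value
      // would violate the musttail rules unless the call itself were removable,
      // so SCCP now leaves both this call and identity's returns alone.
      [[clang::musttail]] return identity(x);
    }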