Diffstat (limited to 'llvm/lib/Transforms/Utils/SimplifyCFG.cpp')
 llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 261
 1 file changed, 139 insertions(+), 122 deletions(-)
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 9e0483966d3e..d3a9a41aef15 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -271,10 +271,8 @@ class SimplifyCFGOpt {
bool tryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
IRBuilder<> &Builder);
- bool HoistThenElseCodeToIf(BranchInst *BI, const TargetTransformInfo &TTI,
- bool EqTermsOnly);
- bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
- const TargetTransformInfo &TTI);
+ bool HoistThenElseCodeToIf(BranchInst *BI, bool EqTermsOnly);
+ bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB);
bool SimplifyTerminatorOnSelect(Instruction *OldTerm, Value *Cond,
BasicBlock *TrueBB, BasicBlock *FalseBB,
uint32_t TrueWeight, uint32_t FalseWeight);
@@ -1086,7 +1084,7 @@ static void GetBranchWeights(Instruction *TI,
static void FitWeights(MutableArrayRef<uint64_t> Weights) {
uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
if (Max > UINT_MAX) {
- unsigned Offset = 32 - countLeadingZeros(Max);
+ unsigned Offset = 32 - llvm::countl_zero(Max);
for (uint64_t &I : Weights)
I >>= Offset;
}
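
For intuition, a standalone sketch of the same scaling (plain C++20; std::countl_zero stands in for llvm::countl_zero): when the largest weight exceeds UINT_MAX, every weight is shifted right just enough that the maximum lands exactly in 32 bits.

#include <algorithm>
#include <bit>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint64_t> Weights = {0x500000000ULL, 6, 1};
  uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
  if (Max > UINT_MAX) {
    // For Max > UINT_MAX, countl_zero(Max) < 32, so Offset is in [1, 32]
    // and Max >> Offset has its top bit at position 31.
    unsigned Offset = 32 - std::countl_zero(Max);
    for (uint64_t &W : Weights)
      W >>= Offset;
  }
  for (uint64_t W : Weights)
    std::printf("%llu\n", (unsigned long long)W);
}
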
@@ -1117,16 +1115,12 @@ static void CloneInstructionsIntoPredecessorBlockAndUpdateSSAUses(
RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
VMap[&BonusInst] = NewBonusInst;
- // If we moved a load, we cannot any longer claim any knowledge about
- // its potential value. The previous information might have been valid
+ // If we speculated an instruction, we need to drop any metadata that may
+ // result in undefined behavior, as the metadata might have been valid
// only given the branch precondition.
- // For an analogous reason, we must also drop all the metadata whose
- // semantics we don't understand. We *can* preserve !annotation, because
- // it is tied to the instruction itself, not the value or position.
// Similarly strip attributes on call parameters that may cause UB in
// the location the call is moved to.
- NewBonusInst->dropUndefImplyingAttrsAndUnknownMetadata(
- LLVMContext::MD_annotation);
+ NewBonusInst->dropUBImplyingAttrsAndMetadata();
NewBonusInst->insertInto(PredBlock, PTI->getIterator());
NewBonusInst->takeName(&BonusInst);
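
To illustrate (my example, not from the patch): metadata such as !nonnull on a speculated clone may have been justified only by the branch being hoisted past, so the clone must shed it. As I read the new API, dropUBImplyingAttrsAndMetadata() drops only the known UB-implying kinds instead of everything unknown, so the old explicit carve-out for !annotation is no longer needed.

// A hedged IR-level illustration (names are mine):
//
//   br i1 %p.nonnull, label %then, label %else
// then:                                   ; guarded: !nonnull is justified
//   %v = load ptr, ptr %pp, !nonnull !0
//
// Once the load is cloned into the predecessor, the guard no longer
// dominates it, so keeping !nonnull would let later passes assume a
// property that may not hold:
NewBonusInst->dropUBImplyingAttrsAndMetadata(); // sheds !nonnull, !range, ...
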
@@ -1462,7 +1456,7 @@ static bool isSafeToHoistInstr(Instruction *I, unsigned Flags) {
// If we have seen an instruction with side effects, it's unsafe to reorder an
// instruction which reads memory or itself has side effects.
if ((Flags & SkipSideEffect) &&
- (I->mayReadFromMemory() || I->mayHaveSideEffects()))
+ (I->mayReadFromMemory() || I->mayHaveSideEffects() || isa<AllocaInst>(I)))
return false;
// Reordering across an instruction which does not necessarily transfer
@@ -1490,14 +1484,43 @@ static bool isSafeToHoistInstr(Instruction *I, unsigned Flags) {
static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified = false);
+/// Helper function for HoistThenElseCodeToIf. Return true if identical
+/// instructions \p I1 and \p I2 can and should be hoisted.
+static bool shouldHoistCommonInstructions(Instruction *I1, Instruction *I2,
+ const TargetTransformInfo &TTI) {
+ // If we're going to hoist a call, make sure that the two instructions
+ // we're commoning/hoisting are both marked with musttail, or neither of
+ // them is marked as such. Otherwise, we might end up in a situation where
+ // we hoist from a block where the terminator is a `ret` to a block where
+ // the terminator is a `br`, and `musttail` calls expect to be followed by
+ // a return.
+ auto *C1 = dyn_cast<CallInst>(I1);
+ auto *C2 = dyn_cast<CallInst>(I2);
+ if (C1 && C2)
+ if (C1->isMustTailCall() != C2->isMustTailCall())
+ return false;
+
+ if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2))
+ return false;
+
+ // If either of the two call sites has the nomerge or convergent attribute,
+ // stop hoisting.
+ if (const auto *CB1 = dyn_cast<CallBase>(I1))
+ if (CB1->cannotMerge() || CB1->isConvergent())
+ return false;
+ if (const auto *CB2 = dyn_cast<CallBase>(I2))
+ if (CB2->cannotMerge() || CB2->isConvergent())
+ return false;
+
+ return true;
+}
+
/// Given a conditional branch that goes to BB1 and BB2, hoist any common code
/// in the two blocks up into the branch block. The caller of this function
/// guarantees that BI's block dominates BB1 and BB2. If EqTermsOnly is given,
only perform hoisting when both blocks contain only a terminator. In that
/// case, only the original BI will be replaced and selects for PHIs are added.
-bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI,
- const TargetTransformInfo &TTI,
- bool EqTermsOnly) {
+bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI, bool EqTermsOnly) {
// This does very trivial matching, with limited scanning, to find identical
// instructions in the two blocks. In particular, we don't want to get into
// O(M*N) situations here where M and N are the sizes of BB1 and BB2. As
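
A hedged illustration of the musttail hazard described in the new helper above (IR names are mine):

// entry:
//   br i1 %c, label %then, label %else
// then:
//   %r = musttail call i32 @callee(i32 %x)
//   ret i32 %r                     ; musttail must be followed by a ret
// else:
//   %s = call i32 @callee(i32 %x)  ; identical except for 'musttail'
//   ...
//
// Hoisting the call into %entry would leave the musttail call followed by
// a conditional branch rather than a ret, violating the musttail contract;
// hence hoisting is allowed only when both calls agree on musttail.
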
@@ -1572,37 +1595,13 @@ bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI,
goto HoistTerminator;
}
- if (I1->isIdenticalToWhenDefined(I2)) {
- // Even if the instructions are identical, it may not be safe to hoist
- // them if we have skipped over instructions with side effects or their
- // operands weren't hoisted.
- if (!isSafeToHoistInstr(I1, SkipFlagsBB1) ||
- !isSafeToHoistInstr(I2, SkipFlagsBB2))
- return Changed;
-
- // If we're going to hoist a call, make sure that the two instructions
- // we're commoning/hoisting are both marked with musttail, or neither of
- // them is marked as such. Otherwise, we might end up in a situation where
- // we hoist from a block where the terminator is a `ret` to a block where
- // the terminator is a `br`, and `musttail` calls expect to be followed by
- // a return.
- auto *C1 = dyn_cast<CallInst>(I1);
- auto *C2 = dyn_cast<CallInst>(I2);
- if (C1 && C2)
- if (C1->isMustTailCall() != C2->isMustTailCall())
- return Changed;
-
- if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2))
- return Changed;
-
- // If any of the two call sites has nomerge attribute, stop hoisting.
- if (const auto *CB1 = dyn_cast<CallBase>(I1))
- if (CB1->cannotMerge())
- return Changed;
- if (const auto *CB2 = dyn_cast<CallBase>(I2))
- if (CB2->cannotMerge())
- return Changed;
-
+ if (I1->isIdenticalToWhenDefined(I2) &&
+ // Even if the instructions are identical, it may not be safe to hoist
+ // them if we have skipped over instructions with side effects or their
+ // operands weren't hoisted.
+ isSafeToHoistInstr(I1, SkipFlagsBB1) &&
+ isSafeToHoistInstr(I2, SkipFlagsBB2) &&
+ shouldHoistCommonInstructions(I1, I2, TTI)) {
if (isa<DbgInfoIntrinsic>(I1) || isa<DbgInfoIntrinsic>(I2)) {
assert(isa<DbgInfoIntrinsic>(I1) && isa<DbgInfoIntrinsic>(I2));
// The debug location is an integral part of a debug info intrinsic
@@ -1618,19 +1617,7 @@ bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI,
if (!I2->use_empty())
I2->replaceAllUsesWith(I1);
I1->andIRFlags(I2);
- unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
- LLVMContext::MD_range,
- LLVMContext::MD_fpmath,
- LLVMContext::MD_invariant_load,
- LLVMContext::MD_nonnull,
- LLVMContext::MD_invariant_group,
- LLVMContext::MD_align,
- LLVMContext::MD_dereferenceable,
- LLVMContext::MD_dereferenceable_or_null,
- LLVMContext::MD_mem_parallel_loop_access,
- LLVMContext::MD_access_group,
- LLVMContext::MD_preserve_access_index};
- combineMetadata(I1, I2, KnownIDs, true);
+ combineMetadataForCSE(I1, I2, true);
// I1 and I2 are being combined into a single instruction. Its debug
// location is the merged locations of the original instructions.
@@ -1808,9 +1795,9 @@ static bool canSinkInstructions(
// Conservatively return false if I is an inline-asm instruction. Sinking
// and merging inline-asm instructions can potentially create arguments
// that cannot satisfy the inline-asm constraints.
- // If the instruction has nomerge attribute, return false.
+ // If the instruction has the nomerge or convergent attribute, return false.
if (const auto *C = dyn_cast<CallBase>(I))
- if (C->isInlineAsm() || C->cannotMerge())
+ if (C->isInlineAsm() || C->cannotMerge() || C->isConvergent())
return false;
// Each instruction must have zero or one use.
@@ -2455,9 +2442,13 @@ bool CompatibleSets::shouldBelongToSameSet(ArrayRef<InvokeInst *> Invokes) {
// Can we theoretically form the data operands for the merged `invoke`?
auto IsIllegalToMergeArguments = [](auto Ops) {
- Type *Ty = std::get<0>(Ops)->getType();
- assert(Ty == std::get<1>(Ops)->getType() && "Incompatible types?");
- return Ty->isTokenTy() && std::get<0>(Ops) != std::get<1>(Ops);
+ Use &U0 = std::get<0>(Ops);
+ Use &U1 = std::get<1>(Ops);
+ if (U0 == U1)
+ return false;
+ return U0->getType()->isTokenTy() ||
+ !canReplaceOperandWithVariable(cast<Instruction>(U0.getUser()),
+ U0.getOperandNo());
};
assert(Invokes.size() == 2 && "Always called with exactly two candidates.");
if (any_of(zip(Invokes[0]->data_ops(), Invokes[1]->data_ops()),
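
For context, a sketch of why differing operands need this check (my reading of the pass): merging two invokes into one call site turns differing arguments into a PHI in the merged block, which is never legal for token values and is rejected for operand positions that canReplaceOperandWithVariable says must stay fixed.

// invoke.a:                       ; merged form would need:
//   invoke void @f(i32 1) ...     ;   %arg = phi i32 [ 1, ... ], [ 2, ... ]
// invoke.b:                       ;   invoke void @f(i32 %arg) ...
//   invoke void @f(i32 2) ...     ; fine for i32, impossible for tokens
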
@@ -2571,7 +2562,7 @@ static void MergeCompatibleInvokesImpl(ArrayRef<InvokeInst *> Invokes,
// And finally, replace the original `invoke`s with an unconditional branch
// to the block with the merged `invoke`. Also, give that merged `invoke`
// the merged debugloc of all the original `invoke`s.
- const DILocation *MergedDebugLoc = nullptr;
+ DILocation *MergedDebugLoc = nullptr;
for (InvokeInst *II : Invokes) {
// Compute the debug location common to all the original `invoke`s.
if (!MergedDebugLoc)
@@ -2849,8 +2840,11 @@ static bool validateAndCostRequiredSelects(BasicBlock *BB, BasicBlock *ThenBB,
/// \endcode
///
/// \returns true if the conditional block is removed.
-bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
- const TargetTransformInfo &TTI) {
+bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI,
+ BasicBlock *ThenBB) {
+ if (!Options.SpeculateBlocks)
+ return false;
+
// Be conservative for now. FP select instruction can often be expensive.
Value *BrCond = BI->getCondition();
if (isa<FCmpInst>(BrCond))
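
Source-level intuition for the transform being gated here (a sketch of the shape, not the pass's literal output):

#include <cstdio>

// if (c) y = a + 1;  ==>  t = a + 1; y = c ? t : y;  (then-block speculated)
int speculate(bool C, int X, int A) {
  int Y = X;
  int T = A + 1;   // hoisted unconditionally: cheap and side-effect free
  Y = C ? T : Y;   // the PHI becomes a select
  return Y;
}

int main() {
  std::printf("%d %d\n", speculate(true, 5, 9), speculate(false, 5, 9));
}
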
@@ -3021,7 +3015,7 @@ bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
}
// Metadata can be dependent on the condition we are hoisting above.
- // Conservatively strip all metadata on the instruction. Drop the debug loc
+ // Strip all UB-implying metadata on the instruction. Drop the debug loc
// to avoid making it appear as if the condition is a constant, which would
// be misleading while debugging.
// Similarly strip attributes that may be dependent on the condition we are
@@ -3032,7 +3026,7 @@ bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
if (!isa<DbgAssignIntrinsic>(&I))
I.setDebugLoc(DebugLoc());
}
- I.dropUndefImplyingAttrsAndUnknownMetadata();
+ I.dropUBImplyingAttrsAndMetadata();
// Drop ephemeral values.
if (EphTracker.contains(&I)) {
@@ -3220,6 +3214,9 @@ FoldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
}
// Clone the instruction.
Instruction *N = BBI->clone();
+ // Insert the new instruction into its new home.
+ N->insertInto(EdgeBB, InsertPt);
+
if (BBI->hasName())
N->setName(BBI->getName() + ".c");
@@ -3235,7 +3232,8 @@ FoldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
if (!BBI->use_empty())
TranslateMap[&*BBI] = V;
if (!N->mayHaveSideEffects()) {
- N->deleteValue(); // Instruction folded away, don't need actual inst
+ N->eraseFromParent(); // Instruction folded away; the clone is no
+ // longer needed.
N = nullptr;
}
} else {
@@ -3243,9 +3241,6 @@ FoldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
TranslateMap[&*BBI] = N;
}
if (N) {
- // Insert the new instruction into its new home.
- N->insertInto(EdgeBB, InsertPt);
-
// Register the new instruction with the assumption cache if necessary.
if (auto *Assume = dyn_cast<AssumeInst>(N))
if (AC)
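
A note on the reordering across the last three hunks (inferred from the diff itself, not from the commit message): the clone is now linked into EdgeBB before the fold check, so a clone that folds away must be unlinked and destroyed together:

// before: N->deleteValue();      // never inserted: destroy the value only
// after:  N->eraseFromParent();  // already in EdgeBB: unlink, then destroy
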
@@ -3591,17 +3586,7 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI,
// If we need to invert the condition in the pred block to match, do so now.
if (InvertPredCond) {
- Value *NewCond = PBI->getCondition();
- if (NewCond->hasOneUse() && isa<CmpInst>(NewCond)) {
- CmpInst *CI = cast<CmpInst>(NewCond);
- CI->setPredicate(CI->getInversePredicate());
- } else {
- NewCond =
- Builder.CreateNot(NewCond, PBI->getCondition()->getName() + ".not");
- }
-
- PBI->setCondition(NewCond);
- PBI->swapSuccessors();
+ InvertBranch(PBI, Builder);
}
BasicBlock *UniqueSucc =
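
The removed lines moved into the InvertBranch helper (BasicBlockUtils); roughly, by my reading of the deleted code, it does:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch of the centralized logic, mirroring the lines deleted above.
static void invertBranchSketch(BranchInst *PBI, IRBuilderBase &Builder) {
  Value *NewCond = PBI->getCondition();
  if (NewCond->hasOneUse() && isa<CmpInst>(NewCond)) {
    // Sole use: flip the compare's predicate in place.
    auto *CI = cast<CmpInst>(NewCond);
    CI->setPredicate(CI->getInversePredicate());
  } else {
    NewCond = Builder.CreateNot(NewCond, NewCond->getName() + ".not");
  }
  PBI->setCondition(NewCond);
  PBI->swapSuccessors();
}
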
@@ -3887,7 +3872,7 @@ static Value *ensureValueAvailableInSuccessor(Value *V, BasicBlock *BB,
for (BasicBlock *PredBB : predecessors(Succ))
if (PredBB != BB)
PHI->addIncoming(
- AlternativeV ? AlternativeV : UndefValue::get(V->getType()), PredBB);
+ AlternativeV ? AlternativeV : PoisonValue::get(V->getType()), PredBB);
return PHI;
}
@@ -5150,14 +5135,18 @@ bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
Value* Cond = BI->getCondition();
assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
"The destinations are guaranteed to be different here.");
+ CallInst *Assumption;
if (BI->getSuccessor(0) == BB) {
- Builder.CreateAssumption(Builder.CreateNot(Cond));
+ Assumption = Builder.CreateAssumption(Builder.CreateNot(Cond));
Builder.CreateBr(BI->getSuccessor(1));
} else {
assert(BI->getSuccessor(1) == BB && "Incorrect CFG");
- Builder.CreateAssumption(Cond);
+ Assumption = Builder.CreateAssumption(Cond);
Builder.CreateBr(BI->getSuccessor(0));
}
+ if (Options.AC)
+ Options.AC->registerAssumption(cast<AssumeInst>(Assumption));
+
EraseTerminatorAndDCECond(BI);
Changed = true;
}
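
Why the explicit registration matters (a hedged note): ValueTracking-style queries discover assumes through the AssumptionCache rather than by scanning the IR, so an assume created mid-pass is invisible to them until registered, e.g.:

// Sketch (DL/CxtI/DT assumed from the surrounding pass context):
KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, Options.AC, CxtI, DT);
// Without the registerAssumption() call above, this query would not see
// the new assume, even though it is present in the IR.
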
@@ -5453,7 +5442,7 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
}
const APInt &CaseVal = Case.getCaseValue()->getValue();
if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) ||
- (CaseVal.getMinSignedBits() > MaxSignificantBitsInCond)) {
+ (CaseVal.getSignificantBits() > MaxSignificantBitsInCond)) {
DeadCases.push_back(Case.getCaseValue());
if (DTU)
--NumPerSuccessorCases[Successor];
@@ -5469,7 +5458,7 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
bool HasDefault =
!isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
const unsigned NumUnknownBits =
- Known.getBitWidth() - (Known.Zero | Known.One).countPopulation();
+ Known.getBitWidth() - (Known.Zero | Known.One).popcount();
assert(NumUnknownBits <= Known.getBitWidth());
if (HasDefault && DeadCases.empty() &&
NumUnknownBits < 64 /* avoid overflow */ &&
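
A standalone worked instance of the known-bits pruning above (plain C++20; raw masks stand in for the APInt intersects/isSubsetOf calls):

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  // Suppose analysis proved the i8 switch condition matches 0b0000'1xxx.
  uint8_t KnownZero = 0xF0, KnownOne = 0x08;
  unsigned NumUnknownBits =
      8 - std::popcount(unsigned(KnownZero | KnownOne)); // 3 unknown bits
  auto IsDeadCase = [&](uint8_t CaseVal) {
    return (KnownZero & CaseVal) != 0 ||   // Known.Zero.intersects(CaseVal)
           (KnownOne & ~CaseVal) != 0;     // !Known.One.isSubsetOf(CaseVal)
  };
  std::printf("unknown=%u dead(0x40)=%d dead(0x0B)=%d\n", NumUnknownBits,
              IsDeadCase(0x40), IsDeadCase(0x0B)); // unknown=3, dead, live
}
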
@@ -5860,7 +5849,7 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
// Check if the cases with the same result cover every value representable
// in the touched bits.
- if (BitMask.countPopulation() == Log2_32(CaseCount)) {
+ if (BitMask.popcount() == Log2_32(CaseCount)) {
if (!MinCaseVal->isNullValue())
Condition = Builder.CreateSub(Condition, MinCaseVal);
Value *And = Builder.CreateAnd(Condition, ~BitMask, "switch.and");
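
Worked instance of the mask test (my numbers): cases 4 and 6 share a result and differ only in bit 1, so BitMask = 0b010, popcount(BitMask) == Log2_32(2), and the select condition becomes ((x - 4) & ~0b010) == 0:

#include <cstdint>
#include <cstdio>

int main() {
  for (uint32_t X : {3u, 4u, 5u, 6u, 7u}) {
    bool TakesCommonResult = ((X - 4) & ~0b010u) == 0; // true only for 4, 6
    std::printf("%u -> %d\n", X, TakesCommonResult);
  }
}
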
@@ -6001,6 +5990,7 @@ private:
// For LinearMapKind, these are the constants used to derive the value.
ConstantInt *LinearOffset = nullptr;
ConstantInt *LinearMultiplier = nullptr;
+ bool LinearMapValWrapped = false;
// For ArrayKind, this is the array.
GlobalVariable *Array = nullptr;
@@ -6061,6 +6051,8 @@ SwitchLookupTable::SwitchLookupTable(
bool LinearMappingPossible = true;
APInt PrevVal;
APInt DistToPrev;
+ // When the linear map is monotonic, we can attach nsw.
+ bool Wrapped = false;
assert(TableSize >= 2 && "Should be a SingleValue table.");
// Check if there is the same distance between two consecutive values.
for (uint64_t I = 0; I < TableSize; ++I) {
@@ -6080,12 +6072,15 @@ SwitchLookupTable::SwitchLookupTable(
LinearMappingPossible = false;
break;
}
+ Wrapped |=
+ Dist.isStrictlyPositive() ? Val.sle(PrevVal) : Val.sgt(PrevVal);
}
PrevVal = Val;
}
if (LinearMappingPossible) {
LinearOffset = cast<ConstantInt>(TableContents[0]);
LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev);
+ LinearMapValWrapped = Wrapped;
Kind = LinearMapKind;
++NumLinearMaps;
return;
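
A standalone illustration of the wrap test (my example, with int8_t standing in for the APInt math): with stride +20 the table 100, 120, -116 wraps at the third entry, so nsw must be withheld:

#include <cstdint>
#include <cstdio>

int main() {
  int8_t Table[] = {100, 120, -116}; // 100 + 20*i, wraps past 127 at i == 2
  int8_t Dist = int8_t(Table[1] - Table[0]); // assumed-constant stride: +20
  bool Wrapped = false;
  for (int I = 1; I < 3; ++I)
    Wrapped |= Dist > 0 ? Table[I] <= Table[I - 1]   // monotonicity broken
                        : Table[I] > Table[I - 1];
  std::printf("wrapped: %d\n", Wrapped); // 1 => emit plain mul/add, no nsw
}
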
@@ -6134,9 +6129,14 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
Value *Result = Builder.CreateIntCast(Index, LinearMultiplier->getType(),
false, "switch.idx.cast");
if (!LinearMultiplier->isOne())
- Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult");
+ Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult",
+ /*HasNUW = */ false,
+ /*HasNSW = */ !LinearMapValWrapped);
+
if (!LinearOffset->isZero())
- Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset");
+ Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset",
+ /*HasNUW = */ false,
+ /*HasNSW = */ !LinearMapValWrapped);
return Result;
}
case BitMapKind: {
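
The resulting lowering for a non-wrapping map, as I understand it (IR sketch with made-up values):

// %switch.idx.cast = zext i2 %switch.tableidx to i8  ; cast to multiplier type
// %switch.idx.mult = mul nsw i8 %switch.idx.cast, 20
// %switch.offset   = add nsw i8 %switch.idx.mult, 100
// When LinearMapValWrapped is set, the same mul/add are emitted without nsw.
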
@@ -6148,10 +6148,12 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
// truncating it to the width of the bitmask is safe.
Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast");
- // Multiply the shift amount by the element width.
+ // Multiply the shift amount by the element width. NUW/NSW can always be
+ // set, because WouldFitInRegister guarantees the shift amount, Index *
+ // element width, stays within BitMap's bit width.
ShiftAmt = Builder.CreateMul(
ShiftAmt, ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()),
- "switch.shiftamt");
+ "switch.shiftamt",/*HasNUW =*/true,/*HasNSW =*/true);
// Shift down.
Value *DownShifted =
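
A runnable model of BitMapKind (my packing; eight one-byte entries in an i64): the shift amount Index * 8 tops out at 56, well inside the map's 64-bit width, which is why both flags are safe:

#include <cstdint>
#include <cstdio>

int main() {
  // Entries 1..8, element 0 in the low byte.
  uint64_t BitMap = 0x0807060504030201ULL;
  for (uint64_t Index = 0; Index < 8; ++Index) {
    uint64_t ShiftAmt = Index * 8;             // <= 56: nuw and nsw hold
    uint8_t Elt = uint8_t(BitMap >> ShiftAmt); // "shift down", then truncate
    std::printf("table[%llu] = %u\n", (unsigned long long)Index, Elt);
  }
}
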
@@ -6490,6 +6492,21 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
std::vector<DominatorTree::UpdateType> Updates;
+ // Compute the maximum table size representable by the integer type we are
+ // switching upon.
+ unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits();
+ uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX : 1ULL << CaseSize;
+ assert(MaxTableSize >= TableSize &&
+ "It is impossible for a switch to have more entries than the max "
+ "representable value of its input integer type's size.");
+
+ // If the default destination is unreachable, or if the lookup table covers
+ // all values of the conditional variable, branch directly to the lookup table
+ // BB. Otherwise, check that the condition is within the case range.
+ const bool DefaultIsReachable =
+ !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
+ const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize);
+
// Create the BB that does the lookups.
Module &Mod = *CommonDest->getParent()->getParent();
BasicBlock *LookupBB = BasicBlock::Create(
@@ -6504,24 +6521,19 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
TableIndex = SI->getCondition();
} else {
TableIndexOffset = MinCaseVal;
- TableIndex =
- Builder.CreateSub(SI->getCondition(), TableIndexOffset, "switch.tableidx");
- }
+ // If the default is unreachable, the condition is always one of the case
+ // values, all s>= MinCaseVal, so we can try to attach nsw.
+ bool MayWrap = true;
+ if (!DefaultIsReachable) {
+ APInt Res = MaxCaseVal->getValue().ssub_ov(MinCaseVal->getValue(), MayWrap);
+ (void)Res;
+ }
- // Compute the maximum table size representable by the integer type we are
- // switching upon.
- unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits();
- uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX : 1ULL << CaseSize;
- assert(MaxTableSize >= TableSize &&
- "It is impossible for a switch to have more entries than the max "
- "representable value of its input integer type's size.");
+ TableIndex = Builder.CreateSub(SI->getCondition(), TableIndexOffset,
+ "switch.tableidx", /*HasNUW =*/false,
+ /*HasNSW =*/!MayWrap);
+ }
- // If the default destination is unreachable, or if the lookup table covers
- // all values of the conditional variable, branch directly to the lookup table
- // BB. Otherwise, check that the condition is within the case range.
- const bool DefaultIsReachable =
- !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
- const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize);
BranchInst *RangeCheckBranch = nullptr;
if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
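
Worked check for the nsw on the index subtraction (my i8 numbers): with cases spanning 100..119 and an unreachable default, the condition is always in that range, and 119 - 100 does not overflow i8, so MayWrap comes back false:

#include <cstdint>
#include <cstdio>

int main() {
  int8_t MinCaseVal = 100, MaxCaseVal = 119;
  int Wide = int(MaxCaseVal) - int(MinCaseVal); // exact difference: 19
  bool MayWrap = Wide != int8_t(Wide);          // i8 ssub overflow check
  std::printf("may wrap: %d\n", MayWrap);       // 0 => 'sub nsw' is sound
}
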
@@ -6694,7 +6706,7 @@ static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
// less than 64.
unsigned Shift = 64;
for (auto &V : Values)
- Shift = std::min(Shift, countTrailingZeros((uint64_t)V));
+ Shift = std::min(Shift, (unsigned)llvm::countr_zero((uint64_t)V));
assert(Shift < 64);
if (Shift > 0)
for (auto &V : Values)
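
Standalone demo of the shift above (plain C++20; std::countr_zero in place of llvm::countr_zero): case values 8, 24, 40 share three trailing zero bits, so shifting right by 3 yields the denser set 1, 3, 5:

#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint64_t> Values = {8, 24, 40};
  unsigned Shift = 64;
  for (uint64_t V : Values)
    Shift = std::min(Shift, (unsigned)std::countr_zero(V));
  if (Shift > 0 && Shift < 64)
    for (uint64_t &V : Values)
      V >>= Shift;
  for (uint64_t V : Values)
    std::printf("%llu\n", (unsigned long long)V); // 1 3 5
}
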
@@ -6990,7 +7002,8 @@ bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
"Tautological conditional branch should have been eliminated already.");
BasicBlock *BB = BI->getParent();
- if (!Options.SimplifyCondBranch)
+ if (!Options.SimplifyCondBranch ||
+ BI->getFunction()->hasFnAttribute(Attribute::OptForFuzzing))
return false;
// Conditional branch
@@ -7045,8 +7058,7 @@ bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// can hoist it up to the branching block.
if (BI->getSuccessor(0)->getSinglePredecessor()) {
if (BI->getSuccessor(1)->getSinglePredecessor()) {
- if (HoistCommon &&
- HoistThenElseCodeToIf(BI, TTI, !Options.HoistCommonInsts))
+ if (HoistCommon && HoistThenElseCodeToIf(BI, !Options.HoistCommonInsts))
return requestResimplify();
} else {
// If Successor #1 has multiple preds, we may be able to conditionally
@@ -7054,7 +7066,7 @@ bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
Instruction *Succ0TI = BI->getSuccessor(0)->getTerminator();
if (Succ0TI->getNumSuccessors() == 1 &&
Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), TTI))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
return requestResimplify();
}
} else if (BI->getSuccessor(1)->getSinglePredecessor()) {
@@ -7063,7 +7075,7 @@ bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
Instruction *Succ1TI = BI->getSuccessor(1)->getTerminator();
if (Succ1TI->getNumSuccessors() == 1 &&
Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), TTI))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
return requestResimplify();
}
@@ -7179,7 +7191,8 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValu
/// If BB has an incoming value that will always trigger undefined behavior
/// (eg. null pointer dereference), remove the branch leading here.
static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
- DomTreeUpdater *DTU) {
+ DomTreeUpdater *DTU,
+ AssumptionCache *AC) {
for (PHINode &PHI : BB->phis())
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i)
if (passingValueIsAlwaysUndefined(PHI.getIncomingValue(i), &PHI)) {
@@ -7196,10 +7209,13 @@ static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
// Preserve guarding condition in assume, because it might not be
// inferrable from any dominating condition.
Value *Cond = BI->getCondition();
+ CallInst *Assumption;
if (BI->getSuccessor(0) == BB)
- Builder.CreateAssumption(Builder.CreateNot(Cond));
+ Assumption = Builder.CreateAssumption(Builder.CreateNot(Cond));
else
- Builder.CreateAssumption(Cond);
+ Assumption = Builder.CreateAssumption(Cond);
+ if (AC)
+ AC->registerAssumption(cast<AssumeInst>(Assumption));
Builder.CreateBr(BI->getSuccessor(0) == BB ? BI->getSuccessor(1)
: BI->getSuccessor(0));
}
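
A hedged sketch of the rewrite this performs (IR names mine): the edge that would feed a null pointer into the dereferencing PHI is cut, and its guarding condition is preserved as an assume so the information is not lost:

// pred:                                 pred:
//   br i1 %c, label %bb, label %other     %c.not = xor i1 %c, true
//                                   =>    call void @llvm.assume(i1 %c.not)
// bb:                                     br label %other
//   %p = phi ptr [ null, %pred ], ...
//   store i32 0, ptr %p            ; reaching %bb from %pred would be UB
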
@@ -7260,7 +7276,7 @@ bool SimplifyCFGOpt::simplifyOnce(BasicBlock *BB) {
Changed |= EliminateDuplicatePHINodes(BB);
// Check for and remove branches that will always cause undefined behavior.
- if (removeUndefIntroducingPredecessor(BB, DTU))
+ if (removeUndefIntroducingPredecessor(BB, DTU, Options.AC))
return requestResimplify();
// Merge basic blocks into their predecessor if there is only one distinct
@@ -7282,7 +7298,8 @@ bool SimplifyCFGOpt::simplifyOnce(BasicBlock *BB) {
IRBuilder<> Builder(BB);
- if (Options.FoldTwoEntryPHINode) {
+ if (Options.SpeculateBlocks &&
+ !BB->getParent()->hasFnAttribute(Attribute::OptForFuzzing)) {
// If there is a trivial two-entry PHI node in this basic block, and we can
// eliminate it, do so now.
if (auto *PN = dyn_cast<PHINode>(BB->begin()))