aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Analysis
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--llvm/lib/Analysis/BranchProbabilityInfo.cpp11
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp15
-rw-r--r--llvm/lib/Analysis/GlobalsModRef.cpp16
-rw-r--r--llvm/lib/Analysis/IRSimilarityIdentifier.cpp4
-rw-r--r--llvm/lib/Analysis/InlineAdvisor.cpp13
-rw-r--r--llvm/lib/Analysis/InlineCost.cpp17
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp45
-rw-r--r--llvm/lib/Analysis/LazyValueInfo.cpp6
-rw-r--r--llvm/lib/Analysis/Loads.cpp5
-rw-r--r--llvm/lib/Analysis/MemoryBuiltins.cpp4
-rw-r--r--llvm/lib/Analysis/MustExecute.cpp2
-rw-r--r--llvm/lib/Analysis/ProfileSummaryInfo.cpp12
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp44
-rw-r--r--llvm/lib/Analysis/TFUtils.cpp11
-rw-r--r--llvm/lib/Analysis/TargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp34
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp24
-rw-r--r--llvm/lib/Analysis/VectorUtils.cpp2
18 files changed, 125 insertions, 142 deletions
diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 1d880424e55c..428ae8975c30 100644
--- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
@@ -629,9 +630,10 @@ computeUnlikelySuccessors(const BasicBlock *BB, Loop *L,
if (!CmpLHSConst || !llvm::is_contained(successors(BB), B))
continue;
// First collapse InstChain
+ const DataLayout &DL = BB->getModule()->getDataLayout();
for (Instruction *I : llvm::reverse(InstChain)) {
- CmpLHSConst = ConstantExpr::get(I->getOpcode(), CmpLHSConst,
- cast<Constant>(I->getOperand(1)), true);
+ CmpLHSConst = ConstantFoldBinaryOpOperands(
+ I->getOpcode(), CmpLHSConst, cast<Constant>(I->getOperand(1)), DL);
if (!CmpLHSConst)
break;
}
@@ -826,9 +828,8 @@ void BranchProbabilityInfo::computeEestimateBlockWeight(
if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
// If we were able to find estimated weight for the block set it to this
// block and propagate up the IR.
- propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT,
- BBWeight.getValue(), BlockWorkList,
- LoopWorkList);
+ propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, BBWeight.value(),
+ BlockWorkList, LoopWorkList);
// BlockWorklist/LoopWorkList contains blocks/loops with at least one
// successor/exit having estimated weight. Try to propagate weight to such
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a81041845052..aa4da27be4e5 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -30,6 +30,7 @@
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -1142,8 +1143,12 @@ ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
Ops.push_back(NewC);
}
- if (auto *CE = dyn_cast<ConstantExpr>(C))
- return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ if (Constant *Res =
+ ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
+ return Res;
+ return const_cast<Constant *>(C);
+ }
assert(isa<ConstantVector>(C));
return ConstantVector::get(Ops);
@@ -1339,7 +1344,9 @@ Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
return C;
- return ConstantExpr::get(Opcode, LHS, RHS);
+ if (ConstantExpr::isDesirableBinOp(Opcode))
+ return ConstantExpr::get(Opcode, LHS, RHS);
+ return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}
Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
@@ -1390,6 +1397,8 @@ Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
// Calculate constant result.
Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
+ if (!C)
+ return nullptr;
// Flush denormal output if needed.
return FlushFPConstant(C, I, /* IsOutput */ true);
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index e82d2fae9356..db6eae0d962a 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -256,22 +256,6 @@ FunctionModRefBehavior GlobalsAAResult::getModRefBehavior(const Function *F) {
return FunctionModRefBehavior(AAResultBase::getModRefBehavior(F) & Min);
}
-FunctionModRefBehavior
-GlobalsAAResult::getModRefBehavior(const CallBase *Call) {
- FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
-
- if (!Call->hasOperandBundles())
- if (const Function *F = Call->getCalledFunction())
- if (FunctionInfo *FI = getFunctionInfo(F)) {
- if (!isModOrRefSet(FI->getModRefInfo()))
- Min = FMRB_DoesNotAccessMemory;
- else if (!isModSet(FI->getModRefInfo()))
- Min = FMRB_OnlyReadsMemory;
- }
-
- return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min);
-}
-
/// Returns the function info for the function, or null if we don't have
/// anything useful to say about it.
GlobalsAAResult::FunctionInfo *
diff --git a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
index 3d51042f4da8..a681c528e690 100644
--- a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
+++ b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
@@ -184,8 +184,8 @@ CmpInst::Predicate IRInstructionData::getPredicate() const {
"Can only get a predicate from a compare instruction");
if (RevisedPredicate)
- return RevisedPredicate.getValue();
-
+ return RevisedPredicate.value();
+
return cast<CmpInst>(Inst)->getPredicate();
}
diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp
index cf8592c41eda..3fafc3057a13 100644
--- a/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -56,10 +56,10 @@ static cl::opt<int>
cl::desc("Scale to limit the cost of inline deferral"),
cl::init(2), cl::Hidden);
-static cl::opt<bool> AnnotateInlinePhase(
- "annotate-inline-phase", cl::Hidden, cl::init(false),
- cl::desc("If true, annotate inline advisor remarks "
- "with LTO and pass information."));
+static cl::opt<bool>
+ AnnotateInlinePhase("annotate-inline-phase", cl::Hidden, cl::init(false),
+ cl::desc("If true, annotate inline advisor remarks "
+ "with LTO and pass information."));
extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;
@@ -514,8 +514,9 @@ void llvm::emitInlinedIntoBasedOnCost(
InlineAdvisor::InlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
Optional<InlineContext> IC)
: M(M), FAM(FAM), IC(IC),
- AnnotatedInlinePassName((IC && AnnotateInlinePhase) ? llvm::AnnotateInlinePassName(*IC)
- : DEBUG_TYPE) {
+ AnnotatedInlinePassName((IC && AnnotateInlinePhase)
+ ? llvm::AnnotateInlinePassName(*IC)
+ : DEBUG_TYPE) {
if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
ImportedFunctionsStats =
std::make_unique<ImportedFunctionsInliningStatistics>();
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index e63497260e6e..9f8a5e472f01 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -131,6 +131,12 @@ static cl::opt<size_t>
cl::desc("Do not inline functions with a stack size "
"that exceeds the specified limit"));
+static cl::opt<size_t>
+ RecurStackSizeThreshold("recursive-inline-max-stacksize", cl::Hidden,
+ cl::init(InlineConstants::TotalAllocaSizeRecursiveCaller),
+ cl::desc("Do not inline recursive functions with a stack "
+ "size that exceeds the specified limit"));
+
static cl::opt<bool> OptComputeFullInlineCost(
"inline-cost-full", cl::Hidden,
cl::desc("Compute the full inline cost of a call site even when the cost "
@@ -702,7 +708,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
assert(BFI && "BFI must be available");
auto ProfileCount = BFI->getBlockProfileCount(BB);
assert(ProfileCount);
- if (ProfileCount.getValue() == 0)
+ if (ProfileCount.value() == 0)
ColdSize += Cost - CostAtBBStart;
}
@@ -827,7 +833,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
assert(ProfileCount);
- CurrentSavings *= ProfileCount.getValue();
+ CurrentSavings *= ProfileCount.value();
CycleSavings += CurrentSavings;
}
@@ -1781,12 +1787,12 @@ void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
// return min(A, B) if B is valid.
auto MinIfValid = [](int A, Optional<int> B) {
- return B ? std::min(A, B.getValue()) : A;
+ return B ? std::min(A, B.value()) : A;
};
// return max(A, B) if B is valid.
auto MaxIfValid = [](int A, Optional<int> B) {
- return B ? std::max(A, B.getValue()) : A;
+ return B ? std::max(A, B.value()) : A;
};
// Various bonus percentages. These are multiplied by Threshold to get the
@@ -2444,8 +2450,7 @@ CallAnalyzer::analyzeBlock(BasicBlock *BB,
// If the caller is a recursive function then we don't want to inline
// functions which allocate a lot of stack space because it would increase
// the caller stack usage dramatically.
- if (IsCallerRecursive &&
- AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
+ if (IsCallerRecursive && AllocatedSize > RecurStackSizeThreshold) {
auto IR =
InlineResult::failure("recursive and allocates too much stack space");
if (ORE)
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 013e4d6489fa..4691aebbdfe1 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4849,12 +4849,6 @@ static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
return UndefValue::get(PN->getType());
if (HasUndefInput) {
- // We cannot start executing a trapping constant expression on more control
- // flow paths.
- auto *C = dyn_cast<Constant>(CommonValue);
- if (C && C->canTrap())
- return nullptr;
-
// If we have a PHI node like phi(X, undef, X), where X is defined by some
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
@@ -6117,8 +6111,8 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
Value *Op2 = Call->getArgOperand(2);
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue()))
+ FPI->getExceptionBehavior().value(),
+ FPI->getRoundingMode().value()))
return V;
return nullptr;
}
@@ -6182,38 +6176,33 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
}
case Intrinsic::experimental_constrained_fadd: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFAddInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fsub: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFSubInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fmul: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFMulInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fdiv: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFDivInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_frem: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFRemInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
default:
return nullptr;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 8a8e9e923b7c..d49b20798c82 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -921,7 +921,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
if (!LHSRes)
// More work to do before applying this transfer rule.
return None;
- const ConstantRange &LHSRange = LHSRes.getValue();
+ const ConstantRange &LHSRange = LHSRes.value();
const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
@@ -946,8 +946,8 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
// More work to do before applying this transfer rule.
return None;
- const ConstantRange &LHSRange = LHSRes.getValue();
- const ConstantRange &RHSRange = RHSRes.getValue();
+ const ConstantRange &LHSRange = LHSRes.value();
+ const ConstantRange &RHSRange = RHSRes.value();
return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
}
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index bc1d82cf1480..938d950e6da7 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -405,7 +405,10 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
Instruction *ScanFrom,
const DominatorTree *DT,
const TargetLibraryInfo *TLI) {
- APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
+ TypeSize TySize = DL.getTypeStoreSize(Ty);
+ if (TySize.isScalable())
+ return false;
+ APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT, TLI);
}
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 91501b04448e..f5b121c98ec4 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -501,10 +501,10 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I,
return None;
const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
if (AllocData)
- return mangledNameForMallocFamily(AllocData.getValue().Family);
+ return mangledNameForMallocFamily(AllocData.value().Family);
const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
if (FreeData)
- return mangledNameForMallocFamily(FreeData.getValue().Family);
+ return mangledNameForMallocFamily(FreeData.value().Family);
return None;
}
diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp
index 5cff986245b9..ad8322d7bd79 100644
--- a/llvm/lib/Analysis/MustExecute.cpp
+++ b/llvm/lib/Analysis/MustExecute.cpp
@@ -493,7 +493,7 @@ static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map,
Optional<V> &OptVal = Map[Key];
if (!OptVal)
OptVal = Fn(std::forward<ArgsTy>(args)...);
- return OptVal.getValue();
+ return OptVal.value();
}
const BasicBlock *
diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index 9d5fa6d0a41b..64844f534332 100644
--- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -279,19 +279,19 @@ ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const {
}
bool ProfileSummaryInfo::hasHugeWorkingSetSize() const {
- return HasHugeWorkingSetSize && HasHugeWorkingSetSize.getValue();
+ return HasHugeWorkingSetSize && HasHugeWorkingSetSize.value();
}
bool ProfileSummaryInfo::hasLargeWorkingSetSize() const {
- return HasLargeWorkingSetSize && HasLargeWorkingSetSize.getValue();
+ return HasLargeWorkingSetSize && HasLargeWorkingSetSize.value();
}
bool ProfileSummaryInfo::isHotCount(uint64_t C) const {
- return HotCountThreshold && C >= HotCountThreshold.getValue();
+ return HotCountThreshold && C >= HotCountThreshold.value();
}
bool ProfileSummaryInfo::isColdCount(uint64_t C) const {
- return ColdCountThreshold && C <= ColdCountThreshold.getValue();
+ return ColdCountThreshold && C <= ColdCountThreshold.value();
}
template <bool isHot>
@@ -299,9 +299,9 @@ bool ProfileSummaryInfo::isHotOrColdCountNthPercentile(int PercentileCutoff,
uint64_t C) const {
auto CountThreshold = computeThreshold(PercentileCutoff);
if (isHot)
- return CountThreshold && C >= CountThreshold.getValue();
+ return CountThreshold && C >= CountThreshold.value();
else
- return CountThreshold && C <= CountThreshold.getValue();
+ return CountThreshold && C <= CountThreshold.value();
}
bool ProfileSummaryInfo::isHotCountNthPercentile(int PercentileCutoff,
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 207f4df79e45..f61806bd1dad 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2319,9 +2319,13 @@ bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
return A == B;
}
-std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
+Optional<SCEV::NoWrapFlags>
ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
const OverflowingBinaryOperator *OBO) {
+ // It cannot be done any better.
+ if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
+ return None;
+
SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
if (OBO->hasNoUnsignedWrap())
@@ -2331,13 +2335,10 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
bool Deduced = false;
- if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
- return {Flags, Deduced};
-
if (OBO->getOpcode() != Instruction::Add &&
OBO->getOpcode() != Instruction::Sub &&
OBO->getOpcode() != Instruction::Mul)
- return {Flags, Deduced};
+ return None;
const SCEV *LHS = getSCEV(OBO->getOperand(0));
const SCEV *RHS = getSCEV(OBO->getOperand(1));
@@ -2356,7 +2357,9 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
Deduced = true;
}
- return {Flags, Deduced};
+ if (Deduced)
+ return Flags;
+ return None;
}
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
@@ -4835,7 +4838,7 @@ public:
Optional<const SCEV *> Res =
compareWithBackedgeCondition(SI->getCondition());
if (Res) {
- bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
+ bool IsOne = cast<SCEVConstant>(Res.value())->getValue()->isOne();
Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
}
break;
@@ -4843,7 +4846,7 @@ public:
default: {
Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
if (Res)
- Result = Res.getValue();
+ Result = Res.value();
break;
}
}
@@ -6583,8 +6586,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
// Check if the IR explicitly contains !range metadata.
Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
if (MDRange)
- ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
- RangeType);
+ ConservativeResult =
+ ConservativeResult.intersectWith(MDRange.value(), RangeType);
// Use facts about recurrences in the underlying IR. Note that add
// recurrences are AddRecExprs and thus don't hit this path. This
@@ -7365,6 +7368,8 @@ ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
Ops.push_back(II->getArgOperand(1));
return nullptr;
case Intrinsic::start_loop_iterations:
+ case Intrinsic::annotation:
+ case Intrinsic::ptr_annotation:
Ops.push_back(II->getArgOperand(0));
return nullptr;
default:
@@ -7816,8 +7821,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
}
case Intrinsic::start_loop_iterations:
- // A start_loop_iterations is just equivalent to the first operand for
- // SCEV purposes.
+ case Intrinsic::annotation:
+ case Intrinsic::ptr_annotation:
+    // A start_loop_iterations or llvm.annotation or llvm.ptr.annotation is
+    // just equivalent to the first operand for SCEV purposes.
return getSCEV(II->getArgOperand(0));
default:
break;
@@ -9517,14 +9524,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
}
return C;
}
- case scUDivExpr: {
- const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
- if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
- if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
- if (LHS->getType() == RHS->getType())
- return ConstantExpr::getUDiv(LHS, RHS);
- return nullptr;
- }
+ case scUDivExpr:
case scSMaxExpr:
case scUMaxExpr:
case scSMinExpr:
@@ -10632,7 +10632,7 @@ ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
assert(ResultSwapped && "should be able to analyze both!");
- assert(ResultSwapped.getValue() != Result.getValue() &&
+ assert(ResultSwapped.value() != Result.value() &&
"monotonicity should flip as we flip the predicate");
}
#endif
@@ -11808,7 +11808,7 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
// Make sure L does not refer to a value from a potentially previous
// iteration of a loop.
- if (!properlyDominates(L, IncBB))
+ if (!properlyDominates(L, LBB))
return false;
if (!ProvedEasily(L, RHS))
return false;
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 203858c1cf06..682fc095b0e9 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -18,7 +18,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -49,19 +48,17 @@ using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
struct TFInitializer {
TFInitializer() {
- assert(!IsInitialized && "TFInitialized should be called only once");
int Argc = 1;
const char *Name = "";
const char **NamePtr = &Name;
TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
- IsInitialized = true;
}
- bool IsInitialized = false;
};
-llvm::ManagedStatic<TFInitializer> TFLibInitializer;
-
-bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
+bool ensureInitTF() {
+ static TFInitializer TFLibInitializer;
+ return true;
+}
TFGraphPtr createTFGraph() {
return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 66f61961d01b..6e34a8303c08 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -298,7 +298,7 @@ bool TargetTransformInfo::preferPredicateOverEpilogue(
return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
-bool TargetTransformInfo::emitGetActiveLaneMask() const {
+PredicationStyle TargetTransformInfo::emitGetActiveLaneMask() const {
return TTIImpl->emitGetActiveLaneMask();
}
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 9bcbe4a4cc1e..560f46d39d0d 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -303,24 +303,27 @@ public:
/// given offset. Update the offset to be relative to the field type.
TBAAStructTypeNode getField(uint64_t &Offset) const {
bool NewFormat = isNewFormat();
+ const ArrayRef<MDOperand> Operands(Node->op_begin(), Node->op_end());
+ const unsigned NumOperands = Operands.size();
+
if (NewFormat) {
// New-format root and scalar type nodes have no fields.
- if (Node->getNumOperands() < 6)
+ if (NumOperands < 6)
return TBAAStructTypeNode();
} else {
// Parent can be omitted for the root node.
- if (Node->getNumOperands() < 2)
+ if (NumOperands < 2)
return TBAAStructTypeNode();
// Fast path for a scalar type node and a struct type node with a single
// field.
- if (Node->getNumOperands() <= 3) {
- uint64_t Cur = Node->getNumOperands() == 2
- ? 0
- : mdconst::extract<ConstantInt>(Node->getOperand(2))
- ->getZExtValue();
+ if (NumOperands <= 3) {
+ uint64_t Cur =
+ NumOperands == 2
+ ? 0
+ : mdconst::extract<ConstantInt>(Operands[2])->getZExtValue();
Offset -= Cur;
- MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1));
+ MDNode *P = dyn_cast_or_null<MDNode>(Operands[1]);
if (!P)
return TBAAStructTypeNode();
return TBAAStructTypeNode(P);
@@ -332,10 +335,11 @@ public:
unsigned FirstFieldOpNo = NewFormat ? 3 : 1;
unsigned NumOpsPerField = NewFormat ? 3 : 2;
unsigned TheIdx = 0;
- for (unsigned Idx = FirstFieldOpNo; Idx < Node->getNumOperands();
+
+ for (unsigned Idx = FirstFieldOpNo; Idx < NumOperands;
Idx += NumOpsPerField) {
- uint64_t Cur = mdconst::extract<ConstantInt>(Node->getOperand(Idx + 1))
- ->getZExtValue();
+ uint64_t Cur =
+ mdconst::extract<ConstantInt>(Operands[Idx + 1])->getZExtValue();
if (Cur > Offset) {
assert(Idx >= FirstFieldOpNo + NumOpsPerField &&
"TBAAStructTypeNode::getField should have an offset match!");
@@ -345,11 +349,11 @@ public:
}
// Move along the last field.
if (TheIdx == 0)
- TheIdx = Node->getNumOperands() - NumOpsPerField;
- uint64_t Cur = mdconst::extract<ConstantInt>(Node->getOperand(TheIdx + 1))
- ->getZExtValue();
+ TheIdx = NumOperands - NumOpsPerField;
+ uint64_t Cur =
+ mdconst::extract<ConstantInt>(Operands[TheIdx + 1])->getZExtValue();
Offset -= Cur;
- MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(TheIdx));
+ MDNode *P = dyn_cast_or_null<MDNode>(Operands[TheIdx]);
if (!P)
return TBAAStructTypeNode();
return TBAAStructTypeNode(P);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 05d5e47bb8d7..add2d427e05b 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4679,27 +4679,22 @@ bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
F.hasFnAttribute(Attribute::SanitizeHWAddress);
}
-
-bool llvm::isSafeToSpeculativelyExecute(const Value *V,
+bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
const Instruction *CtxI,
const DominatorTree *DT,
const TargetLibraryInfo *TLI) {
- const Operator *Inst = dyn_cast<Operator>(V);
- if (!Inst)
- return false;
- return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI, DT, TLI);
+ return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
+ DT, TLI);
}
-bool llvm::isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode,
- const Operator *Inst,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
+ unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
+ const DominatorTree *DT, const TargetLibraryInfo *TLI) {
#ifndef NDEBUG
if (Inst->getOpcode() != Opcode) {
// Check that the operands are actually compatible with the Opcode override.
auto hasEqualReturnAndLeadingOperandTypes =
- [](const Operator *Inst, unsigned NumLeadingOperands) {
+ [](const Instruction *Inst, unsigned NumLeadingOperands) {
if (Inst->getNumOperands() < NumLeadingOperands)
return false;
const Type *ExpectedType = Inst->getType();
@@ -4715,11 +4710,6 @@ bool llvm::isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode,
}
#endif
- for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
- if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
- if (C->canTrap())
- return false;
-
switch (Opcode) {
default:
return true;
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index f863a1ffad3a..894680cda1fc 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1502,7 +1502,7 @@ void VFABI::getVectorVariantNames(
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
assert(Info && "Invalid name for a VFABI variant.");
- assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
+ assert(CI.getModule()->getFunction(Info.value().VectorName) &&
"Vector function is missing.");
#endif
VariantMappings.push_back(std::string(S));