Diffstat (limited to 'include/llvm/Transforms')
-rw-r--r--  include/llvm/Transforms/IPO/Attributor.h | 1729
-rw-r--r--  include/llvm/Transforms/IPO/GlobalDCE.h | 14
-rw-r--r--  include/llvm/Transforms/IPO/HotColdSplitting.h | 39
-rw-r--r--  include/llvm/Transforms/IPO/LowerTypeTests.h | 2
-rw-r--r--  include/llvm/Transforms/IPO/WholeProgramDevirt.h | 26
-rw-r--r--  include/llvm/Transforms/Instrumentation.h | 4
-rw-r--r--  include/llvm/Transforms/Instrumentation/InstrProfiling.h | 5
-rw-r--r--  include/llvm/Transforms/Instrumentation/MemorySanitizer.h | 12
-rw-r--r--  include/llvm/Transforms/Instrumentation/SanitizerCoverage.h | 47
-rw-r--r--  include/llvm/Transforms/Instrumentation/ThreadSanitizer.h | 2
-rw-r--r--  include/llvm/Transforms/Scalar.h | 9
-rw-r--r--  include/llvm/Transforms/Scalar/CallSiteSplitting.h | 5
-rw-r--r--  include/llvm/Transforms/Scalar/ConstantHoisting.h | 10
-rw-r--r--  include/llvm/Transforms/Scalar/Float2Int.h | 6
-rw-r--r--  include/llvm/Transforms/Scalar/GVN.h | 7
-rw-r--r--  include/llvm/Transforms/Scalar/GVNExpression.h | 9
-rw-r--r--  include/llvm/Transforms/Scalar/LoopPassManager.h | 24
-rw-r--r--  include/llvm/Transforms/Scalar/LoopUnrollPass.h | 14
-rw-r--r--  include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h | 41
-rw-r--r--  include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h | 18
-rw-r--r--  include/llvm/Transforms/Scalar/Reassociate.h | 4
-rw-r--r--  include/llvm/Transforms/Scalar/SCCP.h | 3
-rw-r--r--  include/llvm/Transforms/Utils/BasicBlockUtils.h | 11
-rw-r--r--  include/llvm/Transforms/Utils/BuildLibCalls.h | 27
-rw-r--r--  include/llvm/Transforms/Utils/BypassSlowDivision.h | 13
-rw-r--r--  include/llvm/Transforms/Utils/CodeExtractor.h | 57
-rw-r--r--  include/llvm/Transforms/Utils/Local.h | 16
-rw-r--r--  include/llvm/Transforms/Utils/LoopUtils.h | 5
-rw-r--r--  include/llvm/Transforms/Utils/MisExpect.h | 43
-rw-r--r--  include/llvm/Transforms/Utils/PredicateInfo.h | 10
-rw-r--r--  include/llvm/Transforms/Utils/SimplifyLibCalls.h | 10
-rw-r--r--  include/llvm/Transforms/Utils/UnrollLoop.h | 8
-rw-r--r--  include/llvm/Transforms/Utils/ValueMapper.h | 9
-rw-r--r--  include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h | 48
-rw-r--r--  include/llvm/Transforms/Vectorize/LoopVectorize.h | 8
-rw-r--r--  include/llvm/Transforms/Vectorize/SLPVectorizer.h | 9
36 files changed, 1911 insertions(+), 393 deletions(-)
diff --git a/include/llvm/Transforms/IPO/Attributor.h b/include/llvm/Transforms/IPO/Attributor.h
index 5dbe21ac5e4e..3dbe0fcd76ea 100644
--- a/include/llvm/Transforms/IPO/Attributor.h
+++ b/include/llvm/Transforms/IPO/Attributor.h
@@ -60,13 +60,12 @@
// manifest their result in the IR for passes to come.
//
// Attribute manifestation is not mandatory. If desired, there is support to
-// generate a single LLVM-IR attribute already in the AbstractAttribute base
-// class. In the simplest case, a subclass overloads
-// `AbstractAttribute::getManifestPosition()` and
-// `AbstractAttribute::getAttrKind()` to return the appropriate values. The
-// Attributor manifestation framework will then create and place a new attribute
-// if it is allowed to do so (based on the abstract state). Other use cases can
-// be achieved by overloading other abstract attribute methods.
+// generate one or multiple LLVM-IR attributes already in the helper struct
+// IRAttribute. In the simplest case, a subclass inherits from IRAttribute with
+// the appropriate Attribute::AttrKind as a template parameter. The Attributor
+// manifestation framework will then create and place a new attribute if it is
+// allowed to do so (based on the abstract state). Other use cases can be
+// achieved by overloading AbstractAttribute or IRAttribute methods.
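As an illustration, the AANoUnwind attribute declared later in this header
follows exactly this pattern, inheriting from IRAttribute with
Attribute::NoUnwind as the template parameter:

    struct AANoUnwind
        : public IRAttribute<Attribute::NoUnwind,
                             StateWrapper<BooleanState, AbstractAttribute>> {
      AANoUnwind(const IRPosition &IRP) : IRAttribute(IRP) {}
      // ... deduction interface and static members follow below.
    };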
//
//
// The "mechanics" of adding a new "abstract attribute":
@@ -97,7 +96,13 @@
#ifndef LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
#define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
-#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/PassManager.h"
@@ -105,6 +110,7 @@ namespace llvm {
struct AbstractAttribute;
struct InformationCache;
+struct AAIsDead;
class Function;
@@ -120,6 +126,563 @@ ChangeStatus operator|(ChangeStatus l, ChangeStatus r);
ChangeStatus operator&(ChangeStatus l, ChangeStatus r);
///}
+/// Helper to describe and deal with positions in the LLVM-IR.
+///
+/// A position in the IR is described by an anchor value and an "offset" that
+/// could be the argument number, for call sites and arguments, or an indicator
+/// of the "position kind". The kinds, specified in the Kind enum below, include
+/// the locations in the attribute list, e.g., function scope and return value,
+/// as well as a distinction between call sites and functions. Finally, there
+/// are floating values that do not have a corresponding attribute list
+/// position.
+struct IRPosition {
+ virtual ~IRPosition() {}
+
+ /// The positions we distinguish in the IR.
+ ///
+ /// The values are chosen such that the KindOrArgNo member has a value >= 0
+ /// if it is an argument or call site argument while a value < 0 indicates the
+ /// respective kind of that value.
+ enum Kind : int {
+ IRP_INVALID = -6, ///< An invalid position.
+ IRP_FLOAT = -5, ///< A position that is not associated with a spot suitable
+ ///< for attributes. This could be any value or instruction.
+ IRP_RETURNED = -4, ///< An attribute for the function return value.
+ IRP_CALL_SITE_RETURNED = -3, ///< An attribute for a call site return value.
+ IRP_FUNCTION = -2, ///< An attribute for a function (scope).
+ IRP_CALL_SITE = -1, ///< An attribute for a call site (function scope).
+ IRP_ARGUMENT = 0, ///< An attribute for a function argument.
+ IRP_CALL_SITE_ARGUMENT = 1, ///< An attribute for a call site argument.
+ };
+
+ /// Default constructor available to create invalid positions implicitly. All
+ /// other positions need to be created explicitly through the appropriate
+ /// static member function.
+ IRPosition() : AnchorVal(nullptr), KindOrArgNo(IRP_INVALID) { verify(); }
+
+ /// Create a position describing the value of \p V.
+ static const IRPosition value(const Value &V) {
+ if (auto *Arg = dyn_cast<Argument>(&V))
+ return IRPosition::argument(*Arg);
+ if (auto *CB = dyn_cast<CallBase>(&V))
+ return IRPosition::callsite_returned(*CB);
+ return IRPosition(const_cast<Value &>(V), IRP_FLOAT);
+ }
+
+ /// Create a position describing the function scope of \p F.
+ static const IRPosition function(const Function &F) {
+ return IRPosition(const_cast<Function &>(F), IRP_FUNCTION);
+ }
+
+ /// Create a position describing the returned value of \p F.
+ static const IRPosition returned(const Function &F) {
+ return IRPosition(const_cast<Function &>(F), IRP_RETURNED);
+ }
+
+ /// Create a position describing the argument \p Arg.
+ static const IRPosition argument(const Argument &Arg) {
+ return IRPosition(const_cast<Argument &>(Arg), Kind(Arg.getArgNo()));
+ }
+
+ /// Create a position describing the function scope of \p CB.
+ static const IRPosition callsite_function(const CallBase &CB) {
+ return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE);
+ }
+
+ /// Create a position describing the returned value of \p CB.
+ static const IRPosition callsite_returned(const CallBase &CB) {
+ return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE_RETURNED);
+ }
+
+ /// Create a position describing the argument of \p CB at position \p ArgNo.
+ static const IRPosition callsite_argument(const CallBase &CB,
+ unsigned ArgNo) {
+ return IRPosition(const_cast<CallBase &>(CB), Kind(ArgNo));
+ }
+
+ /// Create a position describing the function scope of \p ICS.
+ static const IRPosition callsite_function(ImmutableCallSite ICS) {
+ return IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction()));
+ }
+
+ /// Create a position describing the returned value of \p ICS.
+ static const IRPosition callsite_returned(ImmutableCallSite ICS) {
+ return IRPosition::callsite_returned(cast<CallBase>(*ICS.getInstruction()));
+ }
+
+ /// Create a position describing the argument of \p ICS at position \p ArgNo.
+ static const IRPosition callsite_argument(ImmutableCallSite ICS,
+ unsigned ArgNo) {
+ return IRPosition::callsite_argument(cast<CallBase>(*ICS.getInstruction()),
+ ArgNo);
+ }
+
+ /// Create a position describing the argument of \p ACS at position \p ArgNo.
+ static const IRPosition callsite_argument(AbstractCallSite ACS,
+ unsigned ArgNo) {
+ int CSArgNo = ACS.getCallArgOperandNo(ArgNo);
+ if (CSArgNo >= 0)
+ return IRPosition::callsite_argument(
+ cast<CallBase>(*ACS.getInstruction()), CSArgNo);
+ return IRPosition();
+ }
+
+ /// Create a position with function scope matching the "context" of \p IRP.
+ /// If \p IRP is a call site (see isAnyCallSitePosition()) then the result
+ /// will be a call site position, otherwise the function position of the
+ /// associated function.
+ static const IRPosition function_scope(const IRPosition &IRP) {
+ if (IRP.isAnyCallSitePosition()) {
+ return IRPosition::callsite_function(
+ cast<CallBase>(IRP.getAnchorValue()));
+ }
+ assert(IRP.getAssociatedFunction());
+ return IRPosition::function(*IRP.getAssociatedFunction());
+ }
+
+ bool operator==(const IRPosition &RHS) const {
+ return (AnchorVal == RHS.AnchorVal) && (KindOrArgNo == RHS.KindOrArgNo);
+ }
+ bool operator!=(const IRPosition &RHS) const { return !(*this == RHS); }
+
+ /// Return the value this abstract attribute is anchored with.
+ ///
+ /// The anchor value might not be the associated value if the latter is not
+ /// sufficient to determine where arguments will be manifested. This is, so
+ /// far, only the case for call site arguments as the value is not sufficient
+ /// to pinpoint them. Instead, we can use the call site as an anchor.
+ ///
+ ///{
+ Value &getAnchorValue() {
+ assert(KindOrArgNo != IRP_INVALID &&
+ "Invalid position does not have an anchor value!");
+ return *AnchorVal;
+ }
+ const Value &getAnchorValue() const {
+ return const_cast<IRPosition *>(this)->getAnchorValue();
+ }
+ ///}
+
+ /// Return the associated function, if any.
+ ///
+ ///{
+ Function *getAssociatedFunction() {
+ if (auto *CB = dyn_cast<CallBase>(AnchorVal))
+ return CB->getCalledFunction();
+ assert(KindOrArgNo != IRP_INVALID &&
+ "Invalid position does not have an anchor scope!");
+ Value &V = getAnchorValue();
+ if (isa<Function>(V))
+ return &cast<Function>(V);
+ if (isa<Argument>(V))
+ return cast<Argument>(V).getParent();
+ if (isa<Instruction>(V))
+ return cast<Instruction>(V).getFunction();
+ return nullptr;
+ }
+ const Function *getAssociatedFunction() const {
+ return const_cast<IRPosition *>(this)->getAssociatedFunction();
+ }
+ ///}
+
+ /// Return the associated argument, if any.
+ ///
+ ///{
+ Argument *getAssociatedArgument() {
+ if (auto *Arg = dyn_cast<Argument>(&getAnchorValue()))
+ return Arg;
+ int ArgNo = getArgNo();
+ if (ArgNo < 0)
+ return nullptr;
+ Function *AssociatedFn = getAssociatedFunction();
+ if (!AssociatedFn || AssociatedFn->arg_size() <= unsigned(ArgNo))
+ return nullptr;
+ return AssociatedFn->arg_begin() + ArgNo;
+ }
+ const Argument *getAssociatedArgument() const {
+ return const_cast<IRPosition *>(this)->getAssociatedArgument();
+ }
+ ///}
+
+ /// Return true if the position refers to a function interface, that is the
+ /// function scope, the function return value, or an argument.
+ bool isFnInterfaceKind() const {
+ switch (getPositionKind()) {
+ case IRPosition::IRP_FUNCTION:
+ case IRPosition::IRP_RETURNED:
+ case IRPosition::IRP_ARGUMENT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// Return the Function surrounding the anchor value.
+ ///
+ ///{
+ Function *getAnchorScope() {
+ Value &V = getAnchorValue();
+ if (isa<Function>(V))
+ return &cast<Function>(V);
+ if (isa<Argument>(V))
+ return cast<Argument>(V).getParent();
+ if (isa<Instruction>(V))
+ return cast<Instruction>(V).getFunction();
+ return nullptr;
+ }
+ const Function *getAnchorScope() const {
+ return const_cast<IRPosition *>(this)->getAnchorScope();
+ }
+ ///}
+
+ /// Return the context instruction, if any.
+ ///
+ ///{
+ Instruction *getCtxI() {
+ Value &V = getAnchorValue();
+ if (auto *I = dyn_cast<Instruction>(&V))
+ return I;
+ if (auto *Arg = dyn_cast<Argument>(&V))
+ if (!Arg->getParent()->isDeclaration())
+ return &Arg->getParent()->getEntryBlock().front();
+ if (auto *F = dyn_cast<Function>(&V))
+ if (!F->isDeclaration())
+ return &(F->getEntryBlock().front());
+ return nullptr;
+ }
+ const Instruction *getCtxI() const {
+ return const_cast<IRPosition *>(this)->getCtxI();
+ }
+ ///}
+
+ /// Return the value this abstract attribute is associated with.
+ ///
+ ///{
+ Value &getAssociatedValue() {
+ assert(KindOrArgNo != IRP_INVALID &&
+ "Invalid position does not have an associated value!");
+ if (getArgNo() < 0 || isa<Argument>(AnchorVal))
+ return *AnchorVal;
+ assert(isa<CallBase>(AnchorVal) && "Expected a call base!");
+ return *cast<CallBase>(AnchorVal)->getArgOperand(getArgNo());
+ }
+ const Value &getAssociatedValue() const {
+ return const_cast<IRPosition *>(this)->getAssociatedValue();
+ }
+ ///}
+
+ /// Return the argument number of the associated value if it is an argument or
+ /// call site argument, otherwise a negative value.
+ int getArgNo() const { return KindOrArgNo; }
+
+ /// Return the index in the attribute list for this position.
+ unsigned getAttrIdx() const {
+ switch (getPositionKind()) {
+ case IRPosition::IRP_INVALID:
+ case IRPosition::IRP_FLOAT:
+ break;
+ case IRPosition::IRP_FUNCTION:
+ case IRPosition::IRP_CALL_SITE:
+ return AttributeList::FunctionIndex;
+ case IRPosition::IRP_RETURNED:
+ case IRPosition::IRP_CALL_SITE_RETURNED:
+ return AttributeList::ReturnIndex;
+ case IRPosition::IRP_ARGUMENT:
+ case IRPosition::IRP_CALL_SITE_ARGUMENT:
+ return KindOrArgNo + AttributeList::FirstArgIndex;
+ }
+ llvm_unreachable(
+ "There is no attribute index for a floating or invalid position!");
+ }
+
+ /// Return the associated position kind.
+ Kind getPositionKind() const {
+ if (getArgNo() >= 0) {
+ assert(((isa<Argument>(getAnchorValue()) &&
+ isa<Argument>(getAssociatedValue())) ||
+ isa<CallBase>(getAnchorValue())) &&
+ "Expected argument or call base due to argument number!");
+ if (isa<CallBase>(getAnchorValue()))
+ return IRP_CALL_SITE_ARGUMENT;
+ return IRP_ARGUMENT;
+ }
+
+ assert(KindOrArgNo < 0 &&
+ "Expected (call site) arguments to never reach this point!");
+ return Kind(KindOrArgNo);
+ }
+
+ /// TODO: Figure out if the attribute related helper functions should live
+ /// here or somewhere else.
+
+ /// Return true if an attribute of any kind in \p AKs exists in the IR at a
+ /// position that will affect this one. See also getAttrs(...).
+ /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
+ /// e.g., the function position if this is an
+ /// argument position, should be ignored.
+ bool hasAttr(ArrayRef<Attribute::AttrKind> AKs,
+ bool IgnoreSubsumingPositions = false) const;
+
+ /// Return the attributes of any kind in \p AKs existing in the IR at a
+ /// position that will affect this one. While each position can only have a
+ /// single attribute of any kind in \p AKs, there are "subsuming" positions
+ /// that could have an attribute as well. This method collects all attributes
+ /// found and returns them in \p Attrs.
+ void getAttrs(ArrayRef<Attribute::AttrKind> AKs,
+ SmallVectorImpl<Attribute> &Attrs) const;
+
+ /// Return the attribute of kind \p AK existing in the IR at this position.
+ Attribute getAttr(Attribute::AttrKind AK) const {
+ if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
+ return Attribute();
+
+ AttributeList AttrList;
+ if (ImmutableCallSite ICS = ImmutableCallSite(&getAnchorValue()))
+ AttrList = ICS.getAttributes();
+ else
+ AttrList = getAssociatedFunction()->getAttributes();
+
+ if (AttrList.hasAttribute(getAttrIdx(), AK))
+ return AttrList.getAttribute(getAttrIdx(), AK);
+ return Attribute();
+ }
+
+ /// Remove the attributes of kinds \p AKs existing in the IR at this position.
+ void removeAttrs(ArrayRef<Attribute::AttrKind> AKs) {
+ if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
+ return;
+
+ AttributeList AttrList;
+ CallSite CS = CallSite(&getAnchorValue());
+ if (CS)
+ AttrList = CS.getAttributes();
+ else
+ AttrList = getAssociatedFunction()->getAttributes();
+
+ LLVMContext &Ctx = getAnchorValue().getContext();
+ for (Attribute::AttrKind AK : AKs)
+ AttrList = AttrList.removeAttribute(Ctx, getAttrIdx(), AK);
+
+ if (CS)
+ CS.setAttributes(AttrList);
+ else
+ getAssociatedFunction()->setAttributes(AttrList);
+ }
+
+ bool isAnyCallSitePosition() const {
+ switch (getPositionKind()) {
+ case IRPosition::IRP_CALL_SITE:
+ case IRPosition::IRP_CALL_SITE_RETURNED:
+ case IRPosition::IRP_CALL_SITE_ARGUMENT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// Special DenseMap key values.
+ ///
+ ///{
+ static const IRPosition EmptyKey;
+ static const IRPosition TombstoneKey;
+ ///}
+
+private:
+ /// Private constructor for special values only!
+ explicit IRPosition(int KindOrArgNo)
+ : AnchorVal(0), KindOrArgNo(KindOrArgNo) {}
+
+ /// IRPosition anchored at \p AnchorVal with kind/argument number \p PK.
+ explicit IRPosition(Value &AnchorVal, Kind PK)
+ : AnchorVal(&AnchorVal), KindOrArgNo(PK) {
+ verify();
+ }
+
+ /// Verify internal invariants.
+ void verify();
+
+ /// The value this position is anchored at.
+ Value *AnchorVal;
+
+ /// The argument number, if non-negative, or the position "kind".
+ int KindOrArgNo;
+};
+
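A brief usage sketch for the factory functions above (F, Arg, and CB are
assumed to be a Function, an Argument, and a CallBase already at hand):

    // Positions for a function scope, its return value, one of its
    // arguments, and the first argument of a particular call site.
    const IRPosition FnPos    = IRPosition::function(F);
    const IRPosition RetPos   = IRPosition::returned(F);
    const IRPosition ArgPos   = IRPosition::argument(Arg);
    const IRPosition CSArgPos = IRPosition::callsite_argument(CB, /* ArgNo */ 0);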
+/// Helper that allows IRPosition as a key in a DenseMap.
+template <> struct DenseMapInfo<IRPosition> {
+ static inline IRPosition getEmptyKey() { return IRPosition::EmptyKey; }
+ static inline IRPosition getTombstoneKey() {
+ return IRPosition::TombstoneKey;
+ }
+ static unsigned getHashValue(const IRPosition &IRP) {
+ return (DenseMapInfo<Value *>::getHashValue(&IRP.getAnchorValue()) << 4) ^
+ (unsigned(IRP.getArgNo()));
+ }
+ static bool isEqual(const IRPosition &LHS, const IRPosition &RHS) {
+ return LHS == RHS;
+ }
+};
+
+/// A visitor class for IR positions.
+///
+/// Given a position P, the SubsumingPositionIterator allows visiting
+/// "subsuming positions" wrt. attributes/information. Thus, if a piece of information
+/// holds for a subsuming position, it also holds for the position P.
+///
+/// The subsuming positions always include the initial position and then,
+/// depending on the position kind, additionally the following ones:
+/// - for IRP_RETURNED:
+/// - the function (IRP_FUNCTION)
+/// - for IRP_ARGUMENT:
+/// - the function (IRP_FUNCTION)
+/// - for IRP_CALL_SITE:
+/// - the callee (IRP_FUNCTION), if known
+/// - for IRP_CALL_SITE_RETURNED:
+/// - the callee (IRP_RETURNED), if known
+/// - the call site (IRP_FUNCTION)
+/// - the callee (IRP_FUNCTION), if known
+/// - for IRP_CALL_SITE_ARGUMENT:
+/// - the argument of the callee (IRP_ARGUMENT), if known
+/// - the callee (IRP_FUNCTION), if known
+/// - the position the call site argument is associated with if it is not
+/// anchored to the call site, e.g., if it is an argument then the argument
+/// (IRP_ARGUMENT)
+class SubsumingPositionIterator {
+ SmallVector<IRPosition, 4> IRPositions;
+ using iterator = decltype(IRPositions)::iterator;
+
+public:
+ SubsumingPositionIterator(const IRPosition &IRP);
+ iterator begin() { return IRPositions.begin(); }
+ iterator end() { return IRPositions.end(); }
+};
+
+/// Wrapper for the FunctionAnalysisManager.
+struct AnalysisGetter {
+ template <typename Analysis>
+ typename Analysis::Result *getAnalysis(const Function &F) {
+ if (!MAM || !F.getParent())
+ return nullptr;
+ auto &FAM = MAM->getResult<FunctionAnalysisManagerModuleProxy>(
+ const_cast<Module &>(*F.getParent()))
+ .getManager();
+ return &FAM.getResult<Analysis>(const_cast<Function &>(F));
+ }
+
+ template <typename Analysis>
+ typename Analysis::Result *getAnalysis(const Module &M) {
+ if (!MAM)
+ return nullptr;
+ return &MAM->getResult<Analysis>(const_cast<Module &>(M));
+ }
+ AnalysisGetter(ModuleAnalysisManager &MAM) : MAM(&MAM) {}
+ AnalysisGetter() {}
+
+private:
+ ModuleAnalysisManager *MAM = nullptr;
+};
+
+/// Data structure to hold cached (LLVM-IR) information.
+///
+/// All attributes are given an InformationCache object at creation time to
+/// avoid inspection of the IR by all of them individually. This default
+/// InformationCache will hold information required by 'default' attributes,
+/// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
+/// is called.
+///
+/// If custom abstract attributes, registered manually through
+/// Attributor::registerAA(...), need more information, especially if it is not
+/// reusable, it is advised to inherit from the InformationCache and cast the
+/// instance down in the abstract attributes.
+struct InformationCache {
+ InformationCache(const Module &M, AnalysisGetter &AG)
+ : DL(M.getDataLayout()), Explorer(/* ExploreInterBlock */ true), AG(AG) {
+
+ CallGraph *CG = AG.getAnalysis<CallGraphAnalysis>(M);
+ if (!CG)
+ return;
+
+ DenseMap<const Function *, unsigned> SccSize;
+ for (scc_iterator<CallGraph *> I = scc_begin(CG); !I.isAtEnd(); ++I) {
+ for (CallGraphNode *Node : *I)
+ SccSize[Node->getFunction()] = I->size();
+ }
+ SccSizeOpt = std::move(SccSize);
+ }
+
+ /// A map type from opcodes to instructions with this opcode.
+ using OpcodeInstMapTy = DenseMap<unsigned, SmallVector<Instruction *, 32>>;
+
+ /// Return the map that relates "interesting" opcodes with all instructions
+ /// with that opcode in \p F.
+ OpcodeInstMapTy &getOpcodeInstMapForFunction(const Function &F) {
+ return FuncInstOpcodeMap[&F];
+ }
+
+ /// A vector type to hold instructions.
+ using InstructionVectorTy = std::vector<Instruction *>;
+
+ /// Return the instructions in \p F that may read or write memory.
+ InstructionVectorTy &getReadOrWriteInstsForFunction(const Function &F) {
+ return FuncRWInstsMap[&F];
+ }
+
+ /// Return the MustBeExecutedContextExplorer.
+ MustBeExecutedContextExplorer &getMustBeExecutedContextExplorer() {
+ return Explorer;
+ }
+
+ /// Return TargetLibraryInfo for function \p F.
+ TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
+ return AG.getAnalysis<TargetLibraryAnalysis>(F);
+ }
+
+ /// Return AliasAnalysis Result for function \p F.
+ AAResults *getAAResultsForFunction(const Function &F) {
+ return AG.getAnalysis<AAManager>(F);
+ }
+
+ /// Return SCC size on call graph for function \p F.
+ unsigned getSccSize(const Function &F) {
+ if (!SccSizeOpt.hasValue())
+ return 0;
+ return (SccSizeOpt.getValue())[&F];
+ }
+
+ /// Return the DataLayout used in the module.
+ const DataLayout &getDL() { return DL; }
+
+private:
+ /// A map type from functions to opcode to instruction maps.
+ using FuncInstOpcodeMapTy = DenseMap<const Function *, OpcodeInstMapTy>;
+
+ /// A map type from functions to their read or write instructions.
+ using FuncRWInstsMapTy = DenseMap<const Function *, InstructionVectorTy>;
+
+ /// A nested map that remembers all instructions in a function with a certain
+ /// instruction opcode (Instruction::getOpcode()).
+ FuncInstOpcodeMapTy FuncInstOpcodeMap;
+
+ /// A map from functions to their instructions that may read or write memory.
+ FuncRWInstsMapTy FuncRWInstsMap;
+
+ /// The DataLayout used in the module.
+ const DataLayout &DL;
+
+ /// The MustBeExecutedContextExplorer instance.
+ MustBeExecutedContextExplorer Explorer;
+
+ /// Getters for analysis.
+ AnalysisGetter &AG;
+
+ /// Cached results for SCC sizes in the call graph.
+ Optional<DenseMap<const Function *, unsigned>> SccSizeOpt;
+
+ /// Give the Attributor access to the members so
+ /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
+ friend struct Attributor;
+};
+
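A sketch of how the analysis getter, the cache, and the Attributor (declared
next) might be wired together by a driver; the interval value is illustrative:

    AnalysisGetter AG(MAM); // MAM: a configured ModuleAnalysisManager
    InformationCache InfoCache(M, AG);
    Attributor A(InfoCache, /* DepRecomputeInterval */ 128);
    for (Function &F : M) {
      A.initializeInformationCache(F);
      A.identifyDefaultAbstractAttributes(F);
    }
    ChangeStatus Changed = A.run(M);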
/// The fixpoint analysis framework that orchestrates the attribute deduction.
///
/// The Attributor provides a general abstract analysis framework (guided
@@ -148,6 +711,18 @@ ChangeStatus operator&(ChangeStatus l, ChangeStatus r);
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
/// described in the file comment.
struct Attributor {
+ /// Constructor
+ ///
+ /// \param InfoCache Cache to hold various information accessible for
+ /// the abstract attributes.
+ /// \param DepRecomputeInterval Number of iterations until the dependences
+ /// between abstract attributes are recomputed.
+ /// \param Whitelist If not null, a set limiting the attribute opportunities.
+ Attributor(InformationCache &InfoCache, unsigned DepRecomputeInterval,
+ DenseSet<const char *> *Whitelist = nullptr)
+ : InfoCache(InfoCache), DepRecomputeInterval(DepRecomputeInterval),
+ Whitelist(Whitelist) {}
+
~Attributor() { DeleteContainerPointers(AllAbstractAttributes); }
/// Run the analyses until a fixpoint is reached or enforced (timeout).
@@ -156,12 +731,13 @@ struct Attributor {
/// as the Attributor is not destroyed (it owns the attributes now).
///
/// \Returns CHANGED if the IR was changed, otherwise UNCHANGED.
- ChangeStatus run();
+ ChangeStatus run(Module &M);
- /// Lookup an abstract attribute of type \p AAType anchored at value \p V and
- /// argument number \p ArgNo. If no attribute is found and \p V is a call base
- /// instruction, the called function is tried as a value next. Thus, the
- /// returned abstract attribute might be anchored at the callee of \p V.
+ /// Lookup an abstract attribute of type \p AAType at position \p IRP. While
+ /// no abstract attribute is found, equivalent positions are checked, see
+ /// SubsumingPositionIterator. Thus, the returned abstract attribute
+ /// might be anchored at a different position, e.g., the callee if \p IRP is a
+ /// call base.
///
/// This method is the only (supported) way an abstract attribute can retrieve
/// information from another abstract attribute. As an example, take an
@@ -170,51 +746,29 @@ struct Attributor {
/// most optimistic information for other abstract attributes in-flight, e.g.
/// the one reasoning about the "captured" state for the argument or the one
/// reasoning on the memory access behavior of the function as a whole.
+ ///
+ /// If the flag \p TrackDependence is set to false the dependence from
+ /// \p QueryingAA to the returned abstract attribute is not automatically
+ /// recorded. This should only be used if the caller will record the
+ /// dependence explicitly if necessary, thus if the returned abstract
+ /// attribute is used for reasoning. To record the dependences explicitly use
+ /// the `Attributor::recordDependence` method.
template <typename AAType>
- const AAType *getAAFor(AbstractAttribute &QueryingAA, const Value &V,
- int ArgNo = -1) {
- static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
- "Cannot query an attribute with a type not derived from "
- "'AbstractAttribute'!");
- assert(AAType::ID != Attribute::None &&
- "Cannot lookup generic abstract attributes!");
-
- // Determine the argument number automatically for llvm::Arguments if none
- // is set. Do not override a given one as it could be a use of the argument
- // in a call site.
- if (ArgNo == -1)
- if (auto *Arg = dyn_cast<Argument>(&V))
- ArgNo = Arg->getArgNo();
-
- // If a function was given together with an argument number, perform the
- // lookup for the actual argument instead. Don't do it for variadic
- // arguments.
- if (ArgNo >= 0 && isa<Function>(&V) &&
- cast<Function>(&V)->arg_size() > (size_t)ArgNo)
- return getAAFor<AAType>(
- QueryingAA, *(cast<Function>(&V)->arg_begin() + ArgNo), ArgNo);
-
- // Lookup the abstract attribute of type AAType. If found, return it after
- // registering a dependence of QueryingAA on the one returned attribute.
- const auto &KindToAbstractAttributeMap = AAMap.lookup({&V, ArgNo});
- if (AAType *AA = static_cast<AAType *>(
- KindToAbstractAttributeMap.lookup(AAType::ID))) {
- // Do not return an attribute with an invalid state. This minimizes checks
- // at the calls sites and allows the fallback below to kick in.
- if (AA->getState().isValidState()) {
- QueryMap[AA].insert(&QueryingAA);
- return AA;
- }
- }
-
- // If no abstract attribute was found and we look for a call site argument,
- // defer to the actual argument instead.
- ImmutableCallSite ICS(&V);
- if (ICS && ICS.getCalledValue())
- return getAAFor<AAType>(QueryingAA, *ICS.getCalledValue(), ArgNo);
+ const AAType &getAAFor(const AbstractAttribute &QueryingAA,
+ const IRPosition &IRP, bool TrackDependence = true) {
+ return getOrCreateAAFor<AAType>(IRP, &QueryingAA, TrackDependence);
+ }
- // No matching attribute found
- return nullptr;
+ /// Explicitly record a dependence from \p FromAA to \p ToAA, that is, if
+ /// \p FromAA changes, \p ToAA should be updated as well.
+ ///
+ /// This method should be used in conjunction with the `getAAFor` method and
+ /// with the TrackDependence flag passed to the method set to false. This can
+ /// be beneficial to avoid false dependences but it requires the users of
+ /// `getAAFor` to explicitly record true dependences through this method.
+ void recordDependence(const AbstractAttribute &FromAA,
+ const AbstractAttribute &ToAA) {
+ QueryMap[&FromAA].insert(const_cast<AbstractAttribute *>(&ToAA));
}
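A sketch of the intended use from within an updateImpl of some abstract
attribute, assuming FnPos is a suitable IRPosition:

    // Query without automatic dependence tracking and only record the
    // dependence if the optimistic result was actually used for reasoning.
    const auto &NoUnwindAA =
        A.getAAFor<AANoUnwind>(*this, FnPos, /* TrackDependence */ false);
    if (NoUnwindAA.isAssumedNoUnwind())
      A.recordDependence(NoUnwindAA, *this);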
/// Introduce a new abstract attribute into the fixpoint analysis.
@@ -222,126 +776,242 @@ struct Attributor {
/// Note that ownership of the attribute is given to the Attributor. It will
/// invoke delete for the attribute on destruction of the Attributor.
///
- /// Attributes are identified by
- /// (1) their anchored value (see AA.getAnchoredValue()),
- /// (2) their argument number (\p ArgNo, or Argument::getArgNo()), and
- /// (3) their default attribute kind (see AAType::ID).
- template <typename AAType> AAType &registerAA(AAType &AA, int ArgNo = -1) {
+ /// Attributes are identified by their IR position (AAType::getIRPosition())
+ /// and the address of their static member (see AAType::ID).
+ template <typename AAType> AAType &registerAA(AAType &AA) {
static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
"Cannot register an attribute with a type not derived from "
"'AbstractAttribute'!");
-
- // Determine the anchor value and the argument number which are used to
- // lookup the attribute together with AAType::ID. If passed an argument,
- // use its argument number but do not override a given one as it could be a
- // use of the argument at a call site.
- Value &AnchoredVal = AA.getAnchoredValue();
- if (ArgNo == -1)
- if (auto *Arg = dyn_cast<Argument>(&AnchoredVal))
- ArgNo = Arg->getArgNo();
-
// Put the attribute in the lookup map structure and the container we use to
// keep track of all attributes.
- AAMap[{&AnchoredVal, ArgNo}][AAType::ID] = &AA;
+ IRPosition &IRP = AA.getIRPosition();
+ auto &KindToAbstractAttributeMap = AAMap[IRP];
+ assert(!KindToAbstractAttributeMap.count(&AAType::ID) &&
+ "Attribute already in map!");
+ KindToAbstractAttributeMap[&AAType::ID] = &AA;
AllAbstractAttributes.push_back(&AA);
return AA;
}
+ /// Return the internal information cache.
+ InformationCache &getInfoCache() { return InfoCache; }
+
/// Determine opportunities to derive 'default' attributes in \p F and create
/// abstract attribute objects for them.
///
/// \param F The function that is checked for attribute opportunities.
- /// \param InfoCache A cache for information queryable by the new attributes.
- /// \param Whitelist If not null, a set limiting the attribute opportunities.
///
/// Note that abstract attribute instances are generally created even if the
/// IR already contains the information they would deduce. The most important
/// reason for this is the single interface, the one of the abstract attribute
/// instance, which can be queried without the need to look at the IR in
/// various places.
- void identifyDefaultAbstractAttributes(
- Function &F, InformationCache &InfoCache,
- DenseSet</* Attribute::AttrKind */ unsigned> *Whitelist = nullptr);
+ void identifyDefaultAbstractAttributes(Function &F);
+
+ /// Initialize the information cache for queries regarding function \p F.
+ ///
+ /// This method needs to be called for all functions that might be looked at
+ /// through the information cache interface *prior* to looking at them.
+ void initializeInformationCache(Function &F);
+
+ /// Mark the internal function \p F as live.
+ ///
+ /// This will trigger the identification and initialization of attributes for
+ /// \p F.
+ void markLiveInternalFunction(const Function &F) {
+ assert(F.hasLocalLinkage() &&
+ "Only local linkage is assumed dead initially.");
+
+ identifyDefaultAbstractAttributes(const_cast<Function &>(F));
+ }
+
+ /// Record that \p I is deleted after information was manifested.
+ void deleteAfterManifest(Instruction &I) { ToBeDeletedInsts.insert(&I); }
+
+ /// Record that \p BB is deleted after information was manifested.
+ void deleteAfterManifest(BasicBlock &BB) { ToBeDeletedBlocks.insert(&BB); }
+
+ /// Record that \p F is deleted after information was manifested.
+ void deleteAfterManifest(Function &F) { ToBeDeletedFunctions.insert(&F); }
+
+ /// Return true if \p AA (or its context instruction) is assumed dead.
+ ///
+ /// If \p LivenessAA is not provided it is queried.
+ bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA);
/// Check \p Pred on all function call sites.
///
/// This method will evaluate \p Pred on call sites and return
/// true if \p Pred holds for every call site. However, this is only possible
/// if all call sites are known, hence the function has internal linkage.
- bool checkForAllCallSites(Function &F, std::function<bool(CallSite)> &Pred,
+ bool checkForAllCallSites(const function_ref<bool(AbstractCallSite)> &Pred,
+ const AbstractAttribute &QueryingAA,
bool RequireAllCallSites);
+ /// Check \p Pred on all values potentially returned by \p F.
+ ///
+ /// This method will evaluate \p Pred on all values potentially returned by
+ /// the function associated with \p QueryingAA. The returned values are
+ /// matched with their respective return instructions. Returns true if \p Pred
+ /// holds on all of them.
+ bool checkForAllReturnedValuesAndReturnInsts(
+ const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
+ &Pred,
+ const AbstractAttribute &QueryingAA);
+
+ /// Check \p Pred on all values potentially returned by the function
+ /// associated with \p QueryingAA.
+ ///
+ /// This is the context insensitive version of the method above.
+ bool checkForAllReturnedValues(const function_ref<bool(Value &)> &Pred,
+ const AbstractAttribute &QueryingAA);
+
+ /// Check \p Pred on all instructions with an opcode present in \p Opcodes.
+ ///
+ /// This method will evaluate \p Pred on all instructions with an opcode
+ /// present in \p Opcodes and return true if \p Pred holds on all of them.
+ bool checkForAllInstructions(const function_ref<bool(Instruction &)> &Pred,
+ const AbstractAttribute &QueryingAA,
+ const ArrayRef<unsigned> &Opcodes);
+
+ /// Check \p Pred on all call-like instructions (= CallBase derived).
+ ///
+ /// See checkForAllInstructions(...) for more information.
+ bool
+ checkForAllCallLikeInstructions(const function_ref<bool(Instruction &)> &Pred,
+ const AbstractAttribute &QueryingAA) {
+ return checkForAllInstructions(Pred, QueryingAA,
+ {(unsigned)Instruction::Invoke,
+ (unsigned)Instruction::CallBr,
+ (unsigned)Instruction::Call});
+ }
+
+ /// Check \p Pred on all Read/Write instructions.
+ ///
+ /// This method will evaluate \p Pred on all instructions that read or write
+ /// to memory present in the information cache and return true if \p Pred
+ /// holds on all of them.
+ bool checkForAllReadWriteInstructions(
+ const llvm::function_ref<bool(Instruction &)> &Pred,
+ AbstractAttribute &QueryingAA);
+
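A sketch of how an abstract attribute might use one of these helpers inside
its updateImpl; the predicate body is illustrative:

    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      // Give up (return false) on call sites this attribute cannot
      // handle, e.g., callback call sites in this sketch.
      return !ACS.isCallbackCall();
    };
    if (!A.checkForAllCallSites(CallSiteCheck, *this,
                                /* RequireAllCallSites */ true))
      return indicatePessimisticFixpoint(); // via StateWrapper, see below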
+ /// Return the data layout associated with the anchor scope.
+ const DataLayout &getDataLayout() const { return InfoCache.DL; }
+
private:
+ /// Check \p Pred on all call sites of \p Fn.
+ ///
+ /// This method will evaluate \p Pred on call sites and return
+ /// true if \p Pred holds for every call site. However, this is only possible
+ /// if all call sites are known, hence the function has internal linkage.
+ bool checkForAllCallSites(const function_ref<bool(AbstractCallSite)> &Pred,
+ const Function &Fn, bool RequireAllCallSites,
+ const AbstractAttribute *QueryingAA);
+
+ /// The private version of getAAFor that allows omitting a querying abstract
+ /// attribute. See also the public getAAFor method.
+ template <typename AAType>
+ const AAType &getOrCreateAAFor(const IRPosition &IRP,
+ const AbstractAttribute *QueryingAA = nullptr,
+ bool TrackDependence = false) {
+ if (const AAType *AAPtr =
+ lookupAAFor<AAType>(IRP, QueryingAA, TrackDependence))
+ return *AAPtr;
+
+ // No matching attribute found, create one.
+ // Use the static create method.
+ auto &AA = AAType::createForPosition(IRP, *this);
+ registerAA(AA);
+
+ // For now we ignore naked and optnone functions.
+ bool Invalidate = Whitelist && !Whitelist->count(&AAType::ID);
+ if (const Function *Fn = IRP.getAnchorScope())
+ Invalidate |= Fn->hasFnAttribute(Attribute::Naked) ||
+ Fn->hasFnAttribute(Attribute::OptimizeNone);
+
+ // Bootstrap the new attribute with an initial update to propagate
+ // information, e.g., function -> call site. If it is not on a given
+ // whitelist we will not perform updates at all.
+ if (Invalidate) {
+ AA.getState().indicatePessimisticFixpoint();
+ return AA;
+ }
+
+ AA.initialize(*this);
+ AA.update(*this);
+
+ if (TrackDependence && AA.getState().isValidState())
+ QueryMap[&AA].insert(const_cast<AbstractAttribute *>(QueryingAA));
+ return AA;
+ }
+
+ /// Return the attribute of \p AAType for \p IRP if existing.
+ template <typename AAType>
+ const AAType *lookupAAFor(const IRPosition &IRP,
+ const AbstractAttribute *QueryingAA = nullptr,
+ bool TrackDependence = false) {
+ static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
+ "Cannot query an attribute with a type not derived from "
+ "'AbstractAttribute'!");
+ assert((QueryingAA || !TrackDependence) &&
+ "Cannot track dependences without a QueryingAA!");
+
+ // Lookup the abstract attribute of type AAType. If found, return it after
+ // registering a dependence of QueryingAA on the one returned attribute.
+ const auto &KindToAbstractAttributeMap = AAMap.lookup(IRP);
+ if (AAType *AA = static_cast<AAType *>(
+ KindToAbstractAttributeMap.lookup(&AAType::ID))) {
+ // Do not register a dependence on an attribute with an invalid state.
+ if (TrackDependence && AA->getState().isValidState())
+ QueryMap[AA].insert(const_cast<AbstractAttribute *>(QueryingAA));
+ return AA;
+ }
+ return nullptr;
+ }
+
/// The set of all abstract attributes.
///{
using AAVector = SmallVector<AbstractAttribute *, 64>;
AAVector AllAbstractAttributes;
///}
- /// A nested map to lookup abstract attributes based on the anchored value and
- /// an argument positions (or -1) on the outer level, and attribute kinds
- /// (Attribute::AttrKind) on the inner level.
+ /// A nested map to look up abstract attributes based on the IRPosition on the
+ /// outer level, and the addresses of the static member (AAType::ID) on
+ /// the inner level.
///{
- using KindToAbstractAttributeMap = DenseMap<unsigned, AbstractAttribute *>;
- DenseMap<std::pair<const Value *, int>, KindToAbstractAttributeMap> AAMap;
+ using KindToAbstractAttributeMap =
+ DenseMap<const char *, AbstractAttribute *>;
+ DenseMap<IRPosition, KindToAbstractAttributeMap> AAMap;
///}
/// A map from abstract attributes to the ones that queried them through calls
/// to the getAAFor<...>(...) method.
///{
using QueryMapTy =
- DenseMap<AbstractAttribute *, SetVector<AbstractAttribute *>>;
+ MapVector<const AbstractAttribute *, SetVector<AbstractAttribute *>>;
QueryMapTy QueryMap;
///}
-};
-
-/// Data structure to hold cached (LLVM-IR) information.
-///
-/// All attributes are given an InformationCache object at creation time to
-/// avoid inspection of the IR by all of them individually. This default
-/// InformationCache will hold information required by 'default' attributes,
-/// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
-/// is called.
-///
-/// If custom abstract attributes, registered manually through
-/// Attributor::registerAA(...), need more information, especially if it is not
-/// reusable, it is advised to inherit from the InformationCache and cast the
-/// instance down in the abstract attributes.
-struct InformationCache {
- /// A map type from opcodes to instructions with this opcode.
- using OpcodeInstMapTy = DenseMap<unsigned, SmallVector<Instruction *, 32>>;
-
- /// Return the map that relates "interesting" opcodes with all instructions
- /// with that opcode in \p F.
- OpcodeInstMapTy &getOpcodeInstMapForFunction(Function &F) {
- return FuncInstOpcodeMap[&F];
- }
- /// A vector type to hold instructions.
- using InstructionVectorTy = std::vector<Instruction *>;
-
- /// Return the instructions in \p F that may read or write memory.
- InstructionVectorTy &getReadOrWriteInstsForFunction(Function &F) {
- return FuncRWInstsMap[&F];
- }
-
-private:
- /// A map type from functions to opcode to instruction maps.
- using FuncInstOpcodeMapTy = DenseMap<Function *, OpcodeInstMapTy>;
+ /// The information cache that holds pre-processed (LLVM-IR) information.
+ InformationCache &InfoCache;
- /// A map type from functions to their read or write instructions.
- using FuncRWInstsMapTy = DenseMap<Function *, InstructionVectorTy>;
+ /// Number of iterations until the dependences between abstract attributes are
+ /// recomputed.
+ const unsigned DepRecomputeInterval;
- /// A nested map that remembers all instructions in a function with a certain
- /// instruction opcode (Instruction::getOpcode()).
- FuncInstOpcodeMapTy FuncInstOpcodeMap;
+ /// If not null, a set limiting the attribute opportunities.
+ const DenseSet<const char *> *Whitelist;
- /// A map from functions to their instructions that may read or write memory.
- FuncRWInstsMapTy FuncRWInstsMap;
+ /// A set to remember the functions we already assume to be live and visited.
+ DenseSet<const Function *> VisitedFunctions;
- /// Give the Attributor access to the members so
- /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
- friend struct Attributor;
+ /// Functions, blocks, and instructions we delete after manifest is done.
+ ///
+ ///{
+ SmallPtrSet<Function *, 8> ToBeDeletedFunctions;
+ SmallPtrSet<BasicBlock *, 8> ToBeDeletedBlocks;
+ SmallPtrSet<Instruction *, 8> ToBeDeletedInsts;
+ ///}
};
/// An interface to query the internal state of an abstract attribute.
@@ -375,13 +1045,17 @@ struct AbstractState {
///
/// This will usually make the optimistically assumed state the known to be
/// true state.
- virtual void indicateOptimisticFixpoint() = 0;
+ ///
+ /// \returns ChangeStatus::UNCHANGED as the assumed value should not change.
+ virtual ChangeStatus indicateOptimisticFixpoint() = 0;
/// Indicate that the abstract state should converge to the pessimistic state.
///
/// This will usually revert the optimistically assumed state to the known to
/// be true state.
- virtual void indicatePessimisticFixpoint() = 0;
+ ///
+ /// \returns ChangeStatus::CHANGED as the assumed value may change.
+ virtual ChangeStatus indicatePessimisticFixpoint() = 0;
};
/// Simple state with integers encoding.
@@ -412,10 +1086,16 @@ struct IntegerState : public AbstractState {
bool isAtFixpoint() const override { return Assumed == Known; }
/// See AbstractState::indicateOptimisticFixpoint(...)
- void indicateOptimisticFixpoint() override { Known = Assumed; }
+ ChangeStatus indicateOptimisticFixpoint() override {
+ Known = Assumed;
+ return ChangeStatus::UNCHANGED;
+ }
/// See AbstractState::indicatePessimisticFixpoint(...)
- void indicatePessimisticFixpoint() override { Assumed = Known; }
+ ChangeStatus indicatePessimisticFixpoint() override {
+ Assumed = Known;
+ return ChangeStatus::CHANGED;
+ }
/// Return the known state encoding
base_t getKnown() const { return Known; }
@@ -448,6 +1128,12 @@ struct IntegerState : public AbstractState {
return *this;
}
+ /// Remove the bits in \p BitsEncoding from the "known bits".
+ IntegerState &removeKnownBits(base_t BitsEncoding) {
+ Known = (Known & ~BitsEncoding);
+ return *this;
+ }
+
/// Keep only "assumed bits" also set in \p BitsEncoding but all known ones.
IntegerState &intersectAssumedBits(base_t BitsEncoding) {
// Make sure we never lose any "known bits".
@@ -455,6 +1141,62 @@ struct IntegerState : public AbstractState {
return *this;
}
+ /// Take minimum of assumed and \p Value.
+ IntegerState &takeAssumedMinimum(base_t Value) {
+ // Make sure we never lose the "known value".
+ Assumed = std::max(std::min(Assumed, Value), Known);
+ return *this;
+ }
+
+ /// Take maximum of known and \p Value.
+ IntegerState &takeKnownMaximum(base_t Value) {
+ // Make sure we never lose the "known value".
+ Assumed = std::max(Value, Assumed);
+ Known = std::max(Value, Known);
+ return *this;
+ }
+
+ /// Equality for IntegerState.
+ bool operator==(const IntegerState &R) const {
+ return this->getAssumed() == R.getAssumed() &&
+ this->getKnown() == R.getKnown();
+ }
+
+ /// Inequality for IntegerState.
+ bool operator!=(const IntegerState &R) const { return !(*this == R); }
+
+ /// "Clamp" this state with \p R. The result is the minimum of the assumed
+ /// information but not less than what was known before.
+ ///
+ /// TODO: Consider replacing the operator with a call or using it only when
+ /// we can also take the maximum of the known information, thus when
+ /// \p R is not dependent on additional assumed state.
+ IntegerState operator^=(const IntegerState &R) {
+ takeAssumedMinimum(R.Assumed);
+ return *this;
+ }
+
+ /// "Clamp" this state with \p R. The result is the maximum of the known
+ /// information but not more than what was assumed before.
+ IntegerState operator+=(const IntegerState &R) {
+ takeKnownMaximum(R.Known);
+ return *this;
+ }
+
+ /// Make this the minimum, known and assumed, of this state and \p R.
+ IntegerState operator&=(const IntegerState &R) {
+ Known = std::min(Known, R.Known);
+ Assumed = std::min(Assumed, R.Assumed);
+ return *this;
+ }
+
+ /// Make this the maximum, known and assumed, of this state and \p R.
+ IntegerState operator|=(const IntegerState &R) {
+ Known = std::max(Known, R.Known);
+ Assumed = std::max(Assumed, R.Assumed);
+ return *this;
+ }
+
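A small worked example of the clamp operators, assuming the constructor
argument initializes the optimistic (assumed) state, as it does for
BooleanState below:

    IntegerState S(32), R(32);  // both: Known = 0, Assumed = 32
    R.takeAssumedMinimum(8);    // R: Known = 0, Assumed = 8
    S ^= R;                     // S: Assumed = max(min(32, 8), 0) = 8
    S += R;                     // S: Known  = max(0, R.getKnown()) = 0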
private:
/// The known state encoding in an integer of type base_t.
base_t Known = getWorstState();
@@ -468,6 +1210,77 @@ struct BooleanState : public IntegerState {
BooleanState() : IntegerState(1){};
};
+/// Helper struct necessary as the modular build fails if the virtual method
+/// IRAttribute::manifest is defined in Attributor.cpp.
+struct IRAttributeManifest {
+ static ChangeStatus manifestAttrs(Attributor &A, IRPosition &IRP,
+ const ArrayRef<Attribute> &DeducedAttrs);
+};
+
+/// Helper to tie an abstract state implementation to an abstract attribute.
+template <typename StateTy, typename Base>
+struct StateWrapper : public StateTy, public Base {
+ /// Provide static access to the type of the state.
+ using StateType = StateTy;
+
+ /// See AbstractAttribute::getState(...).
+ StateType &getState() override { return *this; }
+
+ /// See AbstractAttribute::getState(...).
+ const AbstractState &getState() const override { return *this; }
+};
+
+/// Helper class that provides common functionality to manifest IR attributes.
+template <Attribute::AttrKind AK, typename Base>
+struct IRAttribute : public IRPosition, public Base {
+ IRAttribute(const IRPosition &IRP) : IRPosition(IRP) {}
+ ~IRAttribute() {}
+
+ /// See AbstractAttribute::initialize(...).
+ virtual void initialize(Attributor &A) override {
+ if (hasAttr(getAttrKind())) {
+ this->getState().indicateOptimisticFixpoint();
+ return;
+ }
+
+ const IRPosition &IRP = this->getIRPosition();
+ bool IsFnInterface = IRP.isFnInterfaceKind();
+ const Function *FnScope = IRP.getAnchorScope();
+ // TODO: Not all attributes require an exact definition. Find a way to
+ // enable deduction for some but not all attributes in case the
+ // definition might be changed at runtime, see also
+ // http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
+ // TODO: We could always determine abstract attributes and if sufficient
+ // information was found we could duplicate the functions that do not
+ // have an exact definition.
+ if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
+ this->getState().indicatePessimisticFixpoint();
+ }
+
+ /// See AbstractAttribute::manifest(...).
+ ChangeStatus manifest(Attributor &A) override {
+ SmallVector<Attribute, 4> DeducedAttrs;
+ getDeducedAttributes(getAnchorValue().getContext(), DeducedAttrs);
+ return IRAttributeManifest::manifestAttrs(A, getIRPosition(), DeducedAttrs);
+ }
+
+ /// Return the kind that identifies the abstract attribute implementation.
+ Attribute::AttrKind getAttrKind() const { return AK; }
+
+ /// Return the deduced attributes in \p Attrs.
+ virtual void getDeducedAttributes(LLVMContext &Ctx,
+ SmallVectorImpl<Attribute> &Attrs) const {
+ Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
+ }
+
+ /// Return an IR position, see struct IRPosition.
+ ///
+ ///{
+ IRPosition &getIRPosition() override { return *this; }
+ const IRPosition &getIRPosition() const override { return *this; }
+ ///}
+};
+
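Putting these helpers together, a complete (if trivial) abstract attribute
might look as follows. AANoFreeExample is a hypothetical name used for this
sketch and the deduction logic is elided:

    struct AANoFreeExample
        : public IRAttribute<Attribute::NoFree,
                             StateWrapper<BooleanState, AbstractAttribute>> {
      AANoFreeExample(const IRPosition &IRP) : IRAttribute(IRP) {}

      /// Create an abstract attribute view for the position \p IRP.
      static AANoFreeExample &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

      /// See AbstractAttribute::updateImpl(...).
      ChangeStatus updateImpl(Attributor &A) override {
        // Real deduction logic would go here; the sketch simply gives up.
        return indicatePessimisticFixpoint();
      }

      /// See AbstractAttribute::trackStatistics().
      void trackStatistics() const override {}

      /// Unique ID (due to the unique address)
      static const char ID;
    };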
/// Base struct for all "concrete attribute" deductions.
///
/// The abstract attribute is a minimal interface that allows the Attributor to
@@ -512,29 +1325,7 @@ struct BooleanState : public IntegerState {
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
/// described in the file comment.
struct AbstractAttribute {
-
- /// The positions attributes can be manifested in.
- enum ManifestPosition {
- MP_ARGUMENT, ///< An attribute for a function argument.
- MP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument.
- MP_FUNCTION, ///< An attribute for a function as a whole.
- MP_RETURNED, ///< An attribute for the function return value.
- };
-
- /// An abstract attribute associated with \p AssociatedVal and anchored at
- /// \p AnchoredVal.
- ///
- /// \param AssociatedVal The value this abstract attribute is associated with.
- /// \param AnchoredVal The value this abstract attributes is anchored at.
- /// \param InfoCache Cached information accessible to the abstract attribute.
- AbstractAttribute(Value *AssociatedVal, Value &AnchoredVal,
- InformationCache &InfoCache)
- : AssociatedVal(AssociatedVal), AnchoredVal(AnchoredVal),
- InfoCache(InfoCache) {}
-
- /// An abstract attribute associated with and anchored at \p V.
- AbstractAttribute(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(&V, V, InfoCache) {}
+ using StateType = AbstractState;
/// Virtual destructor.
virtual ~AbstractAttribute() {}
@@ -550,47 +1341,11 @@ struct AbstractAttribute {
virtual void initialize(Attributor &A) {}
/// Return the internal abstract state for inspection.
- virtual const AbstractState &getState() const = 0;
-
- /// Return the value this abstract attribute is anchored with.
- ///
- /// The anchored value might not be the associated value if the latter is not
- /// sufficient to determine where arguments will be manifested. This is mostly
- /// the case for call site arguments as the value is not sufficient to
- /// pinpoint them. Instead, we can use the call site as an anchor.
- ///
- ///{
- Value &getAnchoredValue() { return AnchoredVal; }
- const Value &getAnchoredValue() const { return AnchoredVal; }
- ///}
-
- /// Return the llvm::Function surrounding the anchored value.
- ///
- ///{
- Function &getAnchorScope();
- const Function &getAnchorScope() const;
- ///}
-
- /// Return the value this abstract attribute is associated with.
- ///
- /// The abstract state usually represents this value.
- ///
- ///{
- virtual Value *getAssociatedValue() { return AssociatedVal; }
- virtual const Value *getAssociatedValue() const { return AssociatedVal; }
- ///}
-
- /// Return the position this abstract state is manifested in.
- virtual ManifestPosition getManifestPosition() const = 0;
-
- /// Return the kind that identifies the abstract attribute implementation.
- virtual Attribute::AttrKind getAttrKind() const = 0;
+ virtual StateType &getState() = 0;
+ virtual const StateType &getState() const = 0;
- /// Return the deduced attributes in \p Attrs.
- virtual void getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const {
- LLVMContext &Ctx = AnchoredVal.getContext();
- Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
- }
+ /// Return an IR position, see struct IRPosition.
+ virtual const IRPosition &getIRPosition() const = 0;
/// Helper functions, for debug purposes only.
///{
@@ -617,10 +1372,19 @@ protected:
/// represented by the abstract attribute in the LLVM-IR.
///
/// \Return CHANGED if the IR was altered, otherwise UNCHANGED.
- virtual ChangeStatus manifest(Attributor &A);
+ virtual ChangeStatus manifest(Attributor &A) {
+ return ChangeStatus::UNCHANGED;
+ }
- /// Return the internal abstract state for careful modification.
- virtual AbstractState &getState() = 0;
+ /// Hook to enable custom statistic tracking, called after a manifest that
+ /// resulted in a change, if statistics are enabled.
+ ///
+ /// We require subclasses to provide an implementation so we remember to
+ /// add statistics for them.
+ virtual void trackStatistics() const = 0;
+
+ /// Return an IR position, see struct IRPosition.
+ virtual IRPosition &getIRPosition() = 0;
/// The actual update/transfer function which has to be implemented by the
/// derived classes.
@@ -630,15 +1394,6 @@ protected:
///
/// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
virtual ChangeStatus updateImpl(Attributor &A) = 0;
-
- /// The value this abstract attribute is associated with.
- Value *AssociatedVal;
-
- /// The value this abstract attribute is anchored at.
- Value &AnchoredVal;
-
- /// The information cache accessible to this abstract attribute.
- InformationCache &InfoCache;
};
/// Forward declarations of output streams for debug purposes.
@@ -646,8 +1401,10 @@ protected:
///{
raw_ostream &operator<<(raw_ostream &OS, const AbstractAttribute &AA);
raw_ostream &operator<<(raw_ostream &OS, ChangeStatus S);
-raw_ostream &operator<<(raw_ostream &OS, AbstractAttribute::ManifestPosition);
+raw_ostream &operator<<(raw_ostream &OS, IRPosition::Kind);
+raw_ostream &operator<<(raw_ostream &OS, const IRPosition &);
raw_ostream &operator<<(raw_ostream &OS, const AbstractState &State);
+raw_ostream &operator<<(raw_ostream &OS, const IntegerState &S);
///}
struct AttributorPass : public PassInfoMixin<AttributorPass> {
@@ -661,129 +1418,531 @@ Pass *createAttributorLegacyPass();
/// ----------------------------------------------------------------------------
/// An abstract attribute for the returned values of a function.
-struct AAReturnedValues : public AbstractAttribute {
- /// See AbstractAttribute::AbstractAttribute(...).
- AAReturnedValues(Function &F, InformationCache &InfoCache)
- : AbstractAttribute(F, InfoCache) {}
+struct AAReturnedValues
+ : public IRAttribute<Attribute::Returned, AbstractAttribute> {
+ AAReturnedValues(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return an assumed unique return value if a single candidate is found. If
+ /// there cannot be one, return nullptr. If it is not clear yet, return
+ /// llvm::None.
+ Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
/// Check \p Pred on all returned values.
///
/// This method will evaluate \p Pred on returned values and return
/// true if (1) all returned values are known, and (2) \p Pred returned true
/// for all returned values.
- virtual bool
- checkForallReturnedValues(std::function<bool(Value &)> &Pred) const = 0;
-
- /// See AbstractAttribute::getAttrKind()
- Attribute::AttrKind getAttrKind() const override { return ID; }
-
- /// The identifier used by the Attributor for this class of attributes.
- static constexpr Attribute::AttrKind ID = Attribute::Returned;
+ ///
+ /// Note: Unlike the Attributor::checkForAllReturnedValuesAndReturnInsts
+ /// method, this one will not filter dead return instructions.
+ virtual bool checkForAllReturnedValuesAndReturnInsts(
+ const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
+ &Pred) const = 0;
+
+ using iterator =
+ MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::iterator;
+ using const_iterator =
+ MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::const_iterator;
+ virtual llvm::iterator_range<iterator> returned_values() = 0;
+ virtual llvm::iterator_range<const_iterator> returned_values() const = 0;
+
+ virtual size_t getNumReturnValues() const = 0;
+ virtual const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const = 0;
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAReturnedValues &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
};
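A minimal client-side sketch of the three-way result of getAssumedUniqueReturnValue; the Attributor A, the querying attribute QueryingAA, and the function F are assumed to exist and are not part of this patch:

    const auto &RVAA =
        A.getAAFor<AAReturnedValues>(QueryingAA, IRPosition::function(F));
    Optional<Value *> UniqueRV = RVAA.getAssumedUniqueReturnValue(A);
    if (!UniqueRV.hasValue()) {
      // Not clear yet; re-query in a later fixpoint iteration.
    } else if (*UniqueRV == nullptr) {
      // There cannot be a single unique return value.
    } else {
      // All (assumed live) paths return *UniqueRV.
    }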
-struct AANoUnwind : public AbstractAttribute {
- /// An abstract interface for all nosync attributes.
- AANoUnwind(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(V, InfoCache) {}
-
- /// See AbstractAttribute::getAttrKind()/
- Attribute::AttrKind getAttrKind() const override { return ID; }
-
- static constexpr Attribute::AttrKind ID = Attribute::NoUnwind;
+struct AANoUnwind
+ : public IRAttribute<Attribute::NoUnwind,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoUnwind(const IRPosition &IRP) : IRAttribute(IRP) {}
/// Returns true if nounwind is assumed.
- virtual bool isAssumedNoUnwind() const = 0;
+ bool isAssumedNoUnwind() const { return getAssumed(); }
/// Returns true if nounwind is known.
- virtual bool isKnownNoUnwind() const = 0;
-};
+ bool isKnownNoUnwind() const { return getKnown(); }
-struct AANoSync : public AbstractAttribute {
- /// An abstract interface for all nosync attributes.
- AANoSync(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(V, InfoCache) {}
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);
- /// See AbstractAttribute::getAttrKind().
- Attribute::AttrKind getAttrKind() const override { return ID; }
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
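The boolean attributes below all share the same known/assumed idiom backed by BooleanState; a sketch of a query, again assuming an Attributor A, a querying attribute QueryingAA, and a function F:

    const auto &NoUnwindAA =
        A.getAAFor<AANoUnwind>(QueryingAA, IRPosition::function(F));
    if (NoUnwindAA.isKnownNoUnwind()) {
      // Proven; this fact will not be revoked.
    } else if (NoUnwindAA.isAssumedNoUnwind()) {
      // Optimistic; may still be invalidated before the fixpoint is reached.
    }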
- static constexpr Attribute::AttrKind ID =
- Attribute::AttrKind(Attribute::NoSync);
+struct AANoSync
+ : public IRAttribute<Attribute::NoSync,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoSync(const IRPosition &IRP) : IRAttribute(IRP) {}
/// Returns true if "nosync" is assumed.
- virtual bool isAssumedNoSync() const = 0;
+ bool isAssumedNoSync() const { return getAssumed(); }
/// Returns true if "nosync" is known.
- virtual bool isKnownNoSync() const = 0;
-};
+ bool isKnownNoSync() const { return getKnown(); }
-/// An abstract interface for all nonnull attributes.
-struct AANonNull : public AbstractAttribute {
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);
- /// See AbstractAttribute::AbstractAttribute(...).
- AANonNull(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(V, InfoCache) {}
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
- /// See AbstractAttribute::AbstractAttribute(...).
- AANonNull(Value *AssociatedVal, Value &AnchoredValue,
- InformationCache &InfoCache)
- : AbstractAttribute(AssociatedVal, AnchoredValue, InfoCache) {}
+/// An abstract interface for all nonnull attributes.
+struct AANonNull
+ : public IRAttribute<Attribute::NonNull,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANonNull(const IRPosition &IRP) : IRAttribute(IRP) {}
/// Return true if we assume that the underlying value is nonnull.
- virtual bool isAssumedNonNull() const = 0;
+ bool isAssumedNonNull() const { return getAssumed(); }
/// Return true if we know that the underlying value is nonnull.
- virtual bool isKnownNonNull() const = 0;
+ bool isKnownNonNull() const { return getKnown(); }
- /// See AbastractState::getAttrKind().
- Attribute::AttrKind getAttrKind() const override { return ID; }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);
- /// The identifier used by the Attributor for this class of attributes.
- static constexpr Attribute::AttrKind ID = Attribute::NonNull;
+ /// Unique ID (due to the unique address)
+ static const char ID;
};
/// An abstract attribute for norecurse.
-struct AANoRecurse : public AbstractAttribute {
+struct AANoRecurse
+ : public IRAttribute<Attribute::NoRecurse,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoRecurse(const IRPosition &IRP) : IRAttribute(IRP) {}
- /// See AbstractAttribute::AbstractAttribute(...).
- AANoRecurse(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(V, InfoCache) {}
-
- /// See AbstractAttribute::getAttrKind()
- virtual Attribute::AttrKind getAttrKind() const override {
- return Attribute::NoRecurse;
- }
+ /// Return true if "norecurse" is assumed.
+ bool isAssumedNoRecurse() const { return getAssumed(); }
/// Return true if "norecurse" is known.
- virtual bool isKnownNoRecurse() const = 0;
+ bool isKnownNoRecurse() const { return getKnown(); }
- /// Return true if "norecurse" is assumed.
- virtual bool isAssumedNoRecurse() const = 0;
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);
- /// The identifier used by the Attributor for this class of attributes.
- static constexpr Attribute::AttrKind ID = Attribute::NoRecurse;
+ /// Unique ID (due to the unique address)
+ static const char ID;
};
/// An abstract attribute for willreturn.
-struct AAWillReturn : public AbstractAttribute {
+struct AAWillReturn
+ : public IRAttribute<Attribute::WillReturn,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AAWillReturn(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return true if "willreturn" is assumed.
+ bool isAssumedWillReturn() const { return getAssumed(); }
- /// See AbstractAttribute::AbstractAttribute(...).
- AAWillReturn(Value &V, InformationCache &InfoCache)
- : AbstractAttribute(V, InfoCache) {}
+ /// Return true if "willreturn" is known.
+ bool isKnownWillReturn() const { return getKnown(); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
- /// See AbstractAttribute::getAttrKind()
- virtual Attribute::AttrKind getAttrKind() const override {
- return Attribute::WillReturn;
+/// An abstract interface for all noalias attributes.
+struct AANoAlias
+ : public IRAttribute<Attribute::NoAlias,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoAlias(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return true if we assume that the underlying value is noalias.
+ bool isAssumedNoAlias() const { return getAssumed(); }
+
+ /// Return true if we know that the underlying value is noalias.
+ bool isKnownNoAlias() const { return getKnown(); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An AbstractAttribute for nofree.
+struct AANoFree
+ : public IRAttribute<Attribute::NoFree,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoFree(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return true if "nofree" is assumed.
+ bool isAssumedNoFree() const { return getAssumed(); }
+
+ /// Return true if "nofree" is known.
+ bool isKnownNoFree() const { return getKnown(); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An AbstractAttribute for noreturn.
+struct AANoReturn
+ : public IRAttribute<Attribute::NoReturn,
+ StateWrapper<BooleanState, AbstractAttribute>> {
+ AANoReturn(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return true if the underlying object is assumed to never return.
+ bool isAssumedNoReturn() const { return getAssumed(); }
+
+ /// Return true if the underlying object is known to never return.
+ bool isKnownNoReturn() const { return getKnown(); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An abstract interface for the liveness abstract attribute.
+struct AAIsDead : public StateWrapper<BooleanState, AbstractAttribute>,
+ public IRPosition {
+ AAIsDead(const IRPosition &IRP) : IRPosition(IRP) {}
+
+ /// Returns true if \p BB is assumed dead.
+ virtual bool isAssumedDead(const BasicBlock *BB) const = 0;
+
+ /// Returns true if \p BB is known dead.
+ virtual bool isKnownDead(const BasicBlock *BB) const = 0;
+
+ /// Returns true if \p I is assumed dead.
+ virtual bool isAssumedDead(const Instruction *I) const = 0;
+
+ /// Returns true if \p I is known dead.
+ virtual bool isKnownDead(const Instruction *I) const = 0;
+
+ /// This method is used to check if at least one instruction in a collection
+ /// of instructions is live.
+ template <typename T> bool isLiveInstSet(T begin, T end) const {
+ for (const auto &I : llvm::make_range(begin, end)) {
+ assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
+ "Instruction must be in the same anchor scope function.");
+
+ if (!isAssumedDead(I))
+ return true;
+ }
+
+ return false;
}
- /// Return true if "willreturn" is known.
- virtual bool isKnownWillReturn() const = 0;
+ /// Return an IR position, see struct IRPosition.
+ ///
+ ///{
+ IRPosition &getIRPosition() override { return *this; }
+ const IRPosition &getIRPosition() const override { return *this; }
+ ///}
- /// Return true if "willreturn" is assumed.
- virtual bool isAssumedWillReturn() const = 0;
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
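A sketch of isLiveInstSet in use, assuming LivenessAA is an AAIsDead view and RetInsts is a SmallSetVector<ReturnInst *, 4> of return instructions from the same anchor scope function:

    if (LivenessAA.isLiveInstSet(RetInsts.begin(), RetInsts.end())) {
      // At least one return instruction is not assumed dead, so the values
      // flowing into these returns still matter.
    }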
+
+/// State for the dereferenceable attribute.
+struct DerefState : AbstractState {
+
+ /// State representing the number of dereferenceable bytes.
+ IntegerState DerefBytesState;
+
+ /// State representing whether the value is globally dereferenceable.
+ BooleanState GlobalState;
+
+ /// See AbstractState::isValidState()
+ bool isValidState() const override { return DerefBytesState.isValidState(); }
+
+ /// See AbstractState::isAtFixpoint()
+ bool isAtFixpoint() const override {
+ return !isValidState() ||
+ (DerefBytesState.isAtFixpoint() && GlobalState.isAtFixpoint());
+ }
+
+ /// See AbstractState::indicateOptimisticFixpoint(...)
+ ChangeStatus indicateOptimisticFixpoint() override {
+ DerefBytesState.indicateOptimisticFixpoint();
+ GlobalState.indicateOptimisticFixpoint();
+ return ChangeStatus::UNCHANGED;
+ }
+
+ /// See AbstractState::indicatePessimisticFixpoint(...)
+ ChangeStatus indicatePessimisticFixpoint() override {
+ DerefBytesState.indicatePessimisticFixpoint();
+ GlobalState.indicatePessimisticFixpoint();
+ return ChangeStatus::CHANGED;
+ }
+
+ /// Update known dereferenceable bytes.
+ void takeKnownDerefBytesMaximum(uint64_t Bytes) {
+ DerefBytesState.takeKnownMaximum(Bytes);
+ }
+
+ /// Update assumed dereferenceable bytes.
+ void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
+ DerefBytesState.takeAssumedMinimum(Bytes);
+ }
+
+ /// Equality for DerefState.
+ bool operator==(const DerefState &R) {
+ return this->DerefBytesState == R.DerefBytesState &&
+ this->GlobalState == R.GlobalState;
+ }
+
+ /// Inequality for DerefState.
+ bool operator!=(const DerefState &R) { return !(*this == R); }
+
+ /// See IntegerState::operator^=
+ DerefState operator^=(const DerefState &R) {
+ DerefBytesState ^= R.DerefBytesState;
+ GlobalState ^= R.GlobalState;
+ return *this;
+ }
+
+ /// See IntegerState::operator+=
+ DerefState operator+=(const DerefState &R) {
+ DerefBytesState += R.DerefBytesState;
+ GlobalState += R.GlobalState;
+ return *this;
+ }
+
+ /// See IntegerState::operator&=
+ DerefState operator&=(const DerefState &R) {
+ DerefBytesState &= R.DerefBytesState;
+ GlobalState &= R.GlobalState;
+ return *this;
+ }
+
+ /// See IntegerState::operator|=
+ DerefState operator|=(const DerefState &R) {
+ DerefBytesState |= R.DerefBytesState;
+ GlobalState |= R.GlobalState;
+ return *this;
+ }
- /// The identifier used by the Attributor for this class of attributes.
- static constexpr Attribute::AttrKind ID = Attribute::WillReturn;
+protected:
+ const AANonNull *NonNullAA = nullptr;
+};
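The lattice discipline here mirrors IntegerState: known information only grows and assumed information only shrinks, and the merge operators combine both sub-states component-wise. A small sketch under those assumptions:

    DerefState DS;
    DS.takeKnownDerefBytesMaximum(8);   // at least 8 bytes proven dereferenceable
    DS.takeAssumedDerefBytesMinimum(4); // assumption can only be lowered
    DerefState Other;
    DS ^= Other; // merge Other into DS, sub-state by sub-state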
+
+/// An abstract interface for all dereferenceable attributes.
+struct AADereferenceable
+ : public IRAttribute<Attribute::Dereferenceable,
+ StateWrapper<DerefState, AbstractAttribute>> {
+ AADereferenceable(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return true if we assume that the underlying value is nonnull.
+ bool isAssumedNonNull() const {
+ return NonNullAA && NonNullAA->isAssumedNonNull();
+ }
+
+ /// Return true if we know that the underlying value is nonnull.
+ bool isKnownNonNull() const {
+ return NonNullAA && NonNullAA->isKnownNonNull();
+ }
+
+ /// Return true if we assume that the underlying value is
+ /// dereferenceable(_or_null) globally.
+ bool isAssumedGlobal() const { return GlobalState.getAssumed(); }
+
+ /// Return true if we know that the underlying value is
+ /// dereferenceable(_or_null) globally.
+ bool isKnownGlobal() const { return GlobalState.getKnown(); }
+
+ /// Return assumed dereferenceable bytes.
+ uint32_t getAssumedDereferenceableBytes() const {
+ return DerefBytesState.getAssumed();
+ }
+
+ /// Return known dereferenceable bytes.
+ uint32_t getKnownDereferenceableBytes() const {
+ return DerefBytesState.getKnown();
+ }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AADereferenceable &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An abstract interface for all align attributes.
+struct AAAlign
+ : public IRAttribute<Attribute::Alignment,
+ StateWrapper<IntegerState, AbstractAttribute>> {
+ AAAlign(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// Return assumed alignment.
+ unsigned getAssumedAlign() const { return getAssumed(); }
+
+ /// Return known alignment.
+ unsigned getKnownAlign() const { return getKnown(); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An abstract interface for all nocapture attributes.
+struct AANoCapture
+ : public IRAttribute<Attribute::NoCapture,
+ StateWrapper<IntegerState, AbstractAttribute>> {
+ AANoCapture(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// State encoding bits. A set bit in the state means the property holds.
+ /// NO_CAPTURE is the best possible state, 0 the worst possible state.
+ enum {
+ NOT_CAPTURED_IN_MEM = 1 << 0,
+ NOT_CAPTURED_IN_INT = 1 << 1,
+ NOT_CAPTURED_IN_RET = 1 << 2,
+
+ /// If we do not capture the value in memory or through integers we can only
+ /// communicate it back as a derived pointer.
+ NO_CAPTURE_MAYBE_RETURNED = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT,
+
+ /// If we do not capture the value in memory, through integers, or as a
+ /// derived pointer we know it is not captured.
+ NO_CAPTURE =
+ NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
+ };
+
+ /// Return true if we know that the underlying value is not captured in its
+ /// respective scope.
+ bool isKnownNoCapture() const { return isKnown(NO_CAPTURE); }
+
+ /// Return true if we assume that the underlying value is not captured in its
+ /// respective scope.
+ bool isAssumedNoCapture() const { return isAssumed(NO_CAPTURE); }
+
+ /// Return true if we know that the underlying value is not captured in its
+ /// respective scope but we allow it to escape through a "return".
+ bool isKnownNoCaptureMaybeReturned() const {
+ return isKnown(NO_CAPTURE_MAYBE_RETURNED);
+ }
+
+ /// Return true if we assume that the underlying value is not captured in its
+ /// respective scope but we allow it to escape through a "return".
+ bool isAssumedNoCaptureMaybeReturned() const {
+ return isAssumed(NO_CAPTURE_MAYBE_RETURNED);
+ }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoCapture &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
};
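Because the state is a bit set, NO_CAPTURE_MAYBE_RETURNED sits strictly between NO_CAPTURE and the worst state; a sketch of a query cascade, with NoCaptureAA assumed to be an AANoCapture view:

    if (NoCaptureAA.isAssumedNoCapture()) {
      // The value does not escape at all.
    } else if (NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      // The value does not escape via memory or integers, but a derived
      // pointer may be communicated back through a return.
    }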
+
+/// An abstract interface for the value-simplify abstract attribute.
+struct AAValueSimplify : public StateWrapper<BooleanState, AbstractAttribute>,
+ public IRPosition {
+ AAValueSimplify(const IRPosition &IRP) : IRPosition(IRP) {}
+
+ /// Return an IR position, see struct IRPosition.
+ ///
+ ///{
+ IRPosition &getIRPosition() { return *this; }
+ const IRPosition &getIRPosition() const { return *this; }
+ ///}
+
+ /// Return an assumed simplified value if a single candidate is found. If
+ /// there cannot be one, return the original value. If it is not clear yet,
+ /// return Optional::NoneType.
+ virtual Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const = 0;
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAValueSimplify &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute>,
+ public IRPosition {
+ AAHeapToStack(const IRPosition &IRP) : IRPosition(IRP) {}
+
+ /// Returns true if HeapToStack conversion is assumed to be possible.
+ bool isAssumedHeapToStack() const { return getAssumed(); }
+
+ /// Returns true if HeapToStack conversion is known to be possible.
+ bool isKnownHeapToStack() const { return getKnown(); }
+
+ /// Return an IR position, see struct IRPosition.
+ ///
+ ///{
+ IRPosition &getIRPosition() { return *this; }
+ const IRPosition &getIRPosition() const { return *this; }
+ ///}
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+/// An abstract interface for all memory related attributes.
+struct AAMemoryBehavior
+ : public IRAttribute<Attribute::ReadNone,
+ StateWrapper<IntegerState, AbstractAttribute>> {
+ AAMemoryBehavior(const IRPosition &IRP) : IRAttribute(IRP) {}
+
+ /// State encoding bits. A set bit in the state means the property holds.
+ /// BEST_STATE is the best possible state, 0 the worst possible state.
+ enum {
+ NO_READS = 1 << 0,
+ NO_WRITES = 1 << 1,
+ NO_ACCESSES = NO_READS | NO_WRITES,
+
+ BEST_STATE = NO_ACCESSES,
+ };
+
+ /// Return true if we know that the underlying value is neither read nor
+ /// written in its respective scope.
+ bool isKnownReadNone() const { return isKnown(NO_ACCESSES); }
+
+ /// Return true if we assume that the underlying value is neither read nor
+ /// written in its respective scope.
+ bool isAssumedReadNone() const { return isAssumed(NO_ACCESSES); }
+
+ /// Return true if we know that the underlying value is not written in its
+ /// respective scope.
+ bool isKnownReadOnly() const { return isKnown(NO_WRITES); }
+
+ /// Return true if we assume that the underlying value is not written in its
+ /// respective scope.
+ bool isAssumedReadOnly() const { return isAssumed(NO_WRITES); }
+
+ /// Return true if we know that the underlying value is not read in its
+ /// respective scope.
+ bool isKnownWriteOnly() const { return isKnown(NO_READS); }
+
+ /// Return true if we assume that the underlying value is not read in its
+ /// respective scope.
+ bool isAssumedWriteOnly() const { return isAssumed(NO_READS); }
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAMemoryBehavior &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
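Since NO_ACCESSES is NO_READS | NO_WRITES, readnone implies both readonly and writeonly in this encoding, so queries naturally cascade from strongest to weakest; a sketch, with MemBehaviorAA assumed to be an AAMemoryBehavior view:

    if (MemBehaviorAA.isAssumedReadNone()) {
      // Neither reads nor writes in its scope.
    } else if (MemBehaviorAA.isAssumedReadOnly()) {
      // No writes in its scope.
    } else if (MemBehaviorAA.isAssumedWriteOnly()) {
      // No reads in its scope.
    }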
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
diff --git a/include/llvm/Transforms/IPO/GlobalDCE.h b/include/llvm/Transforms/IPO/GlobalDCE.h
index c434484d1ae3..0a6851849e7e 100644
--- a/include/llvm/Transforms/IPO/GlobalDCE.h
+++ b/include/llvm/Transforms/IPO/GlobalDCE.h
@@ -43,11 +43,25 @@ private:
/// Comdat -> Globals in that Comdat section.
std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers;
+ /// !type metadata -> set of (vtable, offset) pairs
+ DenseMap<Metadata *, SmallSet<std::pair<GlobalVariable *, uint64_t>, 4>>
+ TypeIdMap;
+
+ // Global variables which are vtables, and which we have enough information
+ // about to safely do dead virtual function elimination.
+ SmallPtrSet<GlobalValue *, 32> VFESafeVTables;
+
void UpdateGVDependencies(GlobalValue &GV);
void MarkLive(GlobalValue &GV,
SmallVectorImpl<GlobalValue *> *Updates = nullptr);
bool RemoveUnusedGlobalValue(GlobalValue &GV);
+ // Dead virtual function elimination.
+ void AddVirtualFunctionDependencies(Module &M);
+ void ScanVTables(Module &M);
+ void ScanTypeCheckedLoadIntrinsics(Module &M);
+ void ScanVTableLoad(Function *Caller, Metadata *TypeId, uint64_t CallOffset);
+
void ComputeDependencies(Value *V, SmallPtrSetImpl<GlobalValue *> &U);
};
diff --git a/include/llvm/Transforms/IPO/HotColdSplitting.h b/include/llvm/Transforms/IPO/HotColdSplitting.h
index 73668844590d..8c3049fbaac4 100644
--- a/include/llvm/Transforms/IPO/HotColdSplitting.h
+++ b/include/llvm/Transforms/IPO/HotColdSplitting.h
@@ -17,6 +17,45 @@
namespace llvm {
class Module;
+class ProfileSummaryInfo;
+class BlockFrequencyInfo;
+class TargetTransformInfo;
+class OptimizationRemarkEmitter;
+class AssumptionCache;
+class DominatorTree;
+class CodeExtractorAnalysisCache;
+
+/// A sequence of basic blocks.
+///
+/// A 0-sized SmallVector is slightly cheaper to move than a std::vector.
+using BlockSequence = SmallVector<BasicBlock *, 0>;
+
+class HotColdSplitting {
+public:
+ HotColdSplitting(ProfileSummaryInfo *ProfSI,
+ function_ref<BlockFrequencyInfo *(Function &)> GBFI,
+ function_ref<TargetTransformInfo &(Function &)> GTTI,
+ std::function<OptimizationRemarkEmitter &(Function &)> *GORE,
+ function_ref<AssumptionCache *(Function &)> LAC)
+ : PSI(ProfSI), GetBFI(GBFI), GetTTI(GTTI), GetORE(GORE), LookupAC(LAC) {}
+ bool run(Module &M);
+
+private:
+ bool isFunctionCold(const Function &F) const;
+ bool shouldOutlineFrom(const Function &F) const;
+ bool outlineColdRegions(Function &F, bool HasProfileSummary);
+ Function *extractColdRegion(const BlockSequence &Region,
+ const CodeExtractorAnalysisCache &CEAC,
+ DominatorTree &DT, BlockFrequencyInfo *BFI,
+ TargetTransformInfo &TTI,
+ OptimizationRemarkEmitter &ORE,
+ AssumptionCache *AC, unsigned Count);
+ ProfileSummaryInfo *PSI;
+ function_ref<BlockFrequencyInfo *(Function &)> GetBFI;
+ function_ref<TargetTransformInfo &(Function &)> GetTTI;
+ std::function<OptimizationRemarkEmitter &(Function &)> *GetORE;
+ function_ref<AssumptionCache *(Function &)> LookupAC;
+};
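A sketch of driving the splitter from a wrapper pass; PSI, GetBFI, GetTTI, GetORE, and LookupAC are assumed to be set up by the surrounding pass, and since function_ref does not own its callee, each callback must outlive the HotColdSplitting instance:

    HotColdSplitting HCS(PSI, GetBFI, GetTTI, &GetORE, LookupAC);
    bool Changed = HCS.run(M);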
/// Pass to outline cold regions.
class HotColdSplittingPass : public PassInfoMixin<HotColdSplittingPass> {
diff --git a/include/llvm/Transforms/IPO/LowerTypeTests.h b/include/llvm/Transforms/IPO/LowerTypeTests.h
index 39b23f5957db..3c2bb65b9552 100644
--- a/include/llvm/Transforms/IPO/LowerTypeTests.h
+++ b/include/llvm/Transforms/IPO/LowerTypeTests.h
@@ -193,6 +193,8 @@ struct ByteArrayBuilder {
uint64_t &AllocByteOffset, uint8_t &AllocMask);
};
+bool isJumpTableCanonical(Function *F);
+
} // end namespace lowertypetests
class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
diff --git a/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/include/llvm/Transforms/IPO/WholeProgramDevirt.h
index 509fcc867060..22435e4ed1e5 100644
--- a/include/llvm/Transforms/IPO/WholeProgramDevirt.h
+++ b/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -16,8 +16,10 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
#include <cassert>
#include <cstdint>
+#include <set>
#include <utility>
#include <vector>
@@ -28,6 +30,7 @@ template <typename T> class MutableArrayRef;
class Function;
class GlobalVariable;
class ModuleSummaryIndex;
+struct ValueInfo;
namespace wholeprogramdevirt {
@@ -228,6 +231,29 @@ struct WholeProgramDevirtPass : public PassInfoMixin<WholeProgramDevirtPass> {
PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};
+struct VTableSlotSummary {
+ StringRef TypeID;
+ uint64_t ByteOffset;
+};
+
+/// Perform index-based whole program devirtualization on the \p Summary
+/// index. Any devirtualized targets used by a type test in another module
+/// are added to the \p ExportedGUIDs set. For any local devirtualized targets
+/// only used within the defining module, the information necessary for
+/// locating the corresponding WPD resolution is recorded for the ValueInfo
+/// in case it is exported by cross module importing (in which case the
+/// devirtualized target name will need adjustment).
+void runWholeProgramDevirtOnIndex(
+ ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
+ std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);
+
+/// Call after cross-module importing to update the recorded single impl
+/// devirt target names for any locals that were exported.
+void updateIndexWPDForExports(
+ ModuleSummaryIndex &Summary,
+ function_ref<bool(StringRef, GlobalValue::GUID)> isExported,
+ std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);
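A sketch of the intended ThinLTO call sequence; Index and the IsExported predicate are assumed to come from the surrounding LTO driver:

    std::set<GlobalValue::GUID> ExportedGUIDs;
    std::map<ValueInfo, std::vector<VTableSlotSummary>> LocalWPDTargetsMap;
    runWholeProgramDevirtOnIndex(Index, ExportedGUIDs, LocalWPDTargetsMap);
    // ... cross-module import/export decisions happen here ...
    updateIndexWPDForExports(Index, IsExported, LocalWPDTargetsMap);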
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index 8b70d2926ae9..fcad1e11895f 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -181,10 +181,6 @@ struct SanitizerCoverageOptions {
SanitizerCoverageOptions() = default;
};
-// Insert SanitizerCoverage instrumentation.
-ModulePass *createSanitizerCoverageModulePass(
- const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
-
/// Calculate what to divide by to scale counts.
///
/// Given the maximum count, calculate a divisor that will scale all the
diff --git a/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/include/llvm/Transforms/Instrumentation/InstrProfiling.h
index 8f76d4a1ce55..2e0fae527b15 100644
--- a/include/llvm/Transforms/Instrumentation/InstrProfiling.h
+++ b/include/llvm/Transforms/Instrumentation/InstrProfiling.h
@@ -39,13 +39,14 @@ public:
: Options(Options), IsCS(IsCS) {}
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
- bool run(Module &M, const TargetLibraryInfo &TLI);
+ bool run(Module &M,
+ std::function<const TargetLibraryInfo &(Function &F)> GetTLI);
private:
InstrProfOptions Options;
Module *M;
Triple TT;
- const TargetLibraryInfo *TLI;
+ std::function<const TargetLibraryInfo &(Function &F)> GetTLI;
struct PerFunctionProfileData {
uint32_t NumValueSites[IPVK_Last + 1];
GlobalVariable *RegionCounters = nullptr;
diff --git a/include/llvm/Transforms/Instrumentation/MemorySanitizer.h b/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
index 0739d9e58a61..01a86ee3f1fd 100644
--- a/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
+++ b/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
@@ -19,12 +19,11 @@
namespace llvm {
struct MemorySanitizerOptions {
- MemorySanitizerOptions() = default;
- MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel)
- : TrackOrigins(TrackOrigins), Recover(Recover), Kernel(Kernel) {}
- int TrackOrigins = 0;
- bool Recover = false;
- bool Kernel = false;
+ MemorySanitizerOptions() : MemorySanitizerOptions(0, false, false) {}
+ MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel);
+ bool Kernel;
+ int TrackOrigins;
+ bool Recover;
};
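For example, a new-PM instantiation with origin tracking enabled might look like this sketch:

    MemorySanitizerOptions Opts(/*TrackOrigins=*/2, /*Recover=*/false,
                                /*Kernel=*/false);
    MemorySanitizerPass MSan(Opts);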
// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
@@ -41,6 +40,7 @@ struct MemorySanitizerPass : public PassInfoMixin<MemorySanitizerPass> {
MemorySanitizerPass(MemorySanitizerOptions Options) : Options(Options) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
private:
MemorySanitizerOptions Options;
diff --git a/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h b/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h
new file mode 100644
index 000000000000..85a43ff86f2e
--- /dev/null
+++ b/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h
@@ -0,0 +1,47 @@
+//===--------- Definition of the SanitizerCoverage class --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SanitizerCoverage class which is a port of the legacy
+// SanitizerCoverage pass to use the new PassManager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Instrumentation.h"
+
+namespace llvm {
+
+/// This is the ModuleSanitizerCoverage pass used in the new pass manager. The
+/// pass instruments functions for coverage, adds initialization calls to the
+/// module for trace PC guards and 8bit counters if they are requested, and
+/// appends globals to llvm.compiler.used.
+class ModuleSanitizerCoveragePass
+ : public PassInfoMixin<ModuleSanitizerCoveragePass> {
+public:
+ explicit ModuleSanitizerCoveragePass(
+ SanitizerCoverageOptions Options = SanitizerCoverageOptions())
+ : Options(Options) {}
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+ SanitizerCoverageOptions Options;
+};
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createModuleSanitizerCoverageLegacyPassPass(
+ const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
+
+} // namespace llvm
+
+#endif
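A sketch of scheduling the new-PM pass with edge coverage; MPM is an assumed ModulePassManager:

    SanitizerCoverageOptions Opts;
    Opts.CoverageType = SanitizerCoverageOptions::SCK_Edge;
    MPM.addPass(ModuleSanitizerCoveragePass(Opts));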
diff --git a/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h b/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
index b4e7d9924ff6..ce0e46745abb 100644
--- a/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
+++ b/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
@@ -27,6 +27,8 @@ FunctionPass *createThreadSanitizerLegacyPassPass();
/// yet, the pass inserts the declarations. Otherwise the existing globals are
struct ThreadSanitizerPass : public PassInfoMixin<ThreadSanitizerPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
+
} // namespace llvm
#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H */
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index f9360b5ee2c8..f06230b6f366 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -308,7 +308,7 @@ FunctionPass *createGVNSinkPass();
// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
// are hoisted into the header, while stores sink into the footer.
//
-FunctionPass *createMergedLoadStoreMotionPass();
+FunctionPass *createMergedLoadStoreMotionPass(bool SplitFooterBB = false);
//===----------------------------------------------------------------------===//
//
@@ -397,6 +397,13 @@ FunctionPass *createLowerExpectIntrinsicPass();
//===----------------------------------------------------------------------===//
//
+// LowerConstantIntrinsics - Expand any remaining llvm.objectsize and
+// llvm.is.constant intrinsic calls, even for the unknown cases.
+//
+FunctionPass *createLowerConstantIntrinsicsPass();
+
+//===----------------------------------------------------------------------===//
+//
// PartiallyInlineLibCalls - Tries to inline the fast path of library
// calls such as sqrt.
//
diff --git a/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/include/llvm/Transforms/Scalar/CallSiteSplitting.h
index b6055639e8a8..74cbf84b64b2 100644
--- a/include/llvm/Transforms/Scalar/CallSiteSplitting.h
+++ b/include/llvm/Transforms/Scalar/CallSiteSplitting.h
@@ -9,13 +9,8 @@
#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
-#include "llvm/ADT/SetVector.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
-#include "llvm/Support/Compiler.h"
-#include <vector>
namespace llvm {
diff --git a/include/llvm/Transforms/Scalar/ConstantHoisting.h b/include/llvm/Transforms/Scalar/ConstantHoisting.h
index 6b0fc9c1dd07..39039b093241 100644
--- a/include/llvm/Transforms/Scalar/ConstantHoisting.h
+++ b/include/llvm/Transforms/Scalar/ConstantHoisting.h
@@ -37,7 +37,9 @@
#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
@@ -154,21 +156,21 @@ private:
/// Keeps track of constant candidates found in the function.
using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
- using GVCandVecMapType = DenseMap<GlobalVariable *, ConstCandVecType>;
+ using GVCandVecMapType = MapVector<GlobalVariable *, ConstCandVecType>;
ConstCandVecType ConstIntCandVec;
GVCandVecMapType ConstGEPCandMap;
/// These are the final constants we decided to hoist.
using ConstInfoVecType = SmallVector<consthoist::ConstantInfo, 8>;
- using GVInfoVecMapType = DenseMap<GlobalVariable *, ConstInfoVecType>;
+ using GVInfoVecMapType = MapVector<GlobalVariable *, ConstInfoVecType>;
ConstInfoVecType ConstIntInfoVec;
GVInfoVecMapType ConstGEPInfoMap;
/// Keep track of cast instructions we already cloned.
- SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
+ MapVector<Instruction *, Instruction *> ClonedCastMap;
Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
- SmallPtrSet<Instruction *, 8>
+ SetVector<Instruction *>
findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
void collectConstantCandidates(ConstCandMapType &ConstCandMap,
Instruction *Inst, unsigned Idx,
diff --git a/include/llvm/Transforms/Scalar/Float2Int.h b/include/llvm/Transforms/Scalar/Float2Int.h
index 06aeb8322527..f04b98a19d82 100644
--- a/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/include/llvm/Transforms/Scalar/Float2Int.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
@@ -26,10 +27,11 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
// Glue for old PM.
- bool runImpl(Function &F);
+ bool runImpl(Function &F, const DominatorTree &DT);
private:
- void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
+ void findRoots(Function &F, const DominatorTree &DT,
+ SmallPtrSet<Instruction *, 8> &Roots);
void seen(Instruction *I, ConstantRange R);
ConstantRange badRange();
ConstantRange unknownRange();
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 9fe00a9e7f2d..8a64768af6b5 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -120,6 +120,8 @@ public:
uint32_t lookupOrAddCall(CallInst *C);
uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
uint32_t Num, GVN &Gvn);
+ bool areCallValsEqual(uint32_t Num, uint32_t NewNum, const BasicBlock *Pred,
+ const BasicBlock *PhiBlock, GVN &Gvn);
std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
@@ -159,6 +161,7 @@ private:
SetVector<BasicBlock *> DeadBlocks;
OptimizationRemarkEmitter *ORE;
ImplicitControlFlowTracking *ICF;
+ LoopInfo *LI;
ValueTable VN;
@@ -175,7 +178,7 @@ private:
// Block-local map of equivalent values to their leader, does not
// propagate to any successors. Entries added mid-block are applied
// to the remaining instructions in the block.
- SmallMapVector<Value *, Constant *, 4> ReplaceWithConstMap;
+ SmallMapVector<Value *, Value *, 4> ReplaceOperandsWithMap;
SmallVector<Instruction *, 8> InstrsToErase;
// Map the block to reversed postorder traversal number. It is used to
@@ -280,7 +283,7 @@ private:
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
- bool replaceOperandsWithConsts(Instruction *I) const;
+ bool replaceOperandsForInBlockEquality(Instruction *I) const;
bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
bool DominatesByEdge);
bool processFoldableCondBr(BranchInst *BI);
diff --git a/include/llvm/Transforms/Scalar/GVNExpression.h b/include/llvm/Transforms/Scalar/GVNExpression.h
index 3dc4515f85a1..1600d1af3242 100644
--- a/include/llvm/Transforms/Scalar/GVNExpression.h
+++ b/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -323,7 +323,7 @@ public:
class LoadExpression final : public MemoryExpression {
private:
LoadInst *Load;
- unsigned Alignment;
+ MaybeAlign Alignment;
public:
LoadExpression(unsigned NumOperands, LoadInst *L,
@@ -333,7 +333,8 @@ public:
LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
const MemoryAccess *MemoryLeader)
: MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {
- Alignment = L ? L->getAlignment() : 0;
+ if (L)
+ Alignment = MaybeAlign(L->getAlignment());
}
LoadExpression() = delete;
@@ -348,8 +349,8 @@ public:
LoadInst *getLoadInst() const { return Load; }
void setLoadInst(LoadInst *L) { Load = L; }
- unsigned getAlignment() const { return Alignment; }
- void setAlignment(unsigned Align) { Alignment = Align; }
+ MaybeAlign getAlignment() const { return Alignment; }
+ void setAlignment(MaybeAlign Align) { Alignment = Align; }
bool equals(const Expression &Other) const override;
bool exactlyEquals(const Expression &Other) const override {
diff --git a/include/llvm/Transforms/Scalar/LoopPassManager.h b/include/llvm/Transforms/Scalar/LoopPassManager.h
index 61ec58585fd0..aed764855b2e 100644
--- a/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -263,8 +263,10 @@ template <typename LoopPassT>
class FunctionToLoopPassAdaptor
: public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
public:
- explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false)
- : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging) {
+ explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool UseMemorySSA = false,
+ bool DebugLogging = false)
+ : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging),
+ UseMemorySSA(UseMemorySSA) {
LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
LoopCanonicalizationFPM.addPass(LCSSAPass());
}
@@ -293,7 +295,7 @@ public:
return PA;
// Get the analysis results needed by loop passes.
- MemorySSA *MSSA = EnableMSSALoopDependency
+ MemorySSA *MSSA = UseMemorySSA
? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA())
: nullptr;
LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
@@ -310,8 +312,10 @@ public:
// LoopStandardAnalysisResults object. The loop analyses cached in this
// manager have access to those analysis results and so it must invalidate
// itself when they go away.
- LoopAnalysisManager &LAM =
- AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
+ auto &LAMFP = AM.getResult<LoopAnalysisManagerFunctionProxy>(F);
+ if (UseMemorySSA)
+ LAMFP.markMSSAUsed();
+ LoopAnalysisManager &LAM = LAMFP.getManager();
// A postorder worklist of loops to process.
SmallPriorityWorklist<Loop *, 4> Worklist;
@@ -382,7 +386,7 @@ public:
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
PA.preserve<ScalarEvolutionAnalysis>();
- if (EnableMSSALoopDependency)
+ if (UseMemorySSA)
PA.preserve<MemorySSAAnalysis>();
// FIXME: What we really want to do here is preserve an AA category, but
// that concept doesn't exist yet.
@@ -397,14 +401,18 @@ private:
LoopPassT Pass;
FunctionPassManager LoopCanonicalizationFPM;
+
+ bool UseMemorySSA = false;
};
/// A function to deduce a loop pass type and wrap it in the templated
/// adaptor.
template <typename LoopPassT>
FunctionToLoopPassAdaptor<LoopPassT>
-createFunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false) {
- return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), DebugLogging);
+createFunctionToLoopPassAdaptor(LoopPassT Pass, bool UseMemorySSA = false,
+ bool DebugLogging = false) {
+ return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), UseMemorySSA,
+ DebugLogging);
}
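With the new parameter, a caller opts into MemorySSA explicitly; a sketch, where LICMPass stands in for any loop pass:

    FunctionPassManager FPM;
    FPM.addPass(
        createFunctionToLoopPassAdaptor(LICMPass(), /*UseMemorySSA=*/true));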
/// Pass for printing a loop's contents as textual IR.
diff --git a/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
index a84d889a83ad..afeb1f1da029 100644
--- a/include/llvm/Transforms/Scalar/LoopUnrollPass.h
+++ b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -62,6 +62,8 @@ struct LoopUnrollOptions {
Optional<bool> AllowPeeling;
Optional<bool> AllowRuntime;
Optional<bool> AllowUpperBound;
+ Optional<bool> AllowProfileBasedPeeling;
+ Optional<unsigned> FullUnrollMaxCount;
int OptLevel;
/// If false, use a cost model to determine whether unrolling of a loop is
@@ -110,6 +112,18 @@ struct LoopUnrollOptions {
OptLevel = O;
return *this;
}
+
+ // Enables or disables loop peeling based on profile.
+ LoopUnrollOptions &setProfileBasedPeeling(int O) {
+ AllowProfileBasedPeeling = O;
+ return *this;
+ }
+
+ // Sets the max full unroll count.
+ LoopUnrollOptions &setFullUnrollMaxCount(unsigned O) {
+ FullUnrollMaxCount = O;
+ return *this;
+ }
};
/// Loop unroll pass that will support both full and partial unrolling.
diff --git a/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h b/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h
new file mode 100644
index 000000000000..a5ad4a2192a0
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h
@@ -0,0 +1,41 @@
+//===- LowerConstantIntrinsics.h - Lower constant int. pass -*- C++ -*-========//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerConstantIntrinsics pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerConstantIntrinsicsPass :
+ PassInfoMixin<LowerConstantIntrinsicsPass> {
+public:
+ explicit LowerConstantIntrinsicsPass() {}
+
+ /// Run the pass over the function.
+ ///
+ /// This will lower all remaining 'objectsize' and 'is.constant'
+ /// intrinsic calls in this function, even when the argument has no known
+ /// size or is not a constant, respectively. The resulting constant is
+ /// propagated and conditional branches are resolved where possible.
+ /// This complements the Instruction Simplification and
+ /// Instruction Combination passes of the optimized pass chain.
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+}
+
+#endif
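A sketch of scheduling the pass in a new-PM function pipeline:

    FunctionPassManager FPM;
    FPM.addPass(LowerConstantIntrinsicsPass());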
diff --git a/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h b/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
index 9071a56532f8..c5f6d6e0e8bd 100644
--- a/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
+++ b/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
@@ -27,12 +27,28 @@
#include "llvm/IR/PassManager.h"
namespace llvm {
+struct MergedLoadStoreMotionOptions {
+ bool SplitFooterBB;
+ MergedLoadStoreMotionOptions(bool SplitFooterBB = false)
+ : SplitFooterBB(SplitFooterBB) {}
+
+ MergedLoadStoreMotionOptions &splitFooterBB(bool SFBB) {
+ SplitFooterBB = SFBB;
+ return *this;
+ }
+};
+
class MergedLoadStoreMotionPass
: public PassInfoMixin<MergedLoadStoreMotionPass> {
+ MergedLoadStoreMotionOptions Options;
+
public:
+ MergedLoadStoreMotionPass()
+ : MergedLoadStoreMotionPass(MergedLoadStoreMotionOptions()) {}
+ MergedLoadStoreMotionPass(const MergedLoadStoreMotionOptions &PassOptions)
+ : Options(PassOptions) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
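A sketch of the new-PM equivalent of createMergedLoadStoreMotionPass(/*SplitFooterBB=*/true) above; FPM is an assumed FunctionPassManager:

    FPM.addPass(MergedLoadStoreMotionPass(
        MergedLoadStoreMotionOptions().splitFooterBB(true)));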
-
}
#endif // LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
diff --git a/include/llvm/Transforms/Scalar/Reassociate.h b/include/llvm/Transforms/Scalar/Reassociate.h
index 2db8d8ce309c..d5b175eff0e6 100644
--- a/include/llvm/Transforms/Scalar/Reassociate.h
+++ b/include/llvm/Transforms/Scalar/Reassociate.h
@@ -122,7 +122,9 @@ private:
void EraseInst(Instruction *I);
void RecursivelyEraseDeadInsts(Instruction *I, OrderedSet &Insts);
void OptimizeInst(Instruction *I);
- Instruction *canonicalizeNegConstExpr(Instruction *I);
+ Instruction *canonicalizeNegFPConstantsForOp(Instruction *I, Instruction *Op,
+ Value *OtherOp);
+ Instruction *canonicalizeNegFPConstants(Instruction *I);
void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT);
};
diff --git a/include/llvm/Transforms/Scalar/SCCP.h b/include/llvm/Transforms/Scalar/SCCP.h
index 0ffd983eb3e0..45e674a20a16 100644
--- a/include/llvm/Transforms/Scalar/SCCP.h
+++ b/include/llvm/Transforms/Scalar/SCCP.h
@@ -45,7 +45,8 @@ struct AnalysisResultsForFn {
PostDominatorTree *PDT;
};
-bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI,
+bool runIPSCCP(Module &M, const DataLayout &DL,
+ std::function<const TargetLibraryInfo &(Function &)> GetTLI,
function_ref<AnalysisResultsForFn(Function &)> getAnalysis);
} // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 4d861ffe9a31..698e57fd0394 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -83,10 +83,16 @@ bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// Attempts to merge a block into its predecessor, if possible. The return
/// value indicates success or failure.
+/// By default, blocks are not merged if BB's predecessor has multiple
+/// successors. If PredecessorWithTwoSuccessors is true, the blocks can only be
+/// merged if BB's predecessor branches to BB and to one other block AnotherBB,
+/// and BB has a single successor Sing. In that case the branch is updated to
+/// target Sing instead of BB, and BB is still merged into its predecessor and
+/// removed.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
LoopInfo *LI = nullptr,
MemorySSAUpdater *MSSAU = nullptr,
- MemoryDependenceResults *MemDep = nullptr);
+ MemoryDependenceResults *MemDep = nullptr,
+ bool PredecessorWithTwoSuccessors = false);
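A sketch of the new mode; BB, DTU, LI, and MSSAU are assumed to exist in the caller:

    if (MergeBlockIntoPredecessor(BB, &DTU, LI, MSSAU, /*MemDep=*/nullptr,
                                  /*PredecessorWithTwoSuccessors=*/true)) {
      // BB was folded into its predecessor; the predecessor's second branch
      // target now points at BB's former single successor.
    }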
/// Replace all uses of an instruction (specified by BI) with a value, then
/// remove and delete the original instruction.
@@ -222,7 +228,8 @@ BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
/// info is updated.
BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
- MemorySSAUpdater *MSSAU = nullptr);
+ MemorySSAUpdater *MSSAU = nullptr,
+ const Twine &BBName = "");
/// This method introduces at least one new basic block into the function and
/// moves some of the predecessors of BB to be predecessors of the new block.
diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h
index 8421c31a36da..3d15b2a7bf2a 100644
--- a/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -30,17 +30,16 @@ namespace llvm {
bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
bool inferLibFuncAttributes(Module *M, StringRef Name, const TargetLibraryInfo &TLI);
- /// Check whether the overloaded unary floating point function
+ /// Check whether the overloaded floating point function
/// corresponding to \a Ty is available.
- bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
- LibFunc DoubleFn, LibFunc FloatFn,
- LibFunc LongDoubleFn);
+ bool hasFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+ LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn);
- /// Get the name of the overloaded unary floating point function
+ /// Get the name of the overloaded floating point function
/// corresponding to \a Ty.
- StringRef getUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
- LibFunc DoubleFn, LibFunc FloatFn,
- LibFunc LongDoubleFn);
+ StringRef getFloatFnName(const TargetLibraryInfo *TLI, Type *Ty,
+ LibFunc DoubleFn, LibFunc FloatFn,
+ LibFunc LongDoubleFn);
/// Return V if it is an i8*, otherwise cast it to i8*.
Value *castToCStr(Value *V, IRBuilder<> &B);
@@ -51,6 +50,11 @@ namespace llvm {
Value *emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
+ /// Emit a call to the strdup function to the builder, for the specified
+ /// pointer. Ptr is required to be some pointer type, and the return value has
+ /// 'i8*' type.
+ Value *emitStrDup(Value *Ptr, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
/// Emit a call to the strnlen function to the builder, for the specified
/// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
/// type, and the return value has 'intptr_t' type.
@@ -164,6 +168,13 @@ namespace llvm {
Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
IRBuilder<> &B, const AttributeList &Attrs);
+ /// Emit a call to the binary function DoubleFn, FloatFn or LongDoubleFn,
+ /// depending of the type of Op1.
+ Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2,
+ const TargetLibraryInfo *TLI, LibFunc DoubleFn,
+ LibFunc FloatFn, LibFunc LongDoubleFn,
+ IRBuilder<> &B, const AttributeList &Attrs);
+
/// Emit a call to the putchar function. This assumes that Char is an integer.
Value *emitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
diff --git a/include/llvm/Transforms/Utils/BypassSlowDivision.h b/include/llvm/Transforms/Utils/BypassSlowDivision.h
index 471055921fa8..bd98c902d1ab 100644
--- a/include/llvm/Transforms/Utils/BypassSlowDivision.h
+++ b/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/IR/ValueHandle.h"
#include <cstdint>
namespace llvm {
@@ -28,8 +29,10 @@ class Value;
struct DivRemMapKey {
bool SignedOp;
- Value *Dividend;
- Value *Divisor;
+ AssertingVH<Value> Dividend;
+ AssertingVH<Value> Divisor;
+
+ DivRemMapKey() = default;
DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
: SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
@@ -50,8 +53,10 @@ template <> struct DenseMapInfo<DivRemMapKey> {
}
static unsigned getHashValue(const DivRemMapKey &Val) {
- return (unsigned)(reinterpret_cast<uintptr_t>(Val.Dividend) ^
- reinterpret_cast<uintptr_t>(Val.Divisor)) ^
+ return (unsigned)(reinterpret_cast<uintptr_t>(
+ static_cast<Value *>(Val.Dividend)) ^
+ reinterpret_cast<uintptr_t>(
+ static_cast<Value *>(Val.Divisor))) ^
(unsigned)Val.SignedOp;
}
};
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 9d79ee1633f6..8a1ab796734e 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -22,6 +22,7 @@
namespace llvm {
+class AllocaInst;
class BasicBlock;
class BlockFrequency;
class BlockFrequencyInfo;
@@ -36,6 +37,38 @@ class Module;
class Type;
class Value;
+/// A cache for the CodeExtractor analysis. The operation \ref
+/// CodeExtractor::extractCodeRegion is guaranteed not to invalidate this
+/// object. This object should conservatively be considered invalid if any
+/// other mutating operations on the IR occur.
+///
+/// Constructing this object is O(n) in the size of the function.
+class CodeExtractorAnalysisCache {
+ /// The allocas in the function.
+ SmallVector<AllocaInst *, 16> Allocas;
+
+ /// Base memory addresses of load/store instructions, grouped by block.
+ DenseMap<BasicBlock *, DenseSet<Value *>> BaseMemAddrs;
+
+ /// Blocks which contain instructions which may have unknown side-effects
+ /// on memory.
+ DenseSet<BasicBlock *> SideEffectingBlocks;
+
+ void findSideEffectInfoForBlock(BasicBlock &BB);
+
+public:
+ CodeExtractorAnalysisCache(Function &F);
+
+ /// Get the allocas in the function at the time the analysis was created.
+ /// Note that some of these allocas may no longer be present in the function,
+ /// due to \ref CodeExtractor::extractCodeRegion.
+ ArrayRef<AllocaInst *> getAllocas() const { return Allocas; }
+
+ /// Check whether \p BB contains an instruction thought to load from, store
+ /// to, or otherwise clobber the alloca \p Addr.
+ bool doesBlockContainClobberOfAddr(BasicBlock &BB, AllocaInst *Addr) const;
+};
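The cache is intended to be built once per function and reused across extractions, since extractCodeRegion is guaranteed not to invalidate it; a sketch, with Extractors as a hypothetical collection of CodeExtractor instances over the same function F:

    CodeExtractorAnalysisCache CEAC(F);
    for (CodeExtractor &CE : Extractors)
      if (Function *Outlined = CE.extractCodeRegion(CEAC)) {
        // Process the newly outlined function.
      }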
+
/// Utility class for extracting code into a new function.
///
/// This utility provides a simple interface for extracting some sequence of
@@ -104,13 +137,21 @@ class Value;
///
/// Returns zero when called on a CodeExtractor instance where isEligible
/// returns false.
- Function *extractCodeRegion();
+ Function *extractCodeRegion(const CodeExtractorAnalysisCache &CEAC);
+
+ /// Verify that the assumption cache isn't stale after a region is
+ /// extracted. Returns false when the verifier finds errors. The
+ /// AssumptionCache is passed as a parameter to make this function stateless.
+ static bool verifyAssumptionCache(const Function &F, AssumptionCache *AC);
/// Test whether this code extractor is eligible.
///
/// Based on the blocks used when constructing the code extractor,
/// determine whether it is eligible for extraction.
- bool isEligible() const { return !Blocks.empty(); }
+ ///
+ /// Checks that varargs handling (with vastart and vaend) is only done in
+ /// the outlined blocks.
+ bool isEligible() const;
/// Compute the set of input values and output values for the code.
///
@@ -127,7 +168,9 @@ class Value;
/// region.
///
/// Returns true if it is safe to do the code motion.
- bool isLegalToShrinkwrapLifetimeMarkers(Instruction *AllocaAddr) const;
+ bool
+ isLegalToShrinkwrapLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+ Instruction *AllocaAddr) const;
/// Find the set of allocas whose life ranges are contained within the
/// outlined region.
@@ -137,7 +180,8 @@ class Value;
/// are used by the lifetime markers are also candidates for shrink-
/// wrapping. The instructions that need to be sunk are collected in
/// 'Allocas'.
- void findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
+ void findAllocas(const CodeExtractorAnalysisCache &CEAC,
+ ValueSet &SinkCands, ValueSet &HoistCands,
BasicBlock *&ExitBlock) const;
/// Find or create a block within the outline region for placing hoisted
@@ -158,8 +202,9 @@ class Value;
Instruction *LifeEnd = nullptr;
};
- LifetimeMarkerInfo getLifetimeMarkers(Instruction *Addr,
- BasicBlock *ExitBlock) const;
+ LifetimeMarkerInfo
+ getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+ Instruction *Addr, BasicBlock *ExitBlock) const;
void severSplitPHINodesOfEntry(BasicBlock *&Header);
void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index ff516f230979..9fcb2f64d79b 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -271,6 +271,15 @@ inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}
+/// Create a call that matches the invoke \p II in terms of arguments,
+/// attributes, debug information, etc. The call is not placed in a block and it
+/// will not have a name. The invoke instruction is not removed, nor are the
+/// uses replaced by the new call.
+CallInst *createCallMatchingInvoke(InvokeInst *II);
+
+/// This function converts the specified invoke into a normal call.
+void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);
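A sketch of the typical use, turning an invoke whose unwind edge became unreachable into a plain call; BB and DTU are assumed to exist in the caller:

    if (auto *II = dyn_cast<InvokeInst>(BB->getTerminator()))
      changeToCall(II, &DTU); // rewires control flow and updates the DomTree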
+
///===---------------------------------------------------------------------===//
/// Dbg Intrinsic utilities
///
@@ -403,8 +412,7 @@ void removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
/// Remove all blocks that can not be reached from the function's entry.
///
/// Returns true if any basic block was removed.
-bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
- DomTreeUpdater *DTU = nullptr,
+bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
MemorySSAUpdater *MSSAU = nullptr);
/// Combine the metadata of two instructions so that K can replace J. Some
@@ -424,6 +432,10 @@ void combineMetadata(Instruction *K, const Instruction *J,
void combineMetadataForCSE(Instruction *K, const Instruction *J,
bool DoesKMove);
+/// Copy the metadata from the source instruction to the destination (the
+/// replacement for the source instruction).
+void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source);
+
/// Patch the replacement so that it is not more restrictive than the value
/// being replaced. It assumes that the replacement does not get moved from
/// its original position.
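A minimal sketch of the intended call pattern for copyMetadataForLoad, assuming a transform that has built a same-typed replacement load for an original LI (which metadata kinds survive the copy is the helper's decision, not the caller's):

    // LI and a positioned IRBuilder `Builder` are assumptions here.
    LoadInst *NewLI =
        Builder.CreateLoad(LI->getType(), LI->getPointerOperand());
    copyMetadataForLoad(*NewLI, *LI); // clone the metadata that stays valid
    LI->replaceAllUsesWith(NewLI);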
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index 68bdded5cf93..d32f08717e9b 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -215,6 +215,9 @@ makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
/// Look for the loop attribute that disables all transformation heuristic.
bool hasDisableAllTransformsHint(const Loop *L);
+/// Look for the loop attribute that disables the LICM transformation heuristics.
+bool hasDisableLICMTransformsHint(const Loop *L);
+
/// The mode sets how eager a transformation should be applied.
enum TransformationMode {
/// The pass can use heuristics to determine whether a transformation should
@@ -252,6 +255,8 @@ TransformationMode hasLICMVersioningTransformation(Loop *L);
/// @}
/// Set input string into loop metadata by keeping other values intact.
+/// If the string is already in the loop metadata, the value is updated if it
+/// differs.
void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
unsigned V = 0);
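A short sketch of the documented update-in-place behavior; the metadata name is just an example of the llvm.loop.* strings this helper is used with:

    addStringMetadataToLoop(L, "llvm.loop.unroll.count", 4);
    // A later call with a different value rewrites the existing operand
    // rather than appending a duplicate entry:
    addStringMetadataToLoop(L, "llvm.loop.unroll.count", 8);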
diff --git a/include/llvm/Transforms/Utils/MisExpect.h b/include/llvm/Transforms/Utils/MisExpect.h
new file mode 100644
index 000000000000..1dbe8cb95936
--- /dev/null
+++ b/include/llvm/Transforms/Utils/MisExpect.h
@@ -0,0 +1,43 @@
+//===--- MisExpect.h - Check the use of llvm.expect with PGO data ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit warnings for potentially incorrect usage of the
+// llvm.expect intrinsic. This utility extracts the threshold values from
+// metadata associated with the instrumented Branch or Switch instruction. The
+// threshold values are then used to determine if a warning should be emitted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+
+namespace llvm {
+namespace misexpect {
+
+/// verifyMisExpect - compares PGO counters to the thresholds used for
+/// llvm.expect and warns if the PGO counters are outside of the expected
+/// range.
+/// \param I The Instruction being checked
+/// \param Weights A vector of profile weights for each target block
+/// \param Ctx The current LLVM context
+void verifyMisExpect(llvm::Instruction *I,
+ const llvm::SmallVector<uint32_t, 4> &Weights,
+ llvm::LLVMContext &Ctx);
+
+/// checkFrontendInstrumentation - verify that llvm.expect matches the PGO
+/// profile. This function checks the frontend instrumentation in the backend
+/// when lowering llvm.expect intrinsics. It checks for existing metadata, and
+/// then validates the use of llvm.expect against the assigned branch weights.
+///
+/// \param I the Instruction being checked
+void checkFrontendInstrumentation(Instruction &I);
+
+} // namespace misexpect
+} // namespace llvm
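A minimal sketch of driving the new interface from an llvm.expect lowering; the branch BI and the weight values are illustrative, not taken from this patch:

    // Weights pairs up with BI's successors (likely vs. unlikely here).
    SmallVector<uint32_t, 4> Weights = {2000, 1};
    misexpect::verifyMisExpect(BI, Weights, BI->getContext());

    // In the backend, frontend (clang) instrumentation is validated via the
    // metadata-based entry point instead:
    misexpect::checkFrontendInstrumentation(*BI);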
diff --git a/include/llvm/Transforms/Utils/PredicateInfo.h b/include/llvm/Transforms/Utils/PredicateInfo.h
index da4a5dcc28c0..7c7a8eb04a2c 100644
--- a/include/llvm/Transforms/Utils/PredicateInfo.h
+++ b/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -229,10 +229,10 @@ protected:
private:
void buildPredicateInfo();
- void processAssume(IntrinsicInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
- void processBranch(BranchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
- void processSwitch(SwitchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
- void renameUses(SmallPtrSetImpl<Value *> &);
+ void processAssume(IntrinsicInst *, BasicBlock *, SmallVectorImpl<Value *> &);
+ void processBranch(BranchInst *, BasicBlock *, SmallVectorImpl<Value *> &);
+ void processSwitch(SwitchInst *, BasicBlock *, SmallVectorImpl<Value *> &);
+ void renameUses(SmallVectorImpl<Value *> &);
using ValueDFS = PredicateInfoClasses::ValueDFS;
typedef SmallVectorImpl<ValueDFS> ValueDFSStack;
void convertUsesToDFSOrdered(Value *, SmallVectorImpl<ValueDFS> &);
@@ -240,7 +240,7 @@ private:
bool stackIsInScope(const ValueDFSStack &, const ValueDFS &) const;
void popStackUntilDFSScope(ValueDFSStack &, const ValueDFS &);
ValueInfo &getOrCreateValueInfo(Value *);
- void addInfoFor(SmallPtrSetImpl<Value *> &OpsToRename, Value *Op,
+ void addInfoFor(SmallVectorImpl<Value *> &OpsToRename, Value *Op,
PredicateBase *PB);
const ValueInfo &getValueInfo(Value *) const;
Function &F;
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 2572094ddac8..88c2ef787ad8 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -126,6 +126,12 @@ private:
/// Erase an instruction from its parent with our eraser.
void eraseFromParent(Instruction *I);
+ /// Replace an instruction with a value and erase it from its parent.
+ void substituteInParent(Instruction *I, Value *With) {
+ replaceAllUsesWith(I, With);
+ eraseFromParent(I);
+ }
+
Value *foldMallocMemset(CallInst *Memset, IRBuilder<> &B);
public:
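A hedged sketch of the call pattern substituteInParent enables inside the fold routines below; CI and KnownLen are assumptions for illustration:

    // e.g. a strlen call whose result is statically known: fold it to a
    // constant and retire it through the class's replacer/eraser hooks so
    // clients such as InstCombine's worklist stay in sync.
    Value *Len = ConstantInt::get(CI->getType(), KnownLen);
    substituteInParent(CI, Len);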
@@ -154,6 +160,7 @@ private:
Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNDup(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
@@ -164,14 +171,17 @@ private:
Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemRChr(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
Value *optimizeBCmp(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemCmpBCmpCommon(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemPCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
Value *optimizeRealloc(CallInst *CI, IRBuilder<> &B);
Value *optimizeWcslen(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeBCopy(CallInst *CI, IRBuilder<> &B);
// Wrapper for all String/Memory Library Call Optimizations
Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h
index 593ca26feb98..02b81b4b7ee2 100644
--- a/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -114,8 +114,8 @@ bool computeUnrollCount(Loop *L, const TargetTransformInfo &TTI,
DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
const SmallPtrSetImpl<const Value *> &EphValues,
OptimizationRemarkEmitter *ORE, unsigned &TripCount,
- unsigned MaxTripCount, unsigned &TripMultiple,
- unsigned LoopSize,
+ unsigned MaxTripCount, bool MaxOrZero,
+ unsigned &TripMultiple, unsigned LoopSize,
TargetTransformInfo::UnrollingPreferences &UP,
bool &UseUpperBound);
@@ -132,7 +132,9 @@ TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, int OptLevel,
Optional<unsigned> UserThreshold, Optional<unsigned> UserCount,
Optional<bool> UserAllowPartial, Optional<bool> UserRuntime,
- Optional<bool> UserUpperBound, Optional<bool> UserAllowPeeling);
+ Optional<bool> UserUpperBound, Optional<bool> UserAllowPeeling,
+ Optional<bool> UserAllowProfileBasedPeeling,
+ Optional<unsigned> UserFullUnrollMaxCount);
unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
bool &NotDuplicatable, bool &Convergent,
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index 1952a210291e..ff5bfc609586 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -22,7 +22,7 @@ namespace llvm {
class Constant;
class Function;
-class GlobalAlias;
+class GlobalIndirectSymbol;
class GlobalVariable;
class Instruction;
class MDNode;
@@ -120,7 +120,7 @@ inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
/// instance:
/// - \a scheduleMapGlobalInitializer()
/// - \a scheduleMapAppendingVariable()
-/// - \a scheduleMapGlobalAliasee()
+/// - \a scheduleMapGlobalIndirectSymbol()
/// - \a scheduleRemapFunction()
///
/// Sometimes a callback needs a different mapping context. Such a context can
@@ -180,8 +180,9 @@ public:
bool IsOldCtorDtor,
ArrayRef<Constant *> NewMembers,
unsigned MappingContextID = 0);
- void scheduleMapGlobalAliasee(GlobalAlias &GA, Constant &Aliasee,
- unsigned MappingContextID = 0);
+ void scheduleMapGlobalIndirectSymbol(GlobalIndirectSymbol &GIS,
+ Constant &Target,
+ unsigned MappingContextID = 0);
void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
};
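A minimal sketch of the renamed hook; since GlobalAlias and GlobalIFunc both derive from GlobalIndirectSymbol, one scheduling entry point now serves both. The mapper setup (VM, GIS, Target) is assumed:

    ValueMapper Mapper(VM, RF_None);
    // Previously scheduleMapGlobalAliasee(GA, Aliasee); ifuncs included now:
    Mapper.scheduleMapGlobalIndirectSymbol(GIS, *Target);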
diff --git a/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index b144006e2628..d1e7acc877bf 100644
--- a/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -33,18 +33,6 @@
namespace llvm {
-/// Create an analysis remark that explains why vectorization failed
-///
-/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
-/// RemarkName is the identifier for the remark. If \p I is passed it is an
-/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
-/// the location of the remark. \return the remark object that can be
-/// streamed to.
-OptimizationRemarkAnalysis createLVMissedAnalysis(const char *PassName,
- StringRef RemarkName,
- Loop *TheLoop,
- Instruction *I = nullptr);
-
/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
@@ -55,7 +43,8 @@ OptimizationRemarkAnalysis createLVMissedAnalysis(const char *PassName,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked so.
class LoopVectorizeHints {
- enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };
+ enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED,
+ HK_PREDICATE };
/// Hint - associates name and validation with the hint value.
struct Hint {
@@ -81,6 +70,9 @@ class LoopVectorizeHints {
/// Already Vectorized
Hint IsVectorized;
+ /// Vector Predicate
+ Hint Predicate;
+
/// Return the loop metadata prefix.
static StringRef Prefix() { return "llvm.loop."; }
@@ -109,6 +101,7 @@ public:
unsigned getWidth() const { return Width.Value; }
unsigned getInterleave() const { return Interleave.Value; }
unsigned getIsVectorized() const { return IsVectorized.Value; }
+ unsigned getPredicate() const { return Predicate.Value; }
enum ForceKind getForce() const {
if ((ForceKind)Force.Value == FK_Undefined &&
hasDisableAllTransformsHint(TheLoop))
@@ -235,8 +228,8 @@ public:
bool canVectorize(bool UseVPlanNativePath);
/// Return true if we can vectorize this loop while folding its tail by
- /// masking.
- bool canFoldTailByMasking();
+ /// masking, and mark all respective loads/stores for masking.
+ bool prepareToFoldTailByMasking();
/// Returns the primary induction variable.
PHINode *getPrimaryInduction() { return PrimaryInduction; }
@@ -362,9 +355,16 @@ private:
bool canVectorizeOuterLoop();
/// Return true if all of the instructions in the block can be speculatively
- /// executed. \p SafePtrs is a list of addresses that are known to be legal
- /// and we know that we can read from them without segfault.
- bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
+ /// executed, and record the loads/stores that require masking. Ifs that
+ /// guard loads can be ignored under "assume safety" unless \p PreserveGuards
+ /// is true. This can happen when we introduce guards for which the original
+ /// "unguarded-loads are safe" assumption does not hold. For example, the
+ /// vectorizer's fold-tail transformation changes the loop to execute beyond
+ /// its original trip-count, under a proper guard, which should be preserved.
+ /// \p SafePtrs is a list of addresses that are known to be legal and we know
+ /// that we can read from them without segfault.
+ bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
+ bool PreserveGuards = false);
/// Updates the vectorization state by adding \p Phi to the inductions list.
/// This can set \p Phi as the main induction of the loop if \p Phi is a
@@ -382,14 +382,6 @@ private:
return LAI ? &LAI->getSymbolicStrides() : nullptr;
}
- /// Reports a vectorization illegality: print \p DebugMsg for debugging
- /// purposes along with the corresponding optimization remark \p RemarkName.
- /// If \p I is passed it is an instruction that prevents vectorization.
- /// Otherwise the loop is used for the location of the remark.
- void reportVectorizationFailure(const StringRef DebugMsg,
- const StringRef OREMsg, const StringRef ORETag,
- Instruction *I = nullptr) const;
-
/// The loop that we evaluate.
Loop *TheLoop;
@@ -452,8 +444,8 @@ private:
/// Holds the widest induction type encountered.
Type *WidestIndTy = nullptr;
- /// Allowed outside users. This holds the induction and reduction
- /// vars which can be accessed from outside the loop.
+ /// Allowed outside users. This holds the variables that can be accessed from
+ /// outside the loop.
SmallPtrSet<Value *, 4> AllowedExit;
/// Can we assume the absence of NaNs.
diff --git a/include/llvm/Transforms/Vectorize/LoopVectorize.h b/include/llvm/Transforms/Vectorize/LoopVectorize.h
index d1ec06afb02a..d824e2903ef3 100644
--- a/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -155,6 +155,14 @@ struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
bool processLoop(Loop *L);
};
+/// Reports a vectorization failure: print \p DebugMsg for debugging
+/// purposes along with the corresponding optimization remark \p RemarkName.
+/// If \p I is passed, it is an instruction that prevents vectorization.
+/// Otherwise, the loop \p TheLoop is used for the location of the remark.
+void reportVectorizationFailure(const StringRef DebugMsg,
+ const StringRef OREMsg, const StringRef ORETag,
+ OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I = nullptr);
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
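A hedged sketch of a call site for the now-public helper; the message and tag strings are illustrative:

    reportVectorizationFailure(
        "Unsafe memory dependence prevents vectorization",  // -debug output
        "loop contains unsafe dependent memory operations", // remark message
        "UnsafeDep",                                        // remark tag
        ORE, TheLoop, /*I=*/nullptr);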
diff --git a/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index ac6afb761d4d..32ccc8a46380 100644
--- a/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -24,7 +24,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/PassManager.h"
-#include "llvm/IR/ValueHandle.h"
namespace llvm {
@@ -60,8 +59,8 @@ extern cl::opt<bool> RunSLPVectorization;
struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
using StoreList = SmallVector<StoreInst *, 8>;
using StoreListMap = MapVector<Value *, StoreList>;
- using WeakTrackingVHList = SmallVector<WeakTrackingVH, 8>;
- using WeakTrackingVHListMap = MapVector<Value *, WeakTrackingVHList>;
+ using GEPList = SmallVector<GetElementPtrInst *, 8>;
+ using GEPListMap = MapVector<Value *, GEPList>;
ScalarEvolution *SE = nullptr;
TargetTransformInfo *TTI = nullptr;
@@ -131,7 +130,7 @@ private:
/// Tries to vectorize constructs started from CmpInst, InsertValueInst or
/// InsertElementInst instructions.
- bool vectorizeSimpleInstructions(SmallVectorImpl<WeakVH> &Instructions,
+ bool vectorizeSimpleInstructions(SmallVectorImpl<Instruction *> &Instructions,
BasicBlock *BB, slpvectorizer::BoUpSLP &R);
/// Scan the basic block and look for patterns that are likely to start
@@ -147,7 +146,7 @@ private:
StoreListMap Stores;
/// The getelementptr instructions in a basic block organized by base pointer.
- WeakTrackingVHListMap GEPs;
+ GEPListMap GEPs;
};
} // end namespace llvm