Diffstat (limited to 'include/llvm/Passes/PassBuilder.h')
-rw-r--r--	include/llvm/Passes/PassBuilder.h	170
1 file changed, 164 insertions(+), 6 deletions(-)
diff --git a/include/llvm/Passes/PassBuilder.h b/include/llvm/Passes/PassBuilder.h
index 1e605e374178c..9f0a9c6e1380e 100644
--- a/include/llvm/Passes/PassBuilder.h
+++ b/include/llvm/Passes/PassBuilder.h
@@ -16,11 +16,13 @@
#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class StringRef;
+class AAManager;
class TargetMachine;
/// \brief This class provides access to building LLVM's passes.
@@ -33,29 +35,164 @@ class PassBuilder {
TargetMachine *TM;
public:
+ /// \brief LLVM-provided high-level optimization levels.
+ ///
+ /// This enumerates the LLVM-provided high-level optimization levels. Each
+ /// level has a specific goal and rationale.
+ enum OptimizationLevel {
+ /// Disable as many optimizations as possible. This doesn't completely
+ /// disable the optimizer in all cases; for example, always_inline functions
+ /// can be required to be inlined for correctness.
+ O0,
+
+ /// Optimize quickly without destroying debuggability.
+ ///
+ /// FIXME: The current and historical behavior of this level does *not*
+ /// agree with this goal, but we would like to move toward this goal in the
+ /// future.
+ ///
+ /// This level is tuned to produce a result from the optimizer as quickly
+ /// as possible and to avoid destroying debuggability. This tends to result
+ /// in a very good development mode where the compiled code will be
+ /// immediately executed as part of testing. As a consequence, where
+ /// possible, we would like to produce efficient-to-execute code, but not
+ /// if it significantly slows down compilation or would prevent even basic
+ /// debugging of the resulting binary.
+ ///
+ /// As an example, complex loop transformations such as versioning,
+ /// vectorization, or fusion might not make sense here due to the degree to
+ /// which the executed code would differ from the source code, and the
+ /// potential compile time cost.
+ O1,
+
+ /// Optimize for fast execution as much as possible without triggering
+ /// significant incremental compile time or code size growth.
+ ///
+ /// The key idea is that optimizations at this level should "pay for
+ /// themselves". So if an optimization increases compile time by 5% or
+ /// increases code size by 5% for a particular benchmark, that benchmark
+ /// should also be one which sees a 5% runtime improvement. If the compile
+ /// time or code size penalties happen on average across a diverse range of
+ /// LLVM users' benchmarks, then the improvements should as well.
+ ///
+ /// And no matter what, the compile time needs to not grow superlinearly
+ /// with the size of input to LLVM so that users can control the runtime of
+ /// the optimizer in this mode.
+ ///
+ /// This is expected to be a good default optimization level for the vast
+ /// majority of users.
+ O2,
+
+ /// Optimize for fast execution as much as possible.
+ ///
+ /// This mode is significantly more aggressive in trading off compile time
+ /// and code size to get execution time improvements. The core idea is that
+ /// this mode should include any optimization that helps execution time on
+ /// balance across a diverse collection of benchmarks, even if it increases
+ /// code size or compile time for some benchmarks without corresponding
+ /// improvements to execution time.
+ ///
+ /// Despite being willing to trade more compile time off to get improved
+ /// execution time, this mode still tries to avoid superlinear growth in
+ /// order to make even significantly slower compile times at least scale
+ /// reasonably. This does not preclude very substantial constant factor
+ /// costs though.
+ O3,
+
+ /// Similar to \c O2 but tries to optimize for small code size instead of
+ /// fast execution, without triggering significant incremental execution
+ /// time slowdowns.
+ ///
+ /// The logic here is exactly the same as \c O2, but with code size and
+ /// execution time metrics swapped.
+ ///
+ /// A consequence of the different core goal is that this should in general
+ /// produce substantially smaller executables that still run in
+ /// a reasonable amount of time.
+ Os,
+
+ /// A very specialized mode that will optimize for code size at any and all
+ /// costs.
+ ///
+ /// This is useful primarily when there are absolute size limitations and
+ /// any effort taken to reduce the size is worth it regardless of the
+ /// execution time impact. You should expect this level to produce rather
+ /// slow, but very small, code.
+ Oz
+ };
+
explicit PassBuilder(TargetMachine *TM = nullptr) : TM(TM) {}
+ /// \brief Cross register the analysis managers through their proxies.
+ ///
+ /// This is an interface that can be used to cross register each
+ /// AnalysisManager with all the other analysis managers.
+ void crossRegisterProxies(LoopAnalysisManager &LAM,
+ FunctionAnalysisManager &FAM,
+ CGSCCAnalysisManager &CGAM,
+ ModuleAnalysisManager &MAM);
+
/// \brief Registers all available module analysis passes.
///
/// This is an interface that can be used to populate a \c
/// ModuleAnalysisManager with all registered module analyses. Callers can
- /// still manually register any additional analyses.
+ /// still manually register any additional analyses. Callers can also
+ /// pre-register analyses and this will not override those.
void registerModuleAnalyses(ModuleAnalysisManager &MAM);
/// \brief Registers all available CGSCC analysis passes.
///
/// This is an interface that can be used to populate a \c CGSCCAnalysisManager
/// with all registered CGSCC analyses. Callers can still manually register any
- /// additional analyses.
+ /// additional analyses. Callers can also pre-register analyses and this will
+ /// not override those.
void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
/// \brief Registers all available function analysis passes.
///
/// This is an interface that can be used to populate a \c
/// FunctionAnalysisManager with all registered function analyses. Callers can
- /// still manually register any additional analyses.
+ /// still manually register any additional analyses. Callers can also
+ /// pre-register analyses and this will not override those.
void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
+ /// \brief Registers all available loop analysis passes.
+ ///
+ /// This is an interface that can be used to populate a \c LoopAnalysisManager
+ /// with all registered loop analyses. Callers can still manually register any
+ /// additional analyses.
+ void registerLoopAnalyses(LoopAnalysisManager &LAM);
+
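As a rough illustration of how the registration interfaces above fit together, the sketch below sets up all four analysis managers with a PassBuilder and cross-registers their proxies. The helper name setupAnalysisManagers is made up for the example, and the ordering detail is an assumption based on how the in-tree new-pass-manager drivers use this API; the headers remain authoritative.

    #include "llvm/Passes/PassBuilder.h"
    using namespace llvm;

    // Hypothetical helper: populate each analysis manager with the analyses
    // PassBuilder knows about, then wire the managers together via proxies.
    void setupAnalysisManagers(PassBuilder &PB, LoopAnalysisManager &LAM,
                               FunctionAnalysisManager &FAM,
                               CGSCCAnalysisManager &CGAM,
                               ModuleAnalysisManager &MAM) {
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      // Cross-register the proxies once all analyses are in place (the order
      // used by the in-tree drivers; an assumption, not a documented mandate).
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
    }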
+ /// \brief Add a per-module default optimization pipeline to a pass manager.
+ ///
+ /// This provides a good default optimization pipeline for per-module
+ /// optimization and code generation without any link-time optimization. It
+ /// typically corresponds to the frontend "-O[123]" options for optimization
+ /// levels \c O1, \c O2, and \c O3, respectively.
+ void addPerModuleDefaultPipeline(ModulePassManager &MPM,
+ OptimizationLevel Level,
+ bool DebugLogging = false);
+
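For context, a minimal sketch of driving the default pipeline, assuming a Module M and the hypothetical setupAnalysisManagers helper from the previous sketch; the run() signature shown is an assumption for the new pass manager at this revision.

    #include "llvm/IR/Module.h"
    #include "llvm/Passes/PassBuilder.h"
    using namespace llvm;

    void optimizeAtO2(Module &M) {
      PassBuilder PB;
      LoopAnalysisManager LAM;
      FunctionAnalysisManager FAM;
      CGSCCAnalysisManager CGAM;
      ModuleAnalysisManager MAM;
      setupAnalysisManagers(PB, LAM, FAM, CGAM, MAM); // hypothetical helper above

      // Build the frontend-style -O2 pipeline and run it over the module.
      ModulePassManager MPM;
      PB.addPerModuleDefaultPipeline(MPM, PassBuilder::O2);
      MPM.run(M, MAM);
    }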
+ /// \brief Add a pre-link, LTO-targeting default optimization pipeline to
+ /// a pass manager.
+ ///
+ /// This adds the pre-link optimizations tuned to work well with a later LTO
+ /// run. It works to minimize the IR which needs to be analyzed without
+ /// making irreversible decisions which could be made better during the LTO
+ /// run.
+ void addLTOPreLinkDefaultPipeline(ModulePassManager &MPM,
+ OptimizationLevel Level,
+ bool DebugLogging = false);
+
+ /// \brief Add an LTO default optimization pipeline to a pass manager.
+ ///
+ /// This provides a good default optimization pipeline for link-time
+ /// optimization and code generation. It is particularly tuned to fit well
+ /// when IR coming into the LTO phase was first run through \c
+ /// addLTOPreLinkDefaultPipeline, and the two coordinate closely.
+ void addLTODefaultPipeline(ModulePassManager &MPM, OptimizationLevel Level,
+ bool DebugLogging = false);
+
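The two LTO entry points are meant to be used as a pair: the pre-link pipeline runs over each module during normal compilation before bitcode is handed to the linker, and the LTO pipeline runs over the merged module at link time. A hedged sketch, reusing the PB variable from the earlier examples, with purely illustrative names:

    // At compile time, before emitting bitcode for the linker.
    ModulePassManager PreLinkMPM;
    PB.addLTOPreLinkDefaultPipeline(PreLinkMPM, PassBuilder::O2);

    // At link time, over the module produced by merging all the inputs.
    ModulePassManager LTOMPM;
    PB.addLTODefaultPipeline(LTOMPM, PassBuilder::O2);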
/// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
///
/// The format of the textual pass pipeline description looks something like:
@@ -87,10 +224,32 @@ public:
bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
bool VerifyEachPass = true, bool DebugLogging = false);
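As a usage note, a short sketch of parsing a textual pipeline with the PB variable from the earlier examples; the pipeline string is illustrative, and which pass names are accepted depends on what is registered at this revision.

    ModulePassManager MPM;
    if (!PB.parsePassPipeline(MPM, "module(function(instcombine))"))
      /* handle the parse error */;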
+ /// Parse a textual alias analysis pipeline into the provided AA manager.
+ ///
+ /// The format of the textual AA pipeline is a comma separated list of AA
+ /// pass names:
+ ///
+ /// basic-aa,globals-aa,...
+ ///
+ /// The AA manager is set up such that the provided alias analyses are tried
+ /// in the order specified. See the \c AAManager documentation for details
+ /// about the logic used. This routine just provides the textual mapping
+ /// between AA names and the analyses to register with the manager.
+ ///
+ /// Returns false if the text cannot be parsed cleanly. The specific state of
+ /// the \p AA manager is unspecified if such an error is encountered and this
+ /// returns false.
+ bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
+
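And similarly for the AA pipeline, a sketch that parses a textual AA list and registers the resulting AAManager with a FunctionAnalysisManager. The registration idiom and its ordering relative to registerFunctionAnalyses follow the pattern used by the in-tree new-pass-manager driver (an assumption); PB is assumed from the earlier sketches.

    AAManager AA;
    if (!PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
      /* handle the parse error */;
    // Register the parsed AAManager before the other function analyses so
    // this configured version is the one the manager hands out.
    FunctionAnalysisManager FAM;
    FAM.registerPass([AA] { return AA; });
    PB.registerFunctionAnalyses(FAM);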
private:
- bool parseModulePassName(ModulePassManager &MPM, StringRef Name);
+ bool parseModulePassName(ModulePassManager &MPM, StringRef Name,
+ bool DebugLogging);
bool parseCGSCCPassName(CGSCCPassManager &CGPM, StringRef Name);
bool parseFunctionPassName(FunctionPassManager &FPM, StringRef Name);
+ bool parseLoopPassName(LoopPassManager &LPM, StringRef Name);
+ bool parseAAPassName(AAManager &AA, StringRef Name);
+ bool parseLoopPassPipeline(LoopPassManager &LPM, StringRef &PipelineText,
+ bool VerifyEachPass, bool DebugLogging);
bool parseFunctionPassPipeline(FunctionPassManager &FPM,
StringRef &PipelineText, bool VerifyEachPass,
bool DebugLogging);
@@ -99,7 +258,6 @@ private:
bool parseModulePassPipeline(ModulePassManager &MPM, StringRef &PipelineText,
bool VerifyEachPass, bool DebugLogging);
};
-
}
#endif