author    Dimitry Andric <dim@FreeBSD.org>  2021-11-19 20:06:13 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2021-11-19 20:06:13 +0000
commit    c0981da47d5696fe36474fcf86b4ce03ae3ff818
tree      f42add1021b9f2ac6a69ac7cf6c4499962739a45  /clang/lib/CodeGen/BackendUtil.cpp
parent    344a3780b2e33f6ca763666c380202b18aab72a3
Diffstat (limited to 'clang/lib/CodeGen/BackendUtil.cpp')
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 258
1 file changed, 153 insertions(+), 105 deletions(-)
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 481f5347d978..648c7b3df8ed 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -38,6 +38,7 @@
#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/StandardInstrumentations.h"
@@ -45,7 +46,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ToolOutputFile.h"
@@ -147,6 +147,14 @@ class EmitAssemblyHelper {
return F;
}
+ void
+ RunOptimizationPipeline(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS);
+ void RunCodegenPipeline(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &DwoOS);
+
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const HeaderSearchOptions &HeaderSearchOpts,
@@ -164,11 +172,16 @@ public:
std::unique_ptr<TargetMachine> TM;
+ // Emit output using the legacy pass manager for the optimization pipeline.
+ // This will be removed soon when using the legacy pass manager for the
+ // optimization pipeline is no longer supported.
+ void EmitAssemblyWithLegacyPassManager(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> OS);
+
+ // Emit output using the new pass manager for the optimization pipeline. This
+ // is the default.
void EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS);
-
- void EmitAssemblyWithNewPassManager(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS);
};
// We need this wrapper to access LangOpts and CGOpts from extension functions
@@ -234,6 +247,8 @@ getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
Opts.InlineBoolFlag = CGOpts.SanitizeCoverageInlineBoolFlag;
Opts.PCTable = CGOpts.SanitizeCoveragePCTable;
Opts.StackDepth = CGOpts.SanitizeCoverageStackDepth;
+ Opts.TraceLoads = CGOpts.SanitizeCoverageTraceLoads;
+ Opts.TraceStores = CGOpts.SanitizeCoverageTraceStores;
return Opts;
}
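
For reference, the two fields wired through here (SanitizeCoverageTraceLoads/TraceStores) land on llvm::SanitizerCoverageOptions. A minimal sketch of constructing the pass directly with those options, not part of this patch (the helper name addSancov is illustrative):

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"

// Build SanitizerCoverage with load/store tracing enabled and add it to a
// module pass manager; edge coverage plus trace-pc-guard is the usual baseline.
static void addSancov(llvm::ModulePassManager &MPM) {
  llvm::SanitizerCoverageOptions Opts;
  Opts.CoverageType = llvm::SanitizerCoverageOptions::SCK_Edge;
  Opts.TracePCGuard = true;
  Opts.TraceLoads = true;   // new in this change: instrument loads
  Opts.TraceStores = true;  // new in this change: instrument stores
  MPM.addPass(llvm::ModuleSanitizerCoveragePass(Opts));
}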
@@ -474,6 +489,11 @@ static CodeGenFileType getCodeGenFileType(BackendAction Action) {
}
}
+static bool actionRequiresCodeGen(BackendAction Action) {
+ return Action != Backend_EmitNothing && Action != Backend_EmitBC &&
+ Action != Backend_EmitLL;
+}
+
static bool initTargetOptions(DiagnosticsEngine &Diags,
llvm::TargetOptions &Options,
const CodeGenOptions &CodeGenOpts,
@@ -539,6 +559,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
+ Options.ApproxFuncFPMath = LangOpts.ApproxFunc;
Options.BBSections =
llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
@@ -576,10 +597,25 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
- Options.PseudoProbeForProfiling = CodeGenOpts.PseudoProbeForProfiling;
Options.ValueTrackingVariableLocations =
CodeGenOpts.ValueTrackingVariableLocations;
Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
+ Options.LoopAlignment = CodeGenOpts.LoopAlignment;
+
+ switch (CodeGenOpts.getSwiftAsyncFramePointer()) {
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Auto:
+ Options.SwiftAsyncFramePointer =
+ SwiftAsyncFramePointerMode::DeploymentBased;
+ break;
+
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Always:
+ Options.SwiftAsyncFramePointer = SwiftAsyncFramePointerMode::Always;
+ break;
+
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Never:
+ Options.SwiftAsyncFramePointer = SwiftAsyncFramePointerMode::Never;
+ break;
+ }
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
@@ -942,15 +978,13 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
return true;
}
-void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS) {
+void EmitAssemblyHelper::EmitAssemblyWithLegacyPassManager(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
- bool UsesCodeGen = (Action != Backend_EmitNothing &&
- Action != Backend_EmitBC &&
- Action != Backend_EmitLL);
+ bool UsesCodeGen = actionRequiresCodeGen(Action);
CreateTargetMachine(UsesCodeGen);
if (UsesCodeGen && !TM)
@@ -977,6 +1011,12 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
CreatePasses(PerModulePasses, PerFunctionPasses);
+ // Add a verifier pass if requested. We don't have to do this if the action
+ // requires code generation because there will already be a verifier pass in
+ // the code-generation pipeline.
+ if (!UsesCodeGen && CodeGenOpts.VerifyModule)
+ PerModulePasses.add(createVerifierPass());
+
legacy::PassManager CodeGenPasses;
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
@@ -1069,16 +1109,16 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
DwoOS->keep();
}
-static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
+static OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
switch (Opts.OptimizationLevel) {
default:
llvm_unreachable("Invalid optimization level!");
case 0:
- return PassBuilder::OptimizationLevel::O0;
+ return OptimizationLevel::O0;
case 1:
- return PassBuilder::OptimizationLevel::O1;
+ return OptimizationLevel::O1;
case 2:
switch (Opts.OptimizeSize) {
@@ -1086,17 +1126,17 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level for size!");
case 0:
- return PassBuilder::OptimizationLevel::O2;
+ return OptimizationLevel::O2;
case 1:
- return PassBuilder::OptimizationLevel::Os;
+ return OptimizationLevel::Os;
case 2:
- return PassBuilder::OptimizationLevel::Oz;
+ return OptimizationLevel::Oz;
}
case 3:
- return PassBuilder::OptimizationLevel::O3;
+ return OptimizationLevel::O3;
}
}
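
The mapToLevel changes above reflect OptimizationLevel being hoisted out of PassBuilder into a standalone llvm::OptimizationLevel class (declared in llvm/Passes/OptimizationLevel.h). A minimal sketch of building the default new-PM pipeline against that type, not part of this patch (runDefaultPipeline is an illustrative name):

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"

// Register the four analysis managers, cross-wire their proxies, and run the
// default -O2 pipeline over a module. O0 has a separate builder
// (buildO0DefaultPipeline) and is not handled here.
static void runDefaultPipeline(llvm::Module &M) {
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;

  llvm::PassBuilder PB;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(llvm::OptimizationLevel::O2);
  MPM.run(M, MAM);
}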
@@ -1104,7 +1144,7 @@ static void addSanitizers(const Triple &TargetTriple,
const CodeGenOptions &CodeGenOpts,
const LangOptions &LangOpts, PassBuilder &PB) {
PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
+ OptimizationLevel Level) {
if (CodeGenOpts.hasSanitizeCoverage()) {
auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
MPM.addPass(ModuleSanitizerCoveragePass(
@@ -1118,11 +1158,11 @@ static void addSanitizers(const Triple &TargetTriple,
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
MPM.addPass(
- MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ ModuleMemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
FunctionPassManager FPM;
FPM.addPass(
MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
- if (Level != PassBuilder::OptimizationLevel::O0) {
+ if (Level != OptimizationLevel::O0) {
// MemorySanitizer inserts complex instrumentation that mostly
// follows the logic of the original code, but operates on
// "shadow" values. It can benefit from re-running some
@@ -1141,26 +1181,24 @@ static void addSanitizers(const Triple &TargetTriple,
MSanPass(SanitizerKind::KernelMemory, true);
if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(ModuleThreadSanitizerPass());
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
if (LangOpts.Sanitize.has(Mask)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
- bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
+ bool UseGlobalGC = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
llvm::AsanDtorKind DestructorKind =
CodeGenOpts.getSanitizeAddressDtor();
- llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
- CodeGenOpts.getSanitizeAddressUseAfterReturn();
+ AddressSanitizerOptions Opts;
+ Opts.CompileKernel = CompileKernel;
+ Opts.Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ Opts.UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
+ Opts.UseAfterReturn = CodeGenOpts.getSanitizeAddressUseAfterReturn();
MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
MPM.addPass(ModuleAddressSanitizerPass(
- CompileKernel, Recover, ModuleUseAfterScope, UseOdrIndicator,
- DestructorKind));
- MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
- CompileKernel, Recover, UseAfterScope, UseAfterReturn)));
+ Opts, UseGlobalGC, UseOdrIndicator, DestructorKind));
}
};
ASanPass(SanitizerKind::Address, false);
@@ -1170,8 +1208,8 @@ static void addSanitizers(const Triple &TargetTriple,
if (LangOpts.Sanitize.has(Mask)) {
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
MPM.addPass(HWAddressSanitizerPass(
- CompileKernel, Recover,
- /*DisableOptimization=*/CodeGenOpts.OptimizationLevel == 0));
+ {CompileKernel, Recover,
+ /*DisableOptimization=*/CodeGenOpts.OptimizationLevel == 0}));
}
};
HWASanPass(SanitizerKind::HWAddress, false);
@@ -1183,29 +1221,9 @@ static void addSanitizers(const Triple &TargetTriple,
});
}
-/// A clean version of `EmitAssembly` that uses the new pass manager.
-///
-/// Not all features are currently supported in this system, but where
-/// necessary it falls back to the legacy pass manager to at least provide
-/// basic functionality.
-///
-/// This API is planned to have its functionality finished and then to replace
-/// `EmitAssembly` at some point in the future when the default switches.
-void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
- BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
- setCommandLineOpts(CodeGenOpts);
-
- bool RequiresCodeGen = (Action != Backend_EmitNothing &&
- Action != Backend_EmitBC &&
- Action != Backend_EmitLL);
- CreateTargetMachine(RequiresCodeGen);
-
- if (RequiresCodeGen && !TM)
- return;
- if (TM)
- TheModule->setDataLayout(TM->createDataLayout());
-
+void EmitAssemblyHelper::RunOptimizationPipeline(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS) {
Optional<PGOOptions> PGOOpt;
if (CodeGenOpts.hasProfileIRInstr())
@@ -1260,6 +1278,8 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
"", PGOOptions::NoAction, PGOOptions::CSIRInstr,
CodeGenOpts.DebugInfoForProfiling);
}
+ if (TM)
+ TM->setPGOOption(PGOOpt);
PipelineTuningOptions PTO;
PTO.LoopUnrolling = CodeGenOpts.UnrollLoops;
@@ -1303,9 +1323,6 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
get##Ext##PluginInfo().RegisterPassBuilderCallbacks(PB);
#include "llvm/Support/Extension.def"
- // Register the AA manager first so that our version is the one used.
- FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
-
// Register the target library analysis directly and give it a customized
// preset TLI.
Triple TargetTriple(TheModule->getTargetTriple());
@@ -1325,26 +1342,26 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (!CodeGenOpts.DisableLLVMPasses) {
// Map our optimization levels into one of the distinct levels used to
// configure the pipeline.
- PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+ OptimizationLevel Level = mapToLevel(CodeGenOpts);
bool IsThinLTO = CodeGenOpts.PrepareForThinLTO;
bool IsLTO = CodeGenOpts.PrepareForLTO;
if (LangOpts.ObjCAutoRefCount) {
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
MPM.addPass(
createModuleToFunctionPassAdaptor(ObjCARCExpandPass()));
});
PB.registerPipelineEarlySimplificationEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
MPM.addPass(ObjCARCAPElimPass());
});
PB.registerScalarOptimizerLateEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
FPM.addPass(ObjCARCOptPass());
});
}
@@ -1357,7 +1374,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// vtables so that codegen doesn't complain.
if (IsThinLTOPostLink)
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
/*ImportSummary=*/nullptr,
/*DropTypeTests=*/true));
@@ -1368,12 +1385,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
CodeGenOpts.InstrumentFunctionsAfterInlining ||
CodeGenOpts.InstrumentForProfiling) {
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/false)));
});
PB.registerOptimizerLastEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/true)));
});
@@ -1383,7 +1400,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// of the pipeline.
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
PB.registerScalarOptimizerLateEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
@@ -1394,15 +1411,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts))
PB.registerPipelineStartEPCallback(
- [Options](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(GCOVProfilerPass(*Options));
});
if (Optional<InstrProfOptions> Options =
getInstrProfOptions(CodeGenOpts, LangOpts))
PB.registerPipelineStartEPCallback(
- [Options](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(InstrProfiling(*Options, false));
});
@@ -1422,17 +1437,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
}
- // FIXME: We still use the legacy pass manager to do code generation. We
- // create that pass manager here and use it as needed below.
- legacy::PassManager CodeGenPasses;
- bool NeedCodeGen = false;
- std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
+ // Add a verifier pass if requested. We don't have to do this if the action
+ // requires code generation because there will already be a verifier pass in
+ // the code-generation pipeline.
+ if (!actionRequiresCodeGen(Action) && CodeGenOpts.VerifyModule)
+ MPM.addPass(VerifierPass());
- // Append any output we need to the pass manager.
switch (Action) {
- case Backend_EmitNothing:
- break;
-
case Backend_EmitBC:
if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
@@ -1448,8 +1459,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// Emit a module summary by default for Regular LTO except for ld64
// targets
bool EmitLTOSummary =
- (CodeGenOpts.PrepareForLTO &&
- !CodeGenOpts.DisableLLVMPasses &&
+ (CodeGenOpts.PrepareForLTO && !CodeGenOpts.DisableLLVMPasses &&
llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
llvm::Triple::Apple);
if (EmitLTOSummary) {
@@ -1467,10 +1477,28 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
break;
+ default:
+ break;
+ }
+
+ // Now that we have all of the passes ready, run them.
+ PrettyStackTraceString CrashInfo("Optimizer");
+ MPM.run(*TheModule, MAM);
+}
+
+void EmitAssemblyHelper::RunCodegenPipeline(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &DwoOS) {
+ // We still use the legacy PM to run the codegen pipeline since the new PM
+ // does not work with the codegen pipeline.
+ // FIXME: make the new PM work with the codegen pipeline.
+ legacy::PassManager CodeGenPasses;
+
+ // Append any output we need to the pass manager.
+ switch (Action) {
case Backend_EmitAssembly:
case Backend_EmitMCNull:
case Backend_EmitObj:
- NeedCodeGen = true;
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
if (!CodeGenOpts.SplitDwarfOutput.empty()) {
@@ -1483,22 +1511,41 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// FIXME: Should we handle this error differently?
return;
break;
+ default:
+ return;
}
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses.run(*TheModule);
+}
+
+/// A clean version of `EmitAssembly` that uses the new pass manager.
+///
+/// Not all features are currently supported in this system, but where
+/// necessary it falls back to the legacy pass manager to at least provide
+/// basic functionality.
+///
+/// This API is planned to have its functionality finished and then to replace
+/// `EmitAssembly` at some point in the future when the default switches.
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> OS) {
+ TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
+ setCommandLineOpts(CodeGenOpts);
+
+ bool RequiresCodeGen = actionRequiresCodeGen(Action);
+ CreateTargetMachine(RequiresCodeGen);
+
+ if (RequiresCodeGen && !TM)
+ return;
+ if (TM)
+ TheModule->setDataLayout(TM->createDataLayout());
+
// Before executing passes, print the final values of the LLVM options.
cl::PrintOptionValues();
- // Now that we have all of the passes ready, run them.
- {
- PrettyStackTraceString CrashInfo("Optimizer");
- MPM.run(*TheModule, MAM);
- }
-
- // Now if needed, run the legacy PM for codegen.
- if (NeedCodeGen) {
- PrettyStackTraceString CrashInfo("Code generation");
- CodeGenPasses.run(*TheModule);
- }
+ std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
+ RunOptimizationPipeline(Action, OS, ThinLinkOS);
+ RunCodegenPipeline(Action, OS, DwoOS);
if (ThinLinkOS)
ThinLinkOS->keep();
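
As the comment in RunCodegenPipeline above notes, target code emission still runs on the legacy pass manager (via TargetMachine::addPassesToEmitFile). A stripped-down sketch of that step, not part of this patch (emitObject is an illustrative name):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

// Drive object emission through legacy::PassManager; addPassesToEmitFile
// returns true when the target cannot emit the requested file type.
static bool emitObject(llvm::Module &M, llvm::TargetMachine &TM,
                       llvm::raw_pwrite_stream &OS) {
  llvm::legacy::PassManager CodeGenPasses;
  if (TM.addPassesToEmitFile(CodeGenPasses, OS, /*DwoOut=*/nullptr,
                             llvm::CGFT_ObjectFile))
    return false;
  CodeGenPasses.run(M);
  return true;
}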
@@ -1526,7 +1573,7 @@ static void runThinLTOBackend(
return;
auto AddStream = [&](size_t Task) {
- return std::make_unique<lto::NativeObjectStream>(std::move(OS));
+ return std::make_unique<CachedFileStream>(std::move(OS));
};
lto::Config Conf;
if (CGOpts.SaveTempsFilePrefix != "") {
@@ -1622,16 +1669,17 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// If we are performing a ThinLTO importing compile, load the function index
// into memory and pass it into runThinLTOBackend, which will run the
// function importer and invoke LTO passes.
- Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
- llvm::getModuleSummaryIndexForFile(CGOpts.ThinLTOIndexFile,
- /*IgnoreEmptyThinLTOIndexFile*/true);
- if (!IndexOrErr) {
- logAllUnhandledErrors(IndexOrErr.takeError(), errs(),
+ std::unique_ptr<ModuleSummaryIndex> CombinedIndex;
+ if (Error E = llvm::getModuleSummaryIndexForFile(
+ CGOpts.ThinLTOIndexFile,
+ /*IgnoreEmptyThinLTOIndexFile*/ true)
+ .moveInto(CombinedIndex)) {
+ logAllUnhandledErrors(std::move(E), errs(),
"Error loading index file '" +
CGOpts.ThinLTOIndexFile + "': ");
return;
}
- std::unique_ptr<ModuleSummaryIndex> CombinedIndex = std::move(*IndexOrErr);
+
// A null CombinedIndex means we should skip ThinLTO compilation
// (LLVM will optionally ignore empty index files, returning null instead
// of an error).
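
The index-loading rewrite above switches from unwrapping an Expected<T> by hand to Expected<T>::moveInto, which moves the success value into an out-parameter and hands back any Error in one expression. A minimal self-contained sketch of the idiom, not part of this patch (makeValue and demo are illustrative names):

#include <memory>
#include "llvm/Support/Error.h"

// Return a value or an error, as Expected<T>.
static llvm::Expected<std::unique_ptr<int>> makeValue(bool Fail) {
  if (Fail)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "could not make value");
  return std::make_unique<int>(42);
}

static void demo() {
  std::unique_ptr<int> V;
  // On success V owns the result; on failure E carries the error and V stays
  // null. moveInto must be called on an rvalue Expected, as done here.
  if (llvm::Error E = makeValue(/*Fail=*/false).moveInto(V)) {
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "demo: ");
    return;
  }
  // ... use *V here ...
}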
@@ -1656,8 +1704,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M);
- if (!CGOpts.LegacyPassManager)
- AsmHelper.EmitAssemblyWithNewPassManager(Action, std::move(OS));
+ if (CGOpts.LegacyPassManager)
+ AsmHelper.EmitAssemblyWithLegacyPassManager(Action, std::move(OS));
else
AsmHelper.EmitAssembly(Action, std::move(OS));