author    Dimitry Andric <dim@FreeBSD.org>  2017-05-22 19:43:28 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-05-22 19:43:28 +0000
commit    b5630dbadf9a2a06754194387d6b0fd9962a67f1 (patch)
tree      3fe1e2bc0dc2823ab21f06959fbb3eaca317ea29 /lib/CodeGen
parent    7af96fb3afd6725a2824a0a5ca5dad34e5e0b056 (diff)
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/AsmPrinter/CodeViewDebug.cpp        |  41
-rw-r--r--  lib/CodeGen/AtomicExpandPass.cpp                 |  22
-rw-r--r--  lib/CodeGen/CMakeLists.txt                       |   1
-rw-r--r--  lib/CodeGen/CodeGen.cpp                          |   1
-rw-r--r--  lib/CodeGen/CodeGenPrepare.cpp                   |  27
-rw-r--r--  lib/CodeGen/DwarfEHPrepare.cpp                   |  32
-rw-r--r--  lib/CodeGen/GlobalISel/IRTranslator.cpp          |  26
-rw-r--r--  lib/CodeGen/GlobalISel/InstructionSelector.cpp   |   2
-rw-r--r--  lib/CodeGen/InterleavedAccessPass.cpp            |  19
-rw-r--r--  lib/CodeGen/LLVMTargetMachine.cpp                |  20
-rw-r--r--  lib/CodeGen/LiveRangeShrink.cpp                  | 211
-rw-r--r--  lib/CodeGen/LowerEmuTLS.cpp                      |  22
-rw-r--r--  lib/CodeGen/MachineBlockPlacement.cpp            |  45
-rw-r--r--  lib/CodeGen/MachineModuleInfo.cpp                |   4
-rw-r--r--  lib/CodeGen/PrologEpilogInserter.cpp             |  42
-rw-r--r--  lib/CodeGen/RegisterCoalescer.cpp                |   8
-rw-r--r--  lib/CodeGen/SafeStack.cpp                        |  22
-rw-r--r--  lib/CodeGen/SafeStackColoring.cpp                |   3
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp         | 436
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp        |  49
-rw-r--r--  lib/CodeGen/SelectionDAG/StatepointLowering.cpp  |   4
-rw-r--r--  lib/CodeGen/StackProtector.cpp                   |  13
-rw-r--r--  lib/CodeGen/TargetPassConfig.cpp                 |  21
-rw-r--r--  lib/CodeGen/WinEHPrepare.cpp                     |  10
24 files changed, 471 insertions(+), 610 deletions(-)
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 7d945690e9c3f..1b39e46ee4667 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -13,7 +13,6 @@
#include "CodeViewDebug.h"
#include "llvm/ADT/TinyPtrVector.h"
-#include "llvm/DebugInfo/CodeView/CVTypeDumper.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/Line.h"
@@ -23,6 +22,7 @@
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeTableCollection.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/IR/Constants.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -469,17 +469,21 @@ void CodeViewDebug::emitTypeInformation() {
CommentPrefix += ' ';
}
- TypeDatabase TypeDB(TypeTable.records().size());
- CVTypeDumper CVTD(TypeDB);
- TypeTable.ForEachRecord([&](TypeIndex Index, ArrayRef<uint8_t> Record) {
+ TypeTableCollection Table(TypeTable.records());
+ Optional<TypeIndex> B = Table.getFirst();
+ while (B) {
+ // This will fail if the record data is invalid.
+ CVType Record = Table.getType(*B);
+
if (OS.isVerboseAsm()) {
// Emit a block comment describing the type record for readability.
SmallString<512> CommentBlock;
raw_svector_ostream CommentOS(CommentBlock);
ScopedPrinter SP(CommentOS);
SP.setPrefix(CommentPrefix);
- TypeDumpVisitor TDV(TypeDB, &SP, false);
- Error E = CVTD.dump(Record, TDV);
+ TypeDumpVisitor TDV(Table, &SP, false);
+
+ Error E = codeview::visitTypeRecord(Record, *B, TDV);
if (E) {
logAllUnhandledErrors(std::move(E), errs(), "error: ");
llvm_unreachable("produced malformed type record");
@@ -489,29 +493,10 @@ void CodeViewDebug::emitTypeInformation() {
// newline.
OS.emitRawComment(
CommentOS.str().drop_front(CommentPrefix.size() - 1).rtrim());
- } else {
-#ifndef NDEBUG
- // Assert that the type data is valid even if we aren't dumping
- // comments. The MSVC linker doesn't do much type record validation,
- // so the first link of an invalid type record can succeed while
- // subsequent links will fail with LNK1285.
- BinaryByteStream Stream(Record, llvm::support::little);
- CVTypeArray Types;
- BinaryStreamReader Reader(Stream);
- Error E = Reader.readArray(Types, Reader.getLength());
- if (!E) {
- TypeVisitorCallbacks C;
- E = codeview::visitTypeStream(Types, C);
- }
- if (E) {
- logAllUnhandledErrors(std::move(E), errs(), "error: ");
- llvm_unreachable("produced malformed type record");
- }
-#endif
}
- StringRef S(reinterpret_cast<const char *>(Record.data()), Record.size());
- OS.EmitBinaryData(S);
- });
+ OS.EmitBinaryData(Record.str_data());
+ B = Table.getNext(*B);
+ }
}
namespace {
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 17e6be05eb42e..984973cf3a3b4 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
@@ -35,12 +36,10 @@ using namespace llvm;
namespace {
class AtomicExpand: public FunctionPass {
- const TargetMachine *TM;
const TargetLowering *TLI;
public:
static char ID; // Pass identification, replacement for typeid
- explicit AtomicExpand(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), TM(TM), TLI(nullptr) {
+ AtomicExpand() : FunctionPass(ID), TLI(nullptr) {
initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
@@ -97,12 +96,10 @@ namespace {
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
-INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand", "Expand Atomic instructions",
- false, false)
+INITIALIZE_PASS(AtomicExpand, "atomic-expand", "Expand Atomic instructions",
+ false, false)
-FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
- return new AtomicExpand(TM);
-}
+FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
namespace {
// Helper functions to retrieve the size of atomic instructions.
@@ -172,9 +169,14 @@ bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
} // end anonymous namespace
bool AtomicExpand::runOnFunction(Function &F) {
- if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC)
+ return false;
+
+ auto &TM = TPC->getTM<TargetMachine>();
+ if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
return false;
- TLI = TM->getSubtargetImpl(F)->getTargetLowering();
+ TLI = TM.getSubtargetImpl(F)->getTargetLowering();
SmallVector<Instruction *, 1> AtomicInsts;
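The AtomicExpand hunks above are the template for most of this commit: a pass no longer receives a TargetMachine* through its constructor but queries the TargetPassConfig analysis when it runs, treating its absence (an opt-style pipeline with no codegen) as "nothing to do". A minimal sketch of the migrated shape, assuming the legacy pass manager and the getTM<> accessor used above; the pass name is ours, for illustration only:

#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

namespace {
struct TPCUserPass : public FunctionPass {
  static char ID;
  TPCUserPass() : FunctionPass(ID) {}   // note: no TargetMachine parameter

  bool runOnFunction(Function &F) override {
    // An absent TargetPassConfig now means "not a codegen pipeline": bail
    // out quietly instead of checking a possibly-null TM member.
    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC)
      return false;
    const TargetMachine &TM = TPC->getTM<TargetMachine>();
    (void)TM; // e.g. TM.getSubtargetImpl(F)->getTargetLowering(), as before
    return false;
  }
};
char TPCUserPass::ID = 0;
} // end anonymous namespace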
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 55a27e2fb79e5..2b5863aa58009 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -49,7 +49,6 @@ add_llvm_library(LLVMCodeGen
LivePhysRegs.cpp
LiveRangeCalc.cpp
LiveRangeEdit.cpp
- LiveRangeShrink.cpp
LiveRegMatrix.cpp
LiveRegUnits.cpp
LiveStackAnalysis.cpp
diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp
index 4d30c6574b121..2a2715beaadca 100644
--- a/lib/CodeGen/CodeGen.cpp
+++ b/lib/CodeGen/CodeGen.cpp
@@ -43,7 +43,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeLiveDebugValuesPass(Registry);
initializeLiveDebugVariablesPass(Registry);
initializeLiveIntervalsPass(Registry);
- initializeLiveRangeShrinkPass(Registry);
initializeLiveStacksPass(Registry);
initializeLiveVariablesPass(Registry);
initializeLocalStackSlotPassPass(Registry);
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index f2e024c5e3bd6..3a1a3020a8d4d 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
@@ -197,10 +198,11 @@ class TypePromotionTransaction;
public:
static char ID; // Pass identification, replacement for typeid
- explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
- initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
- }
+ CodeGenPrepare()
+ : FunctionPass(ID), TM(nullptr), TLI(nullptr), TTI(nullptr),
+ DL(nullptr) {
+ initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F) override;
StringRef getPassName() const override { return "CodeGen Prepare"; }
@@ -255,15 +257,13 @@ class TypePromotionTransaction;
}
char CodeGenPrepare::ID = 0;
-INITIALIZE_TM_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
- "Optimize for code generation", false, false)
+INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
+ "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
-INITIALIZE_TM_PASS_END(CodeGenPrepare, "codegenprepare",
- "Optimize for code generation", false, false)
+INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
+ "Optimize for code generation", false, false)
-FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
- return new CodeGenPrepare(TM);
-}
+FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
bool CodeGenPrepare::runOnFunction(Function &F) {
if (skipFunction(F))
@@ -279,7 +279,8 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
BPI.reset();
ModifiedDT = false;
- if (TM) {
+ if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
+ TM = &TPC->getTM<TargetMachine>();
SubtargetInfo = TM->getSubtargetImpl(F);
TLI = SubtargetInfo->getTargetLowering();
TRI = SubtargetInfo->getRegisterInfo();
@@ -349,7 +350,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
// Really free removed instructions during promotion.
for (Instruction *I : RemovedInsts)
- delete I;
+ I->deleteValue();
EverMadeChange |= MadeChange;
}
diff --git a/lib/CodeGen/DwarfEHPrepare.cpp b/lib/CodeGen/DwarfEHPrepare.cpp
index 38af19a044485..1ef4d8660657c 100644
--- a/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/lib/CodeGen/DwarfEHPrepare.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
@@ -34,8 +35,6 @@ STATISTIC(NumResumesLowered, "Number of resume calls lowered");
namespace {
class DwarfEHPrepare : public FunctionPass {
- const TargetMachine *TM;
-
// RewindFunction - _Unwind_Resume or the target equivalent.
Constant *RewindFunction;
@@ -52,15 +51,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
- // INITIALIZE_TM_PASS requires a default constructor, but it isn't used in
- // practice.
DwarfEHPrepare()
- : FunctionPass(ID), TM(nullptr), RewindFunction(nullptr), DT(nullptr),
- TLI(nullptr) {}
-
- DwarfEHPrepare(const TargetMachine *TM)
- : FunctionPass(ID), TM(TM), RewindFunction(nullptr), DT(nullptr),
- TLI(nullptr) {}
+ : FunctionPass(ID), RewindFunction(nullptr), DT(nullptr), TLI(nullptr) {
+ }
bool runOnFunction(Function &Fn) override;
@@ -78,18 +71,18 @@ namespace {
} // end anonymous namespace
char DwarfEHPrepare::ID = 0;
-INITIALIZE_TM_PASS_BEGIN(DwarfEHPrepare, "dwarfehprepare",
- "Prepare DWARF exceptions", false, false)
+INITIALIZE_PASS_BEGIN(DwarfEHPrepare, "dwarfehprepare",
+ "Prepare DWARF exceptions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_TM_PASS_END(DwarfEHPrepare, "dwarfehprepare",
- "Prepare DWARF exceptions", false, false)
+INITIALIZE_PASS_END(DwarfEHPrepare, "dwarfehprepare",
+ "Prepare DWARF exceptions", false, false)
-FunctionPass *llvm::createDwarfEHPass(const TargetMachine *TM) {
- return new DwarfEHPrepare(TM);
-}
+FunctionPass *llvm::createDwarfEHPass() { return new DwarfEHPrepare(); }
void DwarfEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetPassConfig>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
}
@@ -254,9 +247,10 @@ bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
}
bool DwarfEHPrepare::runOnFunction(Function &Fn) {
- assert(TM && "DWARF EH preparation requires a target machine");
+ const TargetMachine &TM =
+ getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
+ TLI = TM.getSubtargetImpl(Fn)->getTargetLowering();
bool Changed = InsertUnwindResumeCalls(Fn);
DT = nullptr;
TLI = nullptr;
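DwarfEHPrepare takes the stricter variant of the same migration: it cannot do anything without a target, so instead of bailing out when TargetPassConfig is unavailable it declares a hard dependency via addRequired, and the old assert on a constructor-supplied TM disappears. Sketched under the same assumptions (and the same includes) as the AtomicExpand example above:

struct NeedsTMPass : public FunctionPass {  // illustrative name
  static char ID;
  NeedsTMPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();  // legacy PM fails if it is missing
  }
  bool runOnFunction(Function &F) override {
    const TargetMachine &TM =
        getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    (void)TM;  // replaces assert(TM && "... requires a target machine")
    return false;
  }
};
char NeedsTMPass::ID = 0;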
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 77dfb13ac1f2d..afc18a15aa1c2 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -340,6 +340,15 @@ bool IRTranslator::translateExtractValue(const User &U,
Type *Int32Ty = Type::getInt32Ty(U.getContext());
SmallVector<Value *, 1> Indices;
+ // If Src is a single element ConstantStruct, translate extractvalue
+ // to that element to avoid inserting a cast instruction.
+ if (auto CS = dyn_cast<ConstantStruct>(Src))
+ if (CS->getNumOperands() == 1) {
+ unsigned Res = getOrCreateVReg(*CS->getOperand(0));
+ ValToVReg[&U] = Res;
+ return true;
+ }
+
// getIndexedOffsetInType is designed for GEPs, so the first index is the
// usual array element rather than looking into the actual aggregate.
Indices.push_back(ConstantInt::get(Int32Ty, 0));
@@ -1108,6 +1117,23 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
default:
return false;
}
+ } else if (auto CS = dyn_cast<ConstantStruct>(&C)) {
+ // Return the element if it is a single element ConstantStruct.
+ if (CS->getNumOperands() == 1) {
+ unsigned EltReg = getOrCreateVReg(*CS->getOperand(0));
+ EntryBuilder.buildCast(Reg, EltReg);
+ return true;
+ }
+ SmallVector<unsigned, 4> Ops;
+ SmallVector<uint64_t, 4> Indices;
+ uint64_t Offset = 0;
+ for (unsigned i = 0; i < CS->getNumOperands(); ++i) {
+ unsigned OpReg = getOrCreateVReg(*CS->getOperand(i));
+ Ops.push_back(OpReg);
+ Indices.push_back(Offset);
+ Offset += MRI->getType(OpReg).getSizeInBits();
+ }
+ EntryBuilder.buildSequence(Reg, Ops, Indices);
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
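The two IRTranslator hunks above cooperate: a one-element ConstantStruct has the same in-register form as its only field, so both extractvalue translation and constant materialization reuse the field's vreg (the first hunk short-circuits, the second emits a cheap cast), while wider ConstantStructs are assembled from per-field vregs at accumulated bit offsets via buildSequence. A standalone sketch of just the predicate both paths test, assuming only the llvm/IR headers (the helper name is ours):

#include "llvm/IR/Constants.h"
using namespace llvm;

// True iff Src is a ConstantStruct wrapping exactly one field; then Elt is
// that field and no cast or offset computation is needed to translate it.
static bool isSingleFieldConstantStruct(const Value *Src,
                                        const Constant *&Elt) {
  if (const auto *CS = dyn_cast<ConstantStruct>(Src))
    if (CS->getNumOperands() == 1) {
      Elt = CS->getAggregateElement(0u);
      return true;
    }
  return false;
}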
diff --git a/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index c67da8629a3ba..4c0b06dffd216 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -73,7 +73,7 @@ bool InstructionSelector::isOperandImmEqual(
const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const {
- if (MO.getReg())
+ if (MO.isReg() && MO.getReg())
if (auto VRegVal = getConstantVRegVal(MO.getReg(), MRI))
return *VRegVal == Value;
return false;
diff --git a/lib/CodeGen/InterleavedAccessPass.cpp b/lib/CodeGen/InterleavedAccessPass.cpp
index ec35b3f6449e1..bb29db301a953 100644
--- a/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/lib/CodeGen/InterleavedAccessPass.cpp
@@ -45,6 +45,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
@@ -68,8 +69,7 @@ class InterleavedAccess : public FunctionPass {
public:
static char ID;
- InterleavedAccess(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), DT(nullptr), TM(TM), TLI(nullptr) {
+ InterleavedAccess() : FunctionPass(ID), DT(nullptr), TLI(nullptr) {
initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
}
@@ -84,7 +84,6 @@ public:
private:
DominatorTree *DT;
- const TargetMachine *TM;
const TargetLowering *TLI;
/// The maximum supported interleave factor.
@@ -108,18 +107,18 @@ private:
} // end anonymous namespace.
char InterleavedAccess::ID = 0;
-INITIALIZE_TM_PASS_BEGIN(
+INITIALIZE_PASS_BEGIN(
InterleavedAccess, "interleaved-access",
"Lower interleaved memory accesses to target specific intrinsics", false,
false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_TM_PASS_END(
+INITIALIZE_PASS_END(
InterleavedAccess, "interleaved-access",
"Lower interleaved memory accesses to target specific intrinsics", false,
false)
-FunctionPass *llvm::createInterleavedAccessPass(const TargetMachine *TM) {
- return new InterleavedAccess(TM);
+FunctionPass *llvm::createInterleavedAccessPass() {
+ return new InterleavedAccess();
}
/// \brief Check if the mask is a DE-interleave mask of the given factor
@@ -426,13 +425,15 @@ bool InterleavedAccess::lowerInterleavedStore(
}
bool InterleavedAccess::runOnFunction(Function &F) {
- if (!TM || !LowerInterleavedAccesses)
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC || !LowerInterleavedAccesses)
return false;
DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TLI = TM->getSubtargetImpl(F)->getTargetLowering();
+ auto &TM = TPC->getTM<TargetMachine>();
+ TLI = TM.getSubtargetImpl(F)->getTargetLowering();
MaxFactor = TLI->getMaxSupportedInterleaveFactor();
// Holds dead instructions that will be erased later.
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index 7b1706f0f4ba9..be3b258315bbd 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -109,26 +109,24 @@ addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
AnalysisID StopAfter,
MachineFunctionInitializer *MFInitializer = nullptr) {
- // When in emulated TLS mode, add the LowerEmuTLS pass.
- if (TM->Options.EmulatedTLS)
- PM.add(createLowerEmuTLSPass(TM));
-
- PM.add(createPreISelIntrinsicLoweringPass());
-
- // Add internal analysis passes from the target machine.
- PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
-
// Targets may override createPassConfig to provide a target-specific
// subclass.
TargetPassConfig *PassConfig = TM->createPassConfig(PM);
PassConfig->setStartStopPasses(StartBefore, StartAfter, StopBefore,
StopAfter);
-
// Set PassConfig options provided by TargetMachine.
PassConfig->setDisableVerify(DisableVerify);
-
PM.add(PassConfig);
+ // When in emulated TLS mode, add the LowerEmuTLS pass.
+ if (TM->Options.EmulatedTLS)
+ PM.add(createLowerEmuTLSPass());
+
+ PM.add(createPreISelIntrinsicLoweringPass());
+
+ // Add internal analysis passes from the target machine.
+ PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
+
PassConfig->addIRPasses();
PassConfig->addCodeGenPrepare();
diff --git a/lib/CodeGen/LiveRangeShrink.cpp b/lib/CodeGen/LiveRangeShrink.cpp
deleted file mode 100644
index 00182e2c779f5..0000000000000
--- a/lib/CodeGen/LiveRangeShrink.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-//===-- LiveRangeShrink.cpp - Move instructions to shrink live range ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-///===---------------------------------------------------------------------===//
-///
-/// \file
-/// This pass moves instructions close to the definitions of their operands to
-/// shrink the live range of the def instruction. The code motion is limited to
-/// within the basic block. The moved instruction should have one def and more
-/// than one use operand, each of which is the only use of its def.
-///
-///===---------------------------------------------------------------------===//
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "lrshrink"
-
-STATISTIC(NumInstrsHoistedToShrinkLiveRange,
- "Number of insructions hoisted to shrink live range.");
-
-using namespace llvm;
-
-namespace {
-class LiveRangeShrink : public MachineFunctionPass {
-public:
- static char ID;
-
- LiveRangeShrink() : MachineFunctionPass(ID) {
- initializeLiveRangeShrinkPass(*PassRegistry::getPassRegistry());
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- StringRef getPassName() const override { return "Live Range Shrink"; }
-
- bool runOnMachineFunction(MachineFunction &MF) override;
-};
-} // End anonymous namespace.
-
-char LiveRangeShrink::ID = 0;
-char &llvm::LiveRangeShrinkID = LiveRangeShrink::ID;
-
-INITIALIZE_PASS(LiveRangeShrink, "lrshrink", "Live Range Shrink Pass", false,
- false)
-namespace {
-typedef DenseMap<MachineInstr *, unsigned> InstOrderMap;
-
-/// Returns \p New if it's dominated by \p Old, otherwise return \p Old.
-/// \p M maintains a map from instruction to its dominating order that satisfies
-/// M[A] > M[B] guarantees that A is dominated by B.
-/// If \p New is not in \p M, return \p Old. Otherwise if \p Old is null, return
-/// \p New.
-MachineInstr *FindDominatedInstruction(MachineInstr &New, MachineInstr *Old,
- const InstOrderMap &M) {
- auto NewIter = M.find(&New);
- if (NewIter == M.end())
- return Old;
- if (Old == nullptr)
- return &New;
- unsigned OrderOld = M.find(Old)->second;
- unsigned OrderNew = NewIter->second;
- if (OrderOld != OrderNew)
- return OrderOld < OrderNew ? &New : Old;
- // OrderOld == OrderNew, we need to iterate down from Old to see if it
- // can reach New, if yes, New is dominated by Old.
- for (MachineInstr *I = Old->getNextNode(); M.find(I)->second == OrderNew;
- I = I->getNextNode())
- if (I == &New)
- return &New;
- return Old;
-}
-
-/// Builds Instruction to its dominating order number map \p M by traversing
-/// from instruction \p Start.
-void BuildInstOrderMap(MachineBasicBlock::iterator Start, InstOrderMap &M) {
- M.clear();
- unsigned i = 0;
- for (MachineInstr &I : make_range(Start, Start->getParent()->end()))
- M[&I] = i++;
-}
-} // end anonymous namespace
-
-bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
- return false;
-
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
-
- InstOrderMap IOM;
- // Map from register to instruction order (value of IOM) where the
- // register is used last. When moving instructions up, we need to
- // make sure all its defs (including dead def) will not cross its
- // last use when moving up.
- DenseMap<unsigned, unsigned> UseMap;
-
- for (MachineBasicBlock &MBB : MF) {
- if (MBB.empty())
- continue;
- bool SawStore = false;
- BuildInstOrderMap(MBB.begin(), IOM);
- UseMap.clear();
-
- for (MachineBasicBlock::iterator Next = MBB.begin(); Next != MBB.end();) {
- MachineInstr &MI = *Next;
- ++Next;
- if (MI.isPHI() || MI.isDebugValue())
- continue;
- if (MI.mayStore())
- SawStore = true;
-
- unsigned CurrentOrder = IOM[&MI];
- unsigned Barrier = 0;
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg() || MO.isDebug())
- continue;
- if (MO.isUse())
- UseMap[MO.getReg()] = CurrentOrder;
- else if (MO.isDead() && UseMap.count(MO.getReg()))
- // Barrier is the last instruction where MO gets used. MI should not
- // be moved above Barrier.
- Barrier = std::max(Barrier, UseMap[MO.getReg()]);
- }
-
- if (!MI.isSafeToMove(nullptr, SawStore)) {
- // If MI has side effects, it should become a barrier for code motion.
- // IOM is rebuilt from the next instruction to prevent later
- // instructions from being moved before this MI.
- if (MI.hasUnmodeledSideEffects() && Next != MBB.end()) {
- BuildInstOrderMap(Next, IOM);
- SawStore = false;
- }
- continue;
- }
-
- const MachineOperand *DefMO = nullptr;
- MachineInstr *Insert = nullptr;
-
- // Number of live-ranges that will be shortened. We do not count
- // live-ranges that are defined by a COPY as it could be coalesced later.
- unsigned NumEligibleUse = 0;
-
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg() || MO.isDead() || MO.isDebug())
- continue;
- unsigned Reg = MO.getReg();
- // Do not move the instruction if it def/uses a physical register,
- // unless it is a constant physical register.
- if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
- !MRI.isConstantPhysReg(Reg)) {
- Insert = nullptr;
- break;
- }
- if (MO.isDef()) {
- // Do not move if there is more than one def.
- if (DefMO) {
- Insert = nullptr;
- break;
- }
- DefMO = &MO;
- } else if (MRI.hasOneNonDBGUse(Reg) && MRI.hasOneDef(Reg)) {
- MachineInstr &DefInstr = *MRI.def_instr_begin(Reg);
- if (!DefInstr.isCopy())
- NumEligibleUse++;
- Insert = FindDominatedInstruction(DefInstr, Insert, IOM);
- } else {
- Insert = nullptr;
- break;
- }
- }
- // Move the instruction when # of shrunk live range > 1.
- if (DefMO && Insert && NumEligibleUse > 1 && Barrier <= IOM[Insert]) {
- MachineBasicBlock::iterator I = std::next(Insert->getIterator());
- // Skip all the PHI and debug instructions.
- while (I != MBB.end() && (I->isPHI() || I->isDebugValue()))
- I = std::next(I);
- if (I == MI.getIterator())
- continue;
-
- // Update the dominator order to be the same as the insertion point.
- // We do this to maintain a non-decreasing order without need to update
- // all instruction orders after the insertion point.
- unsigned NewOrder = IOM[&*I];
- IOM[&MI] = NewOrder;
- NumInstrsHoistedToShrinkLiveRange++;
-
- // Find MI's debug value following MI.
- MachineBasicBlock::iterator EndIter = std::next(MI.getIterator());
- if (MI.getOperand(0).isReg())
- for (; EndIter != MBB.end() && EndIter->isDebugValue() &&
- EndIter->getOperand(0).isReg() &&
- EndIter->getOperand(0).getReg() == MI.getOperand(0).getReg();
- ++EndIter, ++Next)
- IOM[&*EndIter] = NewOrder;
- MBB.splice(I, &MBB, MI.getIterator(), EndIter);
- }
- }
- }
- return false;
-}
diff --git a/lib/CodeGen/LowerEmuTLS.cpp b/lib/CodeGen/LowerEmuTLS.cpp
index 6966c8ca4a5f8..5fb5b747f4717 100644
--- a/lib/CodeGen/LowerEmuTLS.cpp
+++ b/lib/CodeGen/LowerEmuTLS.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
@@ -28,14 +29,12 @@ using namespace llvm;
namespace {
class LowerEmuTLS : public ModulePass {
- const TargetMachine *TM;
public:
static char ID; // Pass identification, replacement for typeid
- explicit LowerEmuTLS() : ModulePass(ID), TM(nullptr) { }
- explicit LowerEmuTLS(const TargetMachine *TM)
- : ModulePass(ID), TM(TM) {
+ LowerEmuTLS() : ModulePass(ID) {
initializeLowerEmuTLSPass(*PassRegistry::getPassRegistry());
}
+
bool runOnModule(Module &M) override;
private:
bool addEmuTlsVar(Module &M, const GlobalVariable *GV);
@@ -55,18 +54,21 @@ private:
char LowerEmuTLS::ID = 0;
INITIALIZE_PASS(LowerEmuTLS, "loweremutls",
- "Add __emutls_[vt]. variables for emultated TLS model",
- false, false)
+ "Add __emutls_[vt]. variables for emultated TLS model", false,
+ false)
-ModulePass *llvm::createLowerEmuTLSPass(const TargetMachine *TM) {
- return new LowerEmuTLS(TM);
-}
+ModulePass *llvm::createLowerEmuTLSPass() { return new LowerEmuTLS(); }
bool LowerEmuTLS::runOnModule(Module &M) {
if (skipModule(M))
return false;
- if (!TM || !TM->Options.EmulatedTLS)
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC)
+ return false;
+
+ auto &TM = TPC->getTM<TargetMachine>();
+ if (!TM.Options.EmulatedTLS)
return false;
bool Changed = false;
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 5003115a770fd..adfca9a46239f 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -245,25 +245,26 @@ public:
/// updating the block -> chain mapping. It does not free or tear down the
/// old chain, but the old chain's block list is no longer valid.
void merge(MachineBasicBlock *BB, BlockChain *Chain) {
- assert(BB);
- assert(!Blocks.empty());
+ assert(BB && "Can't merge a null block.");
+ assert(!Blocks.empty() && "Can't merge into an empty chain.");
// Fast path in case we don't have a chain already.
if (!Chain) {
- assert(!BlockToChain[BB]);
+ assert(!BlockToChain[BB] &&
+ "Passed chain is null, but BB has entry in BlockToChain.");
Blocks.push_back(BB);
BlockToChain[BB] = this;
return;
}
- assert(BB == *Chain->begin());
+ assert(BB == *Chain->begin() && "Passed BB is not head of Chain.");
assert(Chain->begin() != Chain->end());
// Update the incoming blocks to point to this chain, and add them to the
// chain structure.
for (MachineBasicBlock *ChainBB : *Chain) {
Blocks.push_back(ChainBB);
- assert(BlockToChain[ChainBB] == Chain && "Incoming blocks not in chain");
+ assert(BlockToChain[ChainBB] == Chain && "Incoming blocks not in chain.");
BlockToChain[ChainBB] = this;
}
}
@@ -1547,13 +1548,15 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
MachineBasicBlock *BestBlock = nullptr;
BlockFrequency BestFreq;
for (MachineBasicBlock *MBB : WorkList) {
- assert(MBB->isEHPad() == IsEHPad);
+ assert(MBB->isEHPad() == IsEHPad &&
+ "EHPad mismatch between block and work list.");
BlockChain &SuccChain = *BlockToChain[MBB];
if (&SuccChain == &Chain)
continue;
- assert(SuccChain.UnscheduledPredecessors == 0 && "Found CFG-violating block");
+ assert(SuccChain.UnscheduledPredecessors == 0 &&
+ "Found CFG-violating block");
BlockFrequency CandidateFreq = MBFI->getBlockFreq(MBB);
DEBUG(dbgs() << " " << getBlockName(MBB) << " -> ";
@@ -1621,9 +1624,12 @@ void MachineBlockPlacement::fillWorkLists(
if (!UpdatedPreds.insert(&Chain).second)
return;
- assert(Chain.UnscheduledPredecessors == 0);
+ assert(
+ Chain.UnscheduledPredecessors == 0 &&
+ "Attempting to place block with unscheduled predecessors in worklist.");
for (MachineBasicBlock *ChainBB : Chain) {
- assert(BlockToChain[ChainBB] == &Chain);
+ assert(BlockToChain[ChainBB] == &Chain &&
+ "Block in chain doesn't match BlockToChain map.");
for (MachineBasicBlock *Pred : ChainBB->predecessors()) {
if (BlockFilter && !BlockFilter->count(Pred))
continue;
@@ -2136,8 +2142,10 @@ void MachineBlockPlacement::buildLoopChains(const MachineLoop &L) {
for (const MachineLoop *InnerLoop : L)
buildLoopChains(*InnerLoop);
- assert(BlockWorkList.empty());
- assert(EHPadWorkList.empty());
+ assert(BlockWorkList.empty() &&
+ "BlockWorkList not empty when starting to build loop chains.");
+ assert(EHPadWorkList.empty() &&
+ "EHPadWorkList not empty when starting to build loop chains.");
BlockFilterSet LoopBlockSet = collectLoopBlockSet(L);
// Check if we have profile data for this function. If yes, we will rotate
@@ -2167,7 +2175,8 @@ void MachineBlockPlacement::buildLoopChains(const MachineLoop &L) {
// walk the blocks, and use a set to prevent visiting a particular chain
// twice.
SmallPtrSet<BlockChain *, 4> UpdatedPreds;
- assert(LoopChain.UnscheduledPredecessors == 0);
+ assert(LoopChain.UnscheduledPredecessors == 0 &&
+ "LoopChain should not have unscheduled predecessors.");
UpdatedPreds.insert(&LoopChain);
for (const MachineBasicBlock *LoopBB : LoopBlockSet)
@@ -2256,8 +2265,10 @@ void MachineBlockPlacement::buildCFGChains() {
for (MachineLoop *L : *MLI)
buildLoopChains(*L);
- assert(BlockWorkList.empty());
- assert(EHPadWorkList.empty());
+ assert(BlockWorkList.empty() &&
+ "BlockWorkList should be empty before building final chain.");
+ assert(EHPadWorkList.empty() &&
+ "EHPadWorkList should be empty before building final chain.");
SmallPtrSet<BlockChain *, 4> UpdatedPreds;
for (MachineBasicBlock &MBB : *F)
@@ -2651,8 +2662,10 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
// there are no MachineLoops.
PreferredLoopExit = nullptr;
- assert(BlockToChain.empty());
- assert(ComputedEdges.empty());
+ assert(BlockToChain.empty() &&
+ "BlockToChain map should be empty before starting placement.");
+ assert(ComputedEdges.empty() &&
+ "Computed Edge map should be empty before starting placement.");
unsigned TailDupSize = TailDupPlacementThreshold;
// If only the aggressive threshold is explicitly set, use it.
diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp
index 2f0f4297ef5c5..6cf751d34e268 100644
--- a/lib/CodeGen/MachineModuleInfo.cpp
+++ b/lib/CodeGen/MachineModuleInfo.cpp
@@ -32,8 +32,8 @@ using namespace llvm;
using namespace llvm::dwarf;
// Handle the Pass registration stuff necessary to use DataLayout's.
-INITIALIZE_TM_PASS(MachineModuleInfo, "machinemoduleinfo",
- "Machine Module Information", false, false)
+INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
+ "Machine Module Information", false, false)
char MachineModuleInfo::ID = 0;
// Out of line virtual method.
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index d2afeae9e70b1..aaa253fde4945 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -60,19 +60,8 @@ namespace {
class PEI : public MachineFunctionPass {
public:
static char ID;
- explicit PEI(const TargetMachine *TM = nullptr) : MachineFunctionPass(ID) {
+ PEI() : MachineFunctionPass(ID) {
initializePEIPass(*PassRegistry::getPassRegistry());
-
- if (TM && (!TM->usesPhysRegsForPEI())) {
- SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
- unsigned &, unsigned &, const MBBVector &,
- const MBBVector &) {};
- ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger *) {};
- } else {
- SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
- ScavengeFrameVirtualRegs = doScavengeFrameVirtualRegs;
- UsesCalleeSaves = true;
- }
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -140,18 +129,17 @@ WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
cl::desc("Warn for stack size bigger than the given"
" number"));
-INITIALIZE_TM_PASS_BEGIN(PEI, "prologepilog", "Prologue/Epilogue Insertion",
- false, false)
+INITIALIZE_PASS_BEGIN(PEI, "prologepilog", "Prologue/Epilogue Insertion", false,
+ false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
-INITIALIZE_TM_PASS_END(PEI, "prologepilog",
- "Prologue/Epilogue Insertion & Frame Finalization",
- false, false)
+INITIALIZE_PASS_END(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion & Frame Finalization", false,
+ false)
-MachineFunctionPass *
-llvm::createPrologEpilogInserterPass(const TargetMachine *TM) {
- return new PEI(TM);
+MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
+ return new PEI();
}
STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
@@ -174,6 +162,20 @@ typedef SmallSetVector<int, 8> StackObjSet;
/// frame indexes with appropriate references.
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
+ if (!SpillCalleeSavedRegisters) {
+ const TargetMachine &TM = Fn.getTarget();
+ if (!TM.usesPhysRegsForPEI()) {
+ SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
+ unsigned &, unsigned &, const MBBVector &,
+ const MBBVector &) {};
+ ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger *) {};
+ } else {
+ SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
+ ScavengeFrameVirtualRegs = doScavengeFrameVirtualRegs;
+ UsesCalleeSaves = true;
+ }
+ }
+
const Function* F = Fn.getFunction();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
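PrologEpilogInserter shows a third flavor of the migration: the constructor used to pick real or no-op spill/scavenge callbacks based on TM->usesPhysRegsForPEI(), and that choice now happens lazily on the first runOnMachineFunction, where Fn.getTarget() supplies the TargetMachine. The general shape, reduced to plain std::function with no LLVM types (all names ours):

#include <functional>

struct LazyConfigured {
  std::function<void()> Spill;  // deliberately unset until the first run

  bool runOnUnit(bool UsesPhysRegs) {
    if (!Spill)  // one-time, target-dependent setup, moved out of the ctor
      Spill = UsesPhysRegs ? std::function<void()>([] { /* real spilling */ })
                           : std::function<void()>([] { /* no-op */ });
    Spill();
    return false;
  }
};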
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 1803ea2b92490..7b3a5d5c5ff7f 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -2666,11 +2666,17 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
// Look for values being erased.
bool DidPrune = false;
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
- if (Vals[i].Resolution != CR_Erase)
+ // We should trigger in all cases in which eraseInstrs() does something,
+ // i.e. for values that eraseInstrs() will erase or prune.
+ if (Vals[i].Resolution != CR_Erase &&
+ (Vals[i].Resolution != CR_Keep || !Vals[i].ErasableImplicitDef ||
+ !Vals[i].Pruned))
continue;
// Check subranges at the point where the copy will be removed.
SlotIndex Def = LR.getValNumInfo(i)->def;
+ // Print message so mismatches with eraseInstrs() can be diagnosed.
+ DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def << '\n');
for (LiveInterval::SubRange &S : LI.subranges()) {
LiveQueryResult Q = S.Query(Def);
diff --git a/lib/CodeGen/SafeStack.cpp b/lib/CodeGen/SafeStack.cpp
index 08b3d345f6899..2771fdbd737ae 100644
--- a/lib/CodeGen/SafeStack.cpp
+++ b/lib/CodeGen/SafeStack.cpp
@@ -24,6 +24,7 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
@@ -767,13 +768,12 @@ class SafeStackLegacyPass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid..
- SafeStackLegacyPass(const TargetMachine *TM) : FunctionPass(ID), TM(TM) {
+ SafeStackLegacyPass() : FunctionPass(ID), TM(nullptr) {
initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
}
- SafeStackLegacyPass() : SafeStackLegacyPass(nullptr) {}
-
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetPassConfig>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<AssumptionCacheTracker>();
}
@@ -793,8 +793,7 @@ public:
return false;
}
- if (!TM)
- report_fatal_error("Target machine is required");
+ TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
if (!TL)
report_fatal_error("TargetLowering instance is required");
@@ -821,11 +820,10 @@ public:
} // anonymous namespace
char SafeStackLegacyPass::ID = 0;
-INITIALIZE_TM_PASS_BEGIN(SafeStackLegacyPass, "safe-stack",
- "Safe Stack instrumentation pass", false, false)
-INITIALIZE_TM_PASS_END(SafeStackLegacyPass, "safe-stack",
- "Safe Stack instrumentation pass", false, false)
+INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, "safe-stack",
+ "Safe Stack instrumentation pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(SafeStackLegacyPass, "safe-stack",
+ "Safe Stack instrumentation pass", false, false)
-FunctionPass *llvm::createSafeStackPass(const llvm::TargetMachine *TM) {
- return new SafeStackLegacyPass(TM);
-}
+FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }
diff --git a/lib/CodeGen/SafeStackColoring.cpp b/lib/CodeGen/SafeStackColoring.cpp
index 09289f947dc96..21f2fa497233a 100644
--- a/lib/CodeGen/SafeStackColoring.cpp
+++ b/lib/CodeGen/SafeStackColoring.cpp
@@ -20,9 +20,10 @@ using namespace llvm::safestack;
#define DEBUG_TYPE "safestackcoloring"
+// Disabled by default due to PR32143.
static cl::opt<bool> ClColoring("safe-stack-coloring",
cl::desc("enable safe stack coloring"),
- cl::Hidden, cl::init(true));
+ cl::Hidden, cl::init(false));
const StackColoring::LiveRange &StackColoring::getLiveRange(AllocaInst *AI) {
const auto IT = AllocaNumbering.find(AI);
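With the default flipped above, SafeStack slot coloring is opt-in until PR32143 is resolved. The cl::opt registered here can still be re-enabled from the command line; one plausible spelling (the exact driver plumbing is an assumption, not taken from this diff):

llc -safe-stack-coloring=true input.ll -o input.s
clang -fsanitize=safe-stack -mllvm -safe-stack-coloring=true input.c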
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0ccee175abfb5..5d450e7e078ce 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2138,6 +2138,17 @@ SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
if (isNullConstant(CarryIn))
return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1);
+ // fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry.
+ if (isNullConstant(N0) && isNullConstant(N1)) {
+ EVT VT = N0.getValueType();
+ EVT CarryVT = CarryIn.getValueType();
+ SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT);
+ AddToWorklist(CarryExt.getNode());
+ return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt,
+ DAG.getConstant(1, DL, VT)),
+ DAG.getConstant(0, DL, CarryVT));
+ }
+
if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N))
return Combined;
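The new fold rests on a simple identity: when both addends are known zero, the sum is just the boolean carry-in widened to the result type, and no carry-out can ever be produced; the AND with 1 normalizes the extended carry bit. The same identity checked with plain integers (a sketch of the arithmetic, not of DAG code):

#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t CarryIn = 0; CarryIn <= 1; ++CarryIn) {
    uint64_t Sum = 0 + 0 + CarryIn;  // models (addcarry 0, 0, CarryIn)
    assert(Sum == (CarryIn & 1));    // value: the masked carry-in
    assert(Sum >= CarryIn);          // 0 + 0 + c cannot wrap: carry-out is 0
  }
  return 0;
}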
@@ -12533,49 +12544,54 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
// mergeable cases. To prevent this, we prune such stores from the
// front of StoreNodes here.
- unsigned StartIdx = 0;
- while ((StartIdx + 1 < StoreNodes.size()) &&
- StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
- StoreNodes[StartIdx + 1].OffsetFromBase)
- ++StartIdx;
-
- // Bail if we don't have enough candidates to merge.
- if (StartIdx + 1 >= StoreNodes.size())
- return false;
-
- if (StartIdx)
- StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
+ bool RV = false;
+ while (StoreNodes.size() > 1) {
+ unsigned StartIdx = 0;
+ while ((StartIdx + 1 < StoreNodes.size()) &&
+ StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
+ StoreNodes[StartIdx + 1].OffsetFromBase)
+ ++StartIdx;
- // Scan the memory operations on the chain and find the first non-consecutive
- // store memory address.
- unsigned NumConsecutiveStores = 0;
- int64_t StartAddress = StoreNodes[0].OffsetFromBase;
-
- // Check that the addresses are consecutive starting from the second
- // element in the list of stores.
- for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
- int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
- if (CurrAddress - StartAddress != (ElementSizeBytes * i))
- break;
- NumConsecutiveStores = i + 1;
- }
+ // Bail if we don't have enough candidates to merge.
+ if (StartIdx + 1 >= StoreNodes.size())
+ return RV;
- if (NumConsecutiveStores < 2)
- return false;
+ if (StartIdx)
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
+
+ // Scan the memory operations on the chain and find the first
+ // non-consecutive store memory address.
+ unsigned NumConsecutiveStores = 1;
+ int64_t StartAddress = StoreNodes[0].OffsetFromBase;
+ // Check that the addresses are consecutive starting from the second
+ // element in the list of stores.
+ for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
+ int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
+ if (CurrAddress - StartAddress != (ElementSizeBytes * i))
+ break;
+ NumConsecutiveStores = i + 1;
+ }
- // Check that we can merge these candidates without causing a cycle
- if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumConsecutiveStores))
- return false;
+ if (NumConsecutiveStores < 2) {
+ StoreNodes.erase(StoreNodes.begin(),
+ StoreNodes.begin() + NumConsecutiveStores);
+ continue;
+ }
+ // Check that we can merge these candidates without causing a cycle
+ if (!checkMergeStoreCandidatesForDependencies(StoreNodes,
+ NumConsecutiveStores)) {
+ StoreNodes.erase(StoreNodes.begin(),
+ StoreNodes.begin() + NumConsecutiveStores);
+ continue;
+ }
- // The node with the lowest store address.
- LLVMContext &Context = *DAG.getContext();
- const DataLayout &DL = DAG.getDataLayout();
+ // The node with the lowest store address.
+ LLVMContext &Context = *DAG.getContext();
+ const DataLayout &DL = DAG.getDataLayout();
- // Store the constants into memory as one consecutive store.
- if (IsConstantSrc) {
- bool RV = false;
- while (NumConsecutiveStores > 1) {
+ // Store the constants into memory as one consecutive store.
+ if (IsConstantSrc) {
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
@@ -12635,33 +12651,33 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
}
// Check if we found a legal integer type that creates a meaningful merge.
- if (LastLegalType < 2 && LastLegalVectorType < 2)
- break;
+ if (LastLegalType < 2 && LastLegalVectorType < 2) {
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 1);
+ continue;
+ }
bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
unsigned NumElem = (UseVector) ? LastLegalVectorType : LastLegalType;
bool Merged = MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem,
true, UseVector);
- if (!Merged)
- break;
+ if (!Merged) {
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
+ continue;
+ }
// Remove merged stores for next iteration.
- StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
RV = true;
- NumConsecutiveStores -= NumElem;
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
+ continue;
}
- return RV;
- }
- // When extracting multiple vector elements, try to store them
- // in one vector store rather than a sequence of scalar stores.
- if (IsExtractVecSrc) {
- bool RV = false;
- while (StoreNodes.size() >= 2) {
+ // When extracting multiple vector elements, try to store them
+ // in one vector store rather than a sequence of scalar stores.
+ if (IsExtractVecSrc) {
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
- unsigned NumStoresToMerge = 0;
+ unsigned NumStoresToMerge = 1;
bool IsVec = MemVT.isVector();
for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
@@ -12673,7 +12689,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
// handles consecutive loads).
if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT &&
StoreValOpcode != ISD::EXTRACT_SUBVECTOR)
- return false;
+ return RV;
// Find a legal type for the vector store.
unsigned Elts = i + 1;
@@ -12693,187 +12709,205 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
bool Merged = MergeStoresOfConstantsOrVecElts(
StoreNodes, MemVT, NumStoresToMerge, false, true);
- if (!Merged)
- break;
+ if (!Merged) {
+ StoreNodes.erase(StoreNodes.begin(),
+ StoreNodes.begin() + NumStoresToMerge);
+ continue;
+ }
// Remove merged stores for next iteration.
StoreNodes.erase(StoreNodes.begin(),
StoreNodes.begin() + NumStoresToMerge);
RV = true;
- NumConsecutiveStores -= NumStoresToMerge;
+ continue;
}
- return RV;
- }
- // Below we handle the case of multiple consecutive stores that
- // come from multiple consecutive loads. We merge them into a single
- // wide load and a single wide store.
+ // Below we handle the case of multiple consecutive stores that
+ // come from multiple consecutive loads. We merge them into a single
+ // wide load and a single wide store.
- // Look for load nodes which are used by the stored values.
- SmallVector<MemOpLink, 8> LoadNodes;
+ // Look for load nodes which are used by the stored values.
+ SmallVector<MemOpLink, 8> LoadNodes;
- // Find acceptable loads. Loads need to have the same chain (token factor),
- // must not be zext, volatile, indexed, and they must be consecutive.
- BaseIndexOffset LdBasePtr;
- for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
- StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
- LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
- if (!Ld) break;
-
- // Loads must only have one use.
- if (!Ld->hasNUsesOfValue(1, 0))
- break;
+ // Find acceptable loads. Loads need to have the same chain (token factor),
+ // must not be zext, volatile, indexed, and they must be consecutive.
+ BaseIndexOffset LdBasePtr;
+ for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
+ if (!Ld)
+ break;
- // The memory operands must not be volatile.
- if (Ld->isVolatile() || Ld->isIndexed())
- break;
+ // Loads must only have one use.
+ if (!Ld->hasNUsesOfValue(1, 0))
+ break;
- // We do not accept ext loads.
- if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
- break;
+ // The memory operands must not be volatile.
+ if (Ld->isVolatile() || Ld->isIndexed())
+ break;
- // The stored memory type must be the same.
- if (Ld->getMemoryVT() != MemVT)
- break;
+ // We do not accept ext loads.
+ if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
+ break;
- BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
- // If this is not the first ptr that we check.
- if (LdBasePtr.Base.getNode()) {
- // The base ptr must be the same.
- if (!LdPtr.equalBaseIndex(LdBasePtr))
+ // The stored memory type must be the same.
+ if (Ld->getMemoryVT() != MemVT)
break;
- } else {
- // Check that all other base pointers are the same as this one.
- LdBasePtr = LdPtr;
+
+ BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
+ // If this is not the first ptr that we check.
+ if (LdBasePtr.Base.getNode()) {
+ // The base ptr must be the same.
+ if (!LdPtr.equalBaseIndex(LdBasePtr))
+ break;
+ } else {
+ // Check that all other base pointers are the same as this one.
+ LdBasePtr = LdPtr;
+ }
+
+ // We found a potential memory operand to merge.
+ LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset));
}
- // We found a potential memory operand to merge.
- LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset));
- }
+ if (LoadNodes.size() < 2) {
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 1);
+ continue;
+ }
- if (LoadNodes.size() < 2)
- return false;
+ // If we have load/store pair instructions and we only have two values,
+ // don't bother.
+ unsigned RequiredAlignment;
+ if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
+ St->getAlignment() >= RequiredAlignment) {
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
+ continue;
+ }
+ LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
+ unsigned FirstStoreAS = FirstInChain->getAddressSpace();
+ unsigned FirstStoreAlign = FirstInChain->getAlignment();
+ LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
+ unsigned FirstLoadAS = FirstLoad->getAddressSpace();
+ unsigned FirstLoadAlign = FirstLoad->getAlignment();
+
+ // Scan the memory operations on the chain and find the first
+ // non-consecutive load memory address. These variables hold the index in
+ // the store node array.
+ unsigned LastConsecutiveLoad = 0;
+ // This variable refers to the size and not index in the array.
+ unsigned LastLegalVectorType = 0;
+ unsigned LastLegalIntegerType = 0;
+ StartAddress = LoadNodes[0].OffsetFromBase;
+ SDValue FirstChain = FirstLoad->getChain();
+ for (unsigned i = 1; i < LoadNodes.size(); ++i) {
+ // All loads must share the same chain.
+ if (LoadNodes[i].MemNode->getChain() != FirstChain)
+ break;
- // If we have load/store pair instructions and we only have two values,
- // don't bother.
- unsigned RequiredAlignment;
- if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
- St->getAlignment() >= RequiredAlignment)
- return false;
- LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
- unsigned FirstStoreAS = FirstInChain->getAddressSpace();
- unsigned FirstStoreAlign = FirstInChain->getAlignment();
- LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
- unsigned FirstLoadAS = FirstLoad->getAddressSpace();
- unsigned FirstLoadAlign = FirstLoad->getAlignment();
-
- // Scan the memory operations on the chain and find the first non-consecutive
- // load memory address. These variables hold the index in the store node
- // array.
- unsigned LastConsecutiveLoad = 0;
- // This variable refers to the size and not index in the array.
- unsigned LastLegalVectorType = 0;
- unsigned LastLegalIntegerType = 0;
- StartAddress = LoadNodes[0].OffsetFromBase;
- SDValue FirstChain = FirstLoad->getChain();
- for (unsigned i = 1; i < LoadNodes.size(); ++i) {
- // All loads must share the same chain.
- if (LoadNodes[i].MemNode->getChain() != FirstChain)
- break;
+ int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
+ if (CurrAddress - StartAddress != (ElementSizeBytes * i))
+ break;
+ LastConsecutiveLoad = i;
+ // Find a legal type for the vector store.
+ EVT StoreTy = EVT::getVectorVT(Context, MemVT, i + 1);
+ bool IsFastSt, IsFastLd;
+ if (TLI.isTypeLegal(StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign, &IsFastSt) &&
+ IsFastSt &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign, &IsFastLd) &&
+ IsFastLd) {
+ LastLegalVectorType = i + 1;
+ }
- int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
- if (CurrAddress - StartAddress != (ElementSizeBytes * i))
- break;
- LastConsecutiveLoad = i;
- // Find a legal type for the vector store.
- EVT StoreTy = EVT::getVectorVT(Context, MemVT, i+1);
- bool IsFastSt, IsFastLd;
- if (TLI.isTypeLegal(StoreTy) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFastSt) && IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
- FirstLoadAlign, &IsFastLd) && IsFastLd) {
- LastLegalVectorType = i + 1;
- }
-
- // Find a legal type for the integer store.
- unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
- StoreTy = EVT::getIntegerVT(Context, SizeInBits);
- if (TLI.isTypeLegal(StoreTy) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFastSt) && IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
- FirstLoadAlign, &IsFastLd) && IsFastLd)
- LastLegalIntegerType = i + 1;
- // Or check whether a truncstore and extload is legal.
- else if (TLI.getTypeAction(Context, StoreTy) ==
- TargetLowering::TypePromoteInteger) {
- EVT LegalizedStoredValueTy =
- TLI.getTypeToTransformTo(Context, StoreTy);
- if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
- TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
- TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
- TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
- TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
- FirstStoreAS, FirstStoreAlign, &IsFastSt) &&
+ // Find a legal type for the integer store.
+ unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
+ StoreTy = EVT::getIntegerVT(Context, SizeInBits);
+ if (TLI.isTypeLegal(StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign, &IsFastSt) &&
IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
- FirstLoadAS, FirstLoadAlign, &IsFastLd) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign, &IsFastLd) &&
IsFastLd)
- LastLegalIntegerType = i+1;
+ LastLegalIntegerType = i + 1;
+ // Or check whether a truncstore and extload is legal.
+ else if (TLI.getTypeAction(Context, StoreTy) ==
+ TargetLowering::TypePromoteInteger) {
+ EVT LegalizedStoredValueTy = TLI.getTypeToTransformTo(Context, StoreTy);
+ if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy,
+ StoreTy) &&
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy,
+ StoreTy) &&
+ TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstStoreAS, FirstStoreAlign, &IsFastSt) &&
+ IsFastSt &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstLoadAS, FirstLoadAlign, &IsFastLd) &&
+ IsFastLd)
+ LastLegalIntegerType = i + 1;
+ }
}
- }
- // Only use vector types if the vector type is larger than the integer type.
- // If they are the same, use integers.
- bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
- unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);
+ // Only use vector types if the vector type is larger than the integer type.
+ // If they are the same, use integers.
+ bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
+ unsigned LastLegalType =
+ std::max(LastLegalVectorType, LastLegalIntegerType);
- // We add +1 here because the LastXXX variables refer to location while
- // the NumElem refers to array/index size.
- unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
- NumElem = std::min(LastLegalType, NumElem);
+ // We add +1 here because the LastXXX variables refer to location while
+ // the NumElem refers to array/index size.
+ unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
+ NumElem = std::min(LastLegalType, NumElem);
- if (NumElem < 2)
- return false;
+ if (NumElem < 2) {
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 1);
+ continue;
+ }
- // Find if it is better to use vectors or integers to load and store
- // to memory.
- EVT JointMemOpVT;
- if (UseVectorTy) {
- JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem);
- } else {
- unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
- JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
- }
+ // Find if it is better to use vectors or integers to load and store
+ // to memory.
+ EVT JointMemOpVT;
+ if (UseVectorTy) {
+ JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem);
+ } else {
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
+ }
- SDLoc LoadDL(LoadNodes[0].MemNode);
- SDLoc StoreDL(StoreNodes[0].MemNode);
+ SDLoc LoadDL(LoadNodes[0].MemNode);
+ SDLoc StoreDL(StoreNodes[0].MemNode);
- // The merged loads are required to have the same incoming chain, so
- // using the first's chain is acceptable.
- SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
- FirstLoad->getBasePtr(),
- FirstLoad->getPointerInfo(), FirstLoadAlign);
+ // The merged loads are required to have the same incoming chain, so
+ // using the first's chain is acceptable.
+ SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
+ FirstLoad->getBasePtr(),
+ FirstLoad->getPointerInfo(), FirstLoadAlign);
- SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
+ SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
- AddToWorklist(NewStoreChain.getNode());
+ AddToWorklist(NewStoreChain.getNode());
- SDValue NewStore =
- DAG.getStore(NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
- FirstInChain->getPointerInfo(), FirstStoreAlign);
+ SDValue NewStore = DAG.getStore(
+ NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), FirstStoreAlign);
- // Transfer chain users from old loads to the new load.
- for (unsigned i = 0; i < NumElem; ++i) {
- LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
- DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
- SDValue(NewLoad.getNode(), 1));
- }
+ // Transfer chain users from old loads to the new load.
+ for (unsigned i = 0; i < NumElem; ++i) {
+ LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
+ SDValue(NewLoad.getNode(), 1));
+ }
- // Replace the all stores with the new store.
- for (unsigned i = 0; i < NumElem; ++i)
- CombineTo(StoreNodes[i].MemNode, NewStore);
- return true;
+ // Replace all of the stores with the new store.
+ for (unsigned i = 0; i < NumElem; ++i)
+ CombineTo(StoreNodes[i].MemNode, NewStore);
+ RV = true;
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
+ continue;
+ }
+ return RV;
}
SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
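The DAGCombiner hunk above does two things: the merge routine now loops over the remaining StoreNodes instead of returning after the first successful merge (hence the erase/continue pattern and the RV accumulator), and the integer-type search first tries a directly legal wide store before falling back to a promoted type guarded by the truncstore/extload legality checks. A minimal standalone sketch of that type-search control flow, with hypothetical legalAndFast/promotedExtLoadOk predicates standing in for the TLI queries (this models the shape of the loop, not LLVM's API):

#include <functional>

// Largest number of consecutive elements mergeable into one integer
// load/store pair, mirroring the LastLegalIntegerType loop above.
unsigned lastLegalIntegerType(
    unsigned NumConsecutiveStores, unsigned ElementSizeBytes,
    const std::function<bool(unsigned)> &legalAndFast,        // hypothetical
    const std::function<bool(unsigned)> &promotedExtLoadOk) { // hypothetical
  unsigned LastLegal = 1;
  for (unsigned i = 1; i < NumConsecutiveStores; ++i) {
    unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
    if (legalAndFast(SizeInBits))
      LastLegal = i + 1;       // the wide integer store is directly legal
    else if (promotedExtLoadOk(SizeInBits))
      LastLegal = i + 1;       // truncstore + extload of the promoted type
  }
  return LastLegal;
}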
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 057badcd6b740..16c1f78f1b359 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4685,9 +4685,10 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
- const TargetLowering &TLI, StringRef Str) {
+ const TargetLowering &TLI,
+ const ConstantDataArraySlice &Slice) {
// Handle vector with all elements zero.
- if (Str.empty()) {
+ if (Slice.Array == nullptr) {
if (VT.isInteger())
return DAG.getConstant(0, dl, VT);
else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
@@ -4706,15 +4707,15 @@ static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
assert(!VT.isVector() && "Can't handle vector type here!");
unsigned NumVTBits = VT.getSizeInBits();
unsigned NumVTBytes = NumVTBits / 8;
- unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
+ unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
APInt Val(NumVTBits, 0);
if (DAG.getDataLayout().isLittleEndian()) {
for (unsigned i = 0; i != NumBytes; ++i)
- Val |= (uint64_t)(unsigned char)Str[i] << i*8;
+ Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
} else {
for (unsigned i = 0; i != NumBytes; ++i)
- Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
+ Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
}
// If the "cost" of materializing the integer immediate is less than the cost
@@ -4731,9 +4732,8 @@ SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
}
-/// isMemSrcFromString - Returns true if memcpy source is a string constant.
-///
-static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
+/// Returns true if memcpy source is constant data.
+static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
uint64_t SrcDelta = 0;
GlobalAddressSDNode *G = nullptr;
if (Src.getOpcode() == ISD::GlobalAddress)
@@ -4747,8 +4747,8 @@ static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
if (!G)
return false;
- return getConstantStringInfo(G->getGlobal(), Str,
- SrcDelta + G->getOffset(), false);
+ return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
+ SrcDelta + G->getOffset());
}
/// Determines the optimal series of memory ops to replace the memset / memcpy.
@@ -4891,15 +4891,15 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
- StringRef Str;
- bool CopyFromStr = isMemSrcFromString(Src, Str);
- bool isZeroStr = CopyFromStr && Str.empty();
+ ConstantDataArraySlice Slice;
+ bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
+ bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
- (isZeroStr ? 0 : SrcAlign),
- false, false, CopyFromStr, true,
+ (isZeroConstant ? 0 : SrcAlign),
+ false, false, CopyFromConstant, true,
DstPtrInfo.getAddrSpace(),
SrcPtrInfo.getAddrSpace(),
DAG, TLI))
@@ -4943,18 +4943,29 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
DstOff -= VTSize - Size;
}
- if (CopyFromStr &&
- (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
+ if (CopyFromConstant &&
+ (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
// It's unlikely a store of a vector immediate can be done in a single
// instruction. It would require a load from a constantpool first.
// We only handle zero vectors here.
// FIXME: Handle other cases where store of vector immediate is done in
// a single instruction.
- Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
+ ConstantDataArraySlice SubSlice;
+ if (SrcOff < Slice.Length) {
+ SubSlice = Slice;
+ SubSlice.move(SrcOff);
+ } else {
+ // This is an out-of-bounds access and hence UB. Pretend we read zero.
+ SubSlice.Array = nullptr;
+ SubSlice.Offset = 0;
+ SubSlice.Length = VTSize;
+ }
+ Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
if (Value.getNode())
Store = DAG.getStore(Chain, dl, Value,
DAG.getMemBasePlusOffset(Dst, DstOff, dl),
- DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
+ DstPtrInfo.getWithOffset(DstOff), Align,
+ MMOFlags);
}
if (!Store.getNode()) {
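The SelectionDAG hunk replaces the StringRef plumbing with a ConstantDataArraySlice so that memcpy-from-constant lowering can describe a window (Array, Offset, Length) into arbitrary constant data, with a null Array meaning "all zeros" and the SrcOff >= Slice.Length branch turning an out-of-bounds read into zeros. A self-contained sketch of the byte-packing getMemsetStringVal performs on such a slice, using a simplified stand-in for ConstantDataArraySlice (the struct and helper below are illustrative, not the LLVM types):

#include <algorithm>
#include <cstddef>
#include <cstdint>

struct Slice {
  const uint8_t *Array; // nullptr means the data is all zero
  size_t Offset;
  size_t Length;
  uint8_t operator[](size_t I) const { return Array[Offset + I]; }
  void move(size_t Delta) { Offset += Delta; Length -= Delta; }
};

// Pack up to NumVTBytes of constant data into a little-endian immediate,
// mirroring the isLittleEndian loop in getMemsetStringVal.
uint64_t memsetValueFromSlice(const Slice &S, unsigned NumVTBytes) {
  if (S.Array == nullptr)
    return 0; // zero constant, or the out-of-bounds (UB) case above
  unsigned NumBytes =
      static_cast<unsigned>(std::min<size_t>(NumVTBytes, S.Length));
  uint64_t Val = 0;
  for (unsigned i = 0; i != NumBytes; ++i)
    Val |= static_cast<uint64_t>(S[i]) << (i * 8);
  return Val;
}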
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index c0a5041b13952..1c66649cae017 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -110,8 +110,8 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
Builder.FuncInfo.StatepointStackSlots.size() &&
"Broken invariant");
- StatepointMaxSlotsRequired = std::max<unsigned long>(
- StatepointMaxSlotsRequired, Builder.FuncInfo.StatepointStackSlots.size());
+ StatepointMaxSlotsRequired.updateMax(
+ Builder.FuncInfo.StatepointStackSlots.size());
return SpillSlot;
}
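The StatepointLowering hunk swaps a manual std::max store for Statistic::updateMax, which performs the read-compare-write as one lock-free update so concurrent compilations cannot lose a maximum. A minimal sketch of that idea with std::atomic (a stand-in helper, not the LLVM Statistic implementation):

#include <atomic>

// Raise Stat to V if V is larger, retrying on racing writers.
void updateMax(std::atomic<unsigned> &Stat, unsigned V) {
  unsigned Prev = Stat.load(std::memory_order_relaxed);
  // compare_exchange_weak reloads Prev on failure, so the loop exits as
  // soon as V is no longer larger or our value has been installed.
  while (V > Prev &&
         !Stat.compare_exchange_weak(Prev, V, std::memory_order_relaxed))
    ;
}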
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index a8aafe78748dc..5da77264261b3 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -58,12 +58,13 @@ static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
cl::init(true), cl::Hidden);
char StackProtector::ID = 0;
-INITIALIZE_TM_PASS(StackProtector, "stack-protector", "Insert stack protectors",
- false, true)
+INITIALIZE_PASS_BEGIN(StackProtector, "stack-protector",
+ "Insert stack protectors", false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(StackProtector, "stack-protector",
+ "Insert stack protectors", false, true)
-FunctionPass *llvm::createStackProtectorPass(const TargetMachine *TM) {
- return new StackProtector(TM);
-}
+FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }
StackProtector::SSPLayoutKind
StackProtector::getSSPLayout(const AllocaInst *AI) const {
@@ -97,6 +98,8 @@ bool StackProtector::runOnFunction(Function &Fn) {
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
+ TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+ Trip = TM->getTargetTriple();
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
HasPrologue = false;
HasIRCheck = false;
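The StackProtector hunk is the template for the whole commit: instead of the pass carrying a TargetMachine pointer injected through its constructor (and the INITIALIZE_TM_PASS machinery), it declares a dependency on the TargetPassConfig immutable pass and asks it for the TargetMachine when it runs. A hedged sketch of that pattern for a hypothetical MyTMUserPass (the macros and headers are the real LLVM ones of this era; the pass itself is illustrative):

#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

namespace {
struct MyTMUserPass : public FunctionPass {
  static char ID;
  MyTMUserPass() : FunctionPass(ID) {} // note: no TargetMachine parameter

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>(); // replaces constructor-injected TM
  }

  bool runOnFunction(Function &Fn) override {
    // The TargetMachine now comes from the pass pipeline at run time.
    const TargetMachine &TM =
        getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    (void)TM;
    return false;
  }
};
} // end anonymous namespace

char MyTMUserPass::ID = 0;

If no TargetPassConfig has been scheduled, the getAnalysis call fails loudly, which is exactly the situation the new report_fatal_error in the TargetPassConfig default constructor below diagnoses.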
diff --git a/lib/CodeGen/TargetPassConfig.cpp b/lib/CodeGen/TargetPassConfig.cpp
index 9724cb0745841..83348058eca9b 100644
--- a/lib/CodeGen/TargetPassConfig.cpp
+++ b/lib/CodeGen/TargetPassConfig.cpp
@@ -315,7 +315,9 @@ TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
TargetPassConfig::TargetPassConfig()
: ImmutablePass(ID), PM(nullptr) {
- llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
+ report_fatal_error("Trying to construct TargetPassConfig without a target "
+ "machine. Scheduling a CodeGen pass without a target "
+ "triple set?");
}
// Helper to verify the analysis is really immutable.
@@ -514,14 +516,14 @@ void TargetPassConfig::addPassesToHandleExceptions() {
LLVM_FALLTHROUGH;
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
- addPass(createDwarfEHPass(TM));
+ addPass(createDwarfEHPass());
break;
case ExceptionHandling::WinEH:
// We support using both GCC-style and MSVC-style exceptions on Windows, so
// add both preparation passes. Each pass will only actually run if it
// recognizes the personality function.
- addPass(createWinEHPass(TM));
- addPass(createDwarfEHPass(TM));
+ addPass(createWinEHPass());
+ addPass(createDwarfEHPass());
break;
case ExceptionHandling::None:
addPass(createLowerInvokePass());
@@ -536,7 +538,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
/// before exception handling preparation passes.
void TargetPassConfig::addCodeGenPrepare() {
if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
- addPass(createCodeGenPreparePass(TM));
+ addPass(createCodeGenPreparePass());
addPass(createRewriteSymbolsPass());
}
@@ -551,8 +553,8 @@ void TargetPassConfig::addISelPrepare() {
// Add both the safe stack and the stack protection passes: each of them will
// only protect functions that have corresponding attributes.
- addPass(createSafeStackPass(TM));
- addPass(createStackProtectorPass(TM));
+ addPass(createSafeStackPass());
+ addPass(createStackProtectorPass());
if (PrintISelInput)
addPass(createPrintFunctionPass(
@@ -623,9 +625,6 @@ void TargetPassConfig::addMachinePasses() {
addPass(&LocalStackSlotAllocationID, false);
}
- if (getOptLevel() != CodeGenOpt::None)
- addPass(&LiveRangeShrinkID);
-
// Run pre-ra passes.
addPreRegAlloc();
@@ -650,7 +649,7 @@ void TargetPassConfig::addMachinePasses() {
// Prolog/Epilog inserter needs a TargetMachine to instantiate. But only
// do so if it hasn't been disabled, substituted, or overridden.
if (!isPassSubstitutedOrOverridden(&PrologEpilogCodeInserterID))
- addPass(createPrologEpilogInserterPass(TM));
+ addPass(createPrologEpilogInserterPass());
/// Add passes that optimize machine instructions after register allocation.
if (getOptLevel() != CodeGenOpt::None)
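Taken together, the TargetPassConfig hunks finish the same migration at the call sites: every factory loses its TargetMachine argument because the passes now recover the TM from the TargetPassConfig analysis, as sketched after the StackProtector hunk above. For a hypothetical pass the before/after is just:

// Before: the factory threaded the TargetMachine through the constructor.
//   FunctionPass *llvm::createMyPass(const TargetMachine *TM) {
//     return new MyPass(TM);
//   }
// After: no TM parameter; the pass asks TargetPassConfig for it when run.
FunctionPass *llvm::createMyPass() { return new MyPass(); }

The WinEHPrepare hunk below applies the identical treatment: INITIALIZE_PASS in place of INITIALIZE_TM_PASS, plus a TM-less constructor and factory.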
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index ae07e8b2fa032..a632b40c20f5c 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -54,7 +54,7 @@ namespace {
class WinEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
- WinEHPrepare(const TargetMachine *TM = nullptr) : FunctionPass(ID) {}
+ WinEHPrepare() : FunctionPass(ID) {}
bool runOnFunction(Function &Fn) override;
@@ -94,12 +94,10 @@ private:
} // end anonymous namespace
char WinEHPrepare::ID = 0;
-INITIALIZE_TM_PASS(WinEHPrepare, "winehprepare", "Prepare Windows exceptions",
- false, false)
+INITIALIZE_PASS(WinEHPrepare, "winehprepare", "Prepare Windows exceptions",
+ false, false)
-FunctionPass *llvm::createWinEHPass(const TargetMachine *TM) {
- return new WinEHPrepare(TM);
-}
+FunctionPass *llvm::createWinEHPass() { return new WinEHPrepare(); }
bool WinEHPrepare::runOnFunction(Function &Fn) {
if (!Fn.hasPersonalityFn())