path: root/lib/CodeGen/MachineInstr.cpp
author     Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:12 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:12 +0000
commit     e6d1592492a3a379186bfb02bd0f4eda0669c0d5 (patch)
tree       599ab169a01f1c86eda9adc774edaedde2f2db5b /lib/CodeGen/MachineInstr.cpp
parent     1a56a5ead7a2e84bee8240f5f6b033b5f1707154 (diff)
Vendor import of stripped llvm trunk r366426 (just before the release_90 branching point)  [tag: vendor/llvm/llvm-trunk-r366426]
Notes:
    svn path=/vendor/llvm/dist/; revision=351278
    svn path=/vendor/llvm/llvm-trunk-r366426/; revision=351279; tag=vendor/llvm/llvm-trunk-r366426
Diffstat (limited to 'lib/CodeGen/MachineInstr.cpp')
 -rw-r--r--  lib/CodeGen/MachineInstr.cpp | 128
 1 file changed, 103 insertions(+), 25 deletions(-)
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 764a84c7e132..e5c398a2d10c 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -1,9 +1,8 @@
//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -26,6 +25,7 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
@@ -50,9 +50,9 @@
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
-#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
@@ -225,12 +225,13 @@ void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
}
#ifndef NDEBUG
- bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
+ bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata ||
+ Op.getType() == MachineOperand::MO_MCSymbol;
// OpNo now points to the desired insertion point. Unless this is a variadic
// instruction, only implicit regs are allowed beyond MCID->getNumOperands().
// RegMask operands go between the explicit and implicit operands.
assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
- OpNo < MCID->getNumOperands() || isMetaDataOp) &&
+ OpNo < MCID->getNumOperands() || isDebugOp) &&
"Trying to add an operand to a machine instr that is already done!");
#endif
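
Note: the assert above is the guard that rejects operands appended past MCID->getNumOperands() on non-variadic instructions; this hunk widens the escape hatch from metadata operands to MCSymbol operands as well. A minimal sketch of the kind of caller this enables, assuming an existing MachineInstr *MI and its MachineFunction MF (illustrative, not from the patch):

    // Attach an MCSymbol operand that the instruction's MCInstrDesc does
    // not declare; with this change the NDEBUG-only assert no longer fires.
    MCSymbol *Sym = MF.getContext().createTempSymbol();
    MI->addOperand(MF, MachineOperand::CreateMCSymbol(Sym));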
@@ -512,45 +513,65 @@ void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol));
}
+void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
+ const MachineInstr &MI) {
+ if (this == &MI)
+ // Nothing to do for a self-clone!
+ return;
+
+ assert(&MF == MI.getMF() &&
+ "Invalid machine functions when cloning instruction symbols!");
+
+ setPreInstrSymbol(MF, MI.getPreInstrSymbol());
+ setPostInstrSymbol(MF, MI.getPostInstrSymbol());
+}
+
uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
// For now, just return the union of the flags. If the flags get more
// complicated over time, we might need more logic here.
return getFlags() | Other.getFlags();
}
-void MachineInstr::copyIRFlags(const Instruction &I) {
+uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
+ uint16_t MIFlags = 0;
// Copy the wrapping flags.
if (const OverflowingBinaryOperator *OB =
dyn_cast<OverflowingBinaryOperator>(&I)) {
if (OB->hasNoSignedWrap())
- setFlag(MachineInstr::MIFlag::NoSWrap);
+ MIFlags |= MachineInstr::MIFlag::NoSWrap;
if (OB->hasNoUnsignedWrap())
- setFlag(MachineInstr::MIFlag::NoUWrap);
+ MIFlags |= MachineInstr::MIFlag::NoUWrap;
}
// Copy the exact flag.
if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
if (PE->isExact())
- setFlag(MachineInstr::MIFlag::IsExact);
+ MIFlags |= MachineInstr::MIFlag::IsExact;
// Copy the fast-math flags.
if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
const FastMathFlags Flags = FP->getFastMathFlags();
if (Flags.noNaNs())
- setFlag(MachineInstr::MIFlag::FmNoNans);
+ MIFlags |= MachineInstr::MIFlag::FmNoNans;
if (Flags.noInfs())
- setFlag(MachineInstr::MIFlag::FmNoInfs);
+ MIFlags |= MachineInstr::MIFlag::FmNoInfs;
if (Flags.noSignedZeros())
- setFlag(MachineInstr::MIFlag::FmNsz);
+ MIFlags |= MachineInstr::MIFlag::FmNsz;
if (Flags.allowReciprocal())
- setFlag(MachineInstr::MIFlag::FmArcp);
+ MIFlags |= MachineInstr::MIFlag::FmArcp;
if (Flags.allowContract())
- setFlag(MachineInstr::MIFlag::FmContract);
+ MIFlags |= MachineInstr::MIFlag::FmContract;
if (Flags.approxFunc())
- setFlag(MachineInstr::MIFlag::FmAfn);
+ MIFlags |= MachineInstr::MIFlag::FmAfn;
if (Flags.allowReassoc())
- setFlag(MachineInstr::MIFlag::FmReassoc);
+ MIFlags |= MachineInstr::MIFlag::FmReassoc;
}
+
+ return MIFlags;
+}
+
+void MachineInstr::copyIRFlags(const Instruction &I) {
+ Flags = copyFlagsFromInstruction(I);
}
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
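
Note: two things land in the hunk above. cloneInstrSymbols() copies the pre-/post-instruction symbols from one instruction to another within the same function, and the flag-copying refactor extracts the IR-to-MI flag translation into copyFlagsFromInstruction() so callers can obtain the flag mask without mutating an instruction; copyIRFlags() is now a thin wrapper that assigns the computed mask. A minimal usage sketch, assuming an IR Instruction &I, an OldMI/NewMI pair, and their MachineFunction &MF (names are illustrative):

    // Compute the MIFlags mask for an IR instruction without touching NewMI.
    uint16_t Flags = MachineInstr::copyFlagsFromInstruction(I);
    NewMI->setFlags(Flags);            // or simply: NewMI->copyIRFlags(I);

    // Preserve any pre-/post-instruction symbols when replacing OldMI.
    NewMI->cloneInstrSymbols(MF, *OldMI);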
@@ -1157,7 +1178,7 @@ bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
}
if (isPosition() || isDebugInstr() || isTerminator() ||
- hasUnmodeledSideEffects())
+ mayRaiseFPException() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
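
Note: the new mayRaiseFPException() test makes isSafeToMove() conservative about instructions that can trap on floating-point exceptions (part of the constrained-FP work in this release); such instructions must not be reordered past code that observes FP status. The typical caller pattern, sketched for a MachineInstr &MI and AliasAnalysis *AA (the hoist() helper is hypothetical):

    bool SawStore = false;
    if (MI.isSafeToMove(AA, SawStore))
      // No labels, terminators, unmodeled side effects, and, after this
      // patch, no possible FP exception: the instruction may be moved.
      hoist(MI);  // hypothetical code-motion helper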
@@ -1173,8 +1194,8 @@ bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
return true;
}
-bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
- bool UseTBAA) {
+bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
+ bool UseTBAA) const {
const MachineFunction *MF = getMF();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
@@ -1304,7 +1325,11 @@ bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
for (MachineMemOperand *MMO : memoperands()) {
- if (MMO->isVolatile()) return false;
+ if (!MMO->isUnordered())
+ // If the memory operand has ordering side effects, we can't move the
+ // instruction. Such an instruction is technically an invariant load,
+ // but the caller code would need to be updated to expect that.
+ return false;
if (MMO->isStore()) return false;
if (MMO->isInvariant() && MMO->isDereferenceable())
continue;
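
Note: switching from isVolatile() to !isUnordered() strengthens the bailout above. isUnordered() is true only for plain, non-volatile accesses with no atomic ordering, so volatile and atomic (acquire/release/seq_cst) loads are now both rejected rather than volatile ones alone. A sketch of the relationship between the old and new predicates (illustrative helpers, not library code):

    // Old bailout: only volatile accesses were rejected.
    static bool rejectedBefore(const MachineMemOperand *MMO) {
      return MMO->isVolatile();
    }
    // New bailout: volatile OR any atomic ordering is rejected.
    static bool rejectedAfter(const MachineMemOperand *MMO) {
      return !MMO->isUnordered();
    }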
@@ -1447,7 +1472,7 @@ void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
ModuleSlotTracker MST(M);
if (F)
MST.incorporateFunction(*F);
- print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, TII);
+ print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
}
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
@@ -1519,6 +1544,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "nsw ";
if (getFlag(MachineInstr::IsExact))
OS << "exact ";
+ if (getFlag(MachineInstr::FPExcept))
+ OS << "fpexcept ";
// Print the opcode name.
if (TII)
@@ -1905,7 +1932,7 @@ void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
void MachineInstr::addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
+ MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
if (MO)
return;
} else {
@@ -2050,7 +2077,7 @@ static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
const DIExpression *Expr = MI.getDebugExpression();
if (MI.isIndirectDebugValue()) {
assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
- Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
+ Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
}
return Expr;
}
@@ -2100,3 +2127,54 @@ void MachineInstr::changeDebugValuesDefReg(unsigned Reg) {
for (auto *DBI : DbgValues)
DBI->getOperand(0).setReg(Reg);
}
+
+using MMOList = SmallVector<const MachineMemOperand *, 2>;
+
+static unsigned getSpillSlotSize(MMOList &Accesses,
+ const MachineFrameInfo &MFI) {
+ unsigned Size = 0;
+ for (auto A : Accesses)
+ if (MFI.isSpillSlotObjectIndex(
+ cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
+ ->getFrameIndex()))
+ Size += A->getSize();
+ return Size;
+}
+
+Optional<unsigned>
+MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
+ int FI;
+ if (TII->isStoreToStackSlotPostFE(*this, FI)) {
+ const MachineFrameInfo &MFI = getMF()->getFrameInfo();
+ if (MFI.isSpillSlotObjectIndex(FI))
+ return (*memoperands_begin())->getSize();
+ }
+ return None;
+}
+
+Optional<unsigned>
+MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
+ MMOList Accesses;
+ if (TII->hasStoreToStackSlot(*this, Accesses))
+ return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
+ return None;
+}
+
+Optional<unsigned>
+MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
+ int FI;
+ if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
+ const MachineFrameInfo &MFI = getMF()->getFrameInfo();
+ if (MFI.isSpillSlotObjectIndex(FI))
+ return (*memoperands_begin())->getSize();
+ }
+ return None;
+}
+
+Optional<unsigned>
+MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
+ MMOList Accesses;
+ if (TII->hasLoadFromStackSlot(*this, Accesses))
+ return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
+ return None;
+}
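
Note: the four accessors added above share getSpillSlotSize() for the folded cases. A direct spill or reload is recognized through the target's PostFE stack-slot hooks and reports the size of its single memory operand, while a folded one sums the sizes of all accesses that actually hit a spill slot. A minimal consumer sketch, assuming a MachineFunction &MF (an illustrative loop in the spirit of the spill/reload annotations the asm printer emits):

    const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
    unsigned SpillBytes = 0, RestoreBytes = 0;
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB) {
        if (Optional<unsigned> Sz = MI.getSpillSize(TII))
          SpillBytes += *Sz;
        else if (Optional<unsigned> Sz = MI.getFoldedSpillSize(TII))
          SpillBytes += *Sz;
        if (Optional<unsigned> Sz = MI.getRestoreSize(TII))
          RestoreBytes += *Sz;
        else if (Optional<unsigned> Sz = MI.getFoldedRestoreSize(TII))
          RestoreBytes += *Sz;
      }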