author    Dimitry Andric <dim@FreeBSD.org>    2018-07-28 10:51:19 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2018-07-28 10:51:19 +0000
commit    eb11fae6d08f479c0799db45860a98af528fa6e7 (patch)
tree      44d492a50c8c1a7eb8e2d17ea3360ec4d066f042 /lib/CodeGen/TargetLoweringBase.cpp
parent    b8a2042aa938069e862750553db0e4d82d25822c (diff)
Diffstat (limited to 'lib/CodeGen/TargetLoweringBase.cpp')
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp | 80
1 file changed, 68 insertions(+), 12 deletions(-)
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 224ae1a3236a..43f4bad595e3 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
@@ -50,6 +49,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
@@ -118,7 +118,7 @@ static cl::opt<int> MinPercentageForPredictableBranch(
void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
setLibcallName(RTLIB::code, name);
-#include "llvm/CodeGen/RuntimeLibcalls.def"
+#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
// Initialize calling conventions to their default.
for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
@@ -132,9 +132,18 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
- // Darwin 10 and higher has an optimized __bzero.
- if (!TT.isMacOSX() || !TT.isMacOSXVersionLT(10, 6) || TT.isArch64Bit()) {
- setLibcallName(RTLIB::BZERO, TT.isAArch64() ? "bzero" : "__bzero");
+ // Some darwins have an optimized __bzero/bzero function.
+ switch (TT.getArch()) {
+ case Triple::x86:
+ case Triple::x86_64:
+ if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
+ setLibcallName(RTLIB::BZERO, "__bzero");
+ break;
+ case Triple::aarch64:
+ setLibcallName(RTLIB::BZERO, "bzero");
+ break;
+ default:
+ break;
}
if (darwinHasSinCos(TT)) {
@@ -183,6 +192,9 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
return FPEXT_F64_F128;
else if (RetVT == MVT::ppcf128)
return FPEXT_F64_PPCF128;
+ } else if (OpVT == MVT::f80) {
+ if (RetVT == MVT::f128)
+ return FPEXT_F80_F128;
}
return UNKNOWN_LIBCALL;
@@ -218,6 +230,9 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
return FPROUND_F128_F64;
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F64;
+ } else if (RetVT == MVT::f80) {
+ if (OpVT == MVT::f128)
+ return FPROUND_F128_F80;
}
return UNKNOWN_LIBCALL;
@@ -520,6 +535,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
// Perform these initializations only once.
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
MaxLoadsPerMemcmp = 8;
+ MaxGluedStoresPerMemcpy = 0;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
UseUnderscoreSetJmp = false;
@@ -605,6 +621,12 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SUBCARRY, VT, Expand);
setOperationAction(ISD::SETCCCARRY, VT, Expand);
+ // ADDC/ADDE/SUBC/SUBE default to expand.
+ setOperationAction(ISD::ADDC, VT, Expand);
+ setOperationAction(ISD::ADDE, VT, Expand);
+ setOperationAction(ISD::SUBC, VT, Expand);
+ setOperationAction(ISD::SUBE, VT, Expand);
+
// These default to Expand so they will be expanded to CTLZ/CTTZ by default.
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
@@ -670,12 +692,13 @@ MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}
-EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
- const DataLayout &DL) const {
+EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
+ bool LegalTypes) const {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
- return getScalarShiftAmountTy(DL, LHSTy);
+ return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
+ : getPointerTy(DL);
}
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
@@ -970,6 +993,36 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
return MBB;
}
+MachineBasicBlock *
+TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
+ "Called emitXRayCustomEvent on the wrong MI!");
+ auto &MF = *MI.getMF();
+ auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
+ for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
+ MIB.add(MI.getOperand(OpIdx));
+
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ MI.eraseFromParent();
+ return MBB;
+}
+
+MachineBasicBlock *
+TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
+ "Called emitXRayTypedEvent on the wrong MI!");
+ auto &MF = *MI.getMF();
+ auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
+ for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
+ MIB.add(MI.getOperand(OpIdx));
+
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ MI.eraseFromParent();
+ return MBB;
+}
+
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
@@ -1578,13 +1631,16 @@ Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
- M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
+ if (!M.getNamedValue("__stack_chk_guard"))
+ new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
+ GlobalVariable::ExternalLinkage,
+ nullptr, "__stack_chk_guard");
}
// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
- return M.getGlobalVariable("__stack_chk_guard", true);
+ return M.getNamedValue("__stack_chk_guard");
}
Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
@@ -1674,7 +1730,7 @@ static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
return TargetLoweringBase::ReciprocalEstimate::Unspecified;
SmallVector<StringRef, 4> OverrideVector;
- SplitString(Override, OverrideVector, ",");
+ Override.split(OverrideVector, ',');
unsigned NumArgs = OverrideVector.size();
// Check if "all", "none", or "default" was specified.
@@ -1734,7 +1790,7 @@ static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
return TargetLoweringBase::ReciprocalEstimate::Unspecified;
SmallVector<StringRef, 4> OverrideVector;
- SplitString(Override, OverrideVector, ",");
+ Override.split(OverrideVector, ',');
unsigned NumArgs = OverrideVector.size();
// Check if "all", "default", or "none" was specified.