about summary refs log tree commit diff
path: root/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h')
-rw-r--r-- contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h | 55
1 file changed, 29 insertions, 26 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
index 50c7e2c319f6..af110884049b 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
@@ -249,9 +249,6 @@ namespace llvm {
SCALEFS,
SCALEFS_RND,
- // Unsigned Integer average.
- AVG,
-
/// Integer horizontal add/sub.
HADD,
HSUB,
@@ -790,6 +787,9 @@ namespace llvm {
LOR,
LXOR,
LAND,
+ LBTS,
+ LBTC,
+ LBTR,
// Load, scalar_to_vector, and zero extend.
VZEXT_LOAD,
@@ -1039,10 +1039,7 @@ namespace llvm {
bool isCtlzFast() const override;
- bool hasBitPreservingFPLogic(EVT VT) const override {
- return VT == MVT::f32 || VT == MVT::f64 || VT.isVector() ||
- (VT == MVT::f16 && X86ScalarSSEf16);
- }
+ bool hasBitPreservingFPLogic(EVT VT) const override;
bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
// If the pair to store is a mixture of float and int values, we will
@@ -1163,6 +1160,19 @@ namespace llvm {
APInt &UndefElts,
unsigned Depth) const override;
+ bool isTargetCanonicalConstantNode(SDValue Op) const override {
+ // Peek through bitcasts/extracts/inserts to see if we have a broadcast
+ // vector from memory.
+ while (Op.getOpcode() == ISD::BITCAST ||
+ Op.getOpcode() == ISD::EXTRACT_SUBVECTOR ||
+ (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ Op.getOperand(0).isUndef()))
+ Op = Op.getOperand(Op.getOpcode() == ISD::INSERT_SUBVECTOR ? 1 : 0);
+
+ return Op.getOpcode() == X86ISD::VBROADCAST_LOAD ||
+ TargetLowering::isTargetCanonicalConstantNode(Op);
+ }
+
const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
SDValue unwrapAddress(SDValue N) const override;
@@ -1319,15 +1329,13 @@ namespace llvm {
/// Returns true if lowering to a jump table is allowed.
bool areJTsAllowed(const Function *Fn) const override;
+ MVT getPreferredSwitchConditionType(LLVMContext &Context,
+ EVT ConditionVT) const override;
+
/// If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime.
- bool ShouldShrinkFPConstant(EVT VT) const override {
- // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
- // expensive than a straight movsd. On the other hand, it's important to
- // shrink long double fp constant since fldt is very slow.
- return !X86ScalarSSEf64 || VT == MVT::f80;
- }
+ bool ShouldShrinkFPConstant(EVT VT) const override;
/// Return true if we believe it is correct and profitable to reduce the
/// load node to a smaller type.
@@ -1336,11 +1344,7 @@ namespace llvm {
/// Return true if the specified scalar FP type is computed in an SSE
/// register, not on the X87 floating point stack.
- bool isScalarFPTypeInSSEReg(EVT VT) const {
- return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
- (VT == MVT::f32 && X86ScalarSSEf32) || // f32 is when SSE1
- (VT == MVT::f16 && X86ScalarSSEf16); // f16 is when AVX512FP16
- }
+ bool isScalarFPTypeInSSEReg(EVT VT) const;
/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
@@ -1494,13 +1498,6 @@ namespace llvm {
/// make the right decision when generating code for different targets.
const X86Subtarget &Subtarget;
- /// Select between SSE or x87 floating point ops.
- /// When SSE is available, use it for f32 operations.
- /// When SSE2 is available, use it for f64 operations.
- bool X86ScalarSSEf32;
- bool X86ScalarSSEf64;
- bool X86ScalarSSEf16;
-
/// A list of legal FP immediates.
std::vector<APFloat> LegalFPImmediates;
@@ -1640,9 +1637,13 @@ namespace llvm {
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
- bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+ TargetLoweringBase::AtomicExpansionKind
+ shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+ TargetLoweringBase::AtomicExpansionKind
+ shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const;
+ void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
LoadInst *
lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
@@ -1652,6 +1653,8 @@ namespace llvm {
bool needsCmpXchgNb(Type *MemType) const;
+ template<typename T> bool isSoftFP16(T VT) const;
+
void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB, int FI) const;