Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp  272
1 file changed, 197 insertions(+), 75 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 42c1fa8af0e6..28c8bd0a7ded 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -135,23 +135,28 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
// For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
- if (TT.getArch() == Triple::ppc || TT.isPPC64()) {
+ if (TT.isPPC()) {
setLibcallName(RTLIB::ADD_F128, "__addkf3");
setLibcallName(RTLIB::SUB_F128, "__subkf3");
setLibcallName(RTLIB::MUL_F128, "__mulkf3");
setLibcallName(RTLIB::DIV_F128, "__divkf3");
+ setLibcallName(RTLIB::POWI_F128, "__powikf2");
setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
+ setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
+ setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
+ setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
setLibcallName(RTLIB::UNE_F128, "__nekf2");
setLibcallName(RTLIB::OGE_F128, "__gekf2");
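For background on the names above: generic IEEE-quad libcalls are mangled with "tf" (e.g. __addtf3, __floatsitf), but on PPC "tf" refers to the legacy ppc_fp128 double-double type, so the IEEE __float128 variants use "kf" instead. A standalone sketch of the renaming pattern, illustrative only (the real code simply hard-codes each name):

    #include <string>
    // Illustrative only, not an LLVM API: maps a generic IEEE-quad
    // libcall name such as "__addtf3" or "__floatsitf" to the PPC "kf"
    // form ("__addkf3", "__floatsikf").
    std::string ppcKFName(std::string Name) {
      auto Pos = Name.rfind("tf"); // the last "tf" is the type suffix
      if (Pos != std::string::npos)
        Name.replace(Pos, 2, "kf");
      return Name;
    }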
@@ -224,6 +229,10 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f16) {
if (RetVT == MVT::f32)
return FPEXT_F16_F32;
+ if (RetVT == MVT::f64)
+ return FPEXT_F16_F64;
+ if (RetVT == MVT::f128)
+ return FPEXT_F16_F128;
} else if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
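With the new f16 entries, code that softens an FP_EXTEND from half can resolve its libcall directly. A hedged usage sketch (the surrounding legalizer plumbing is omitted):

    RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f16, MVT::f128);
    // Previously UNKNOWN_LIBCALL; the new branch yields FPEXT_F16_F128.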
@@ -285,7 +294,14 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
- if (OpVT == MVT::f32) {
+ if (OpVT == MVT::f16) {
+ if (RetVT == MVT::i32)
+ return FPTOSINT_F16_I32;
+ if (RetVT == MVT::i64)
+ return FPTOSINT_F16_I64;
+ if (RetVT == MVT::i128)
+ return FPTOSINT_F16_I128;
+ } else if (OpVT == MVT::f32) {
if (RetVT == MVT::i32)
return FPTOSINT_F32_I32;
if (RetVT == MVT::i64)
@@ -327,7 +343,14 @@ RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
- if (OpVT == MVT::f32) {
+ if (OpVT == MVT::f16) {
+ if (RetVT == MVT::i32)
+ return FPTOUINT_F16_I32;
+ if (RetVT == MVT::i64)
+ return FPTOUINT_F16_I64;
+ if (RetVT == MVT::i128)
+ return FPTOUINT_F16_I128;
+ } else if (OpVT == MVT::f32) {
if (RetVT == MVT::i32)
return FPTOUINT_F32_I32;
if (RetVT == MVT::i64)
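The f16 handling mirrors getFPTOSINT in both helpers; a short sketch of what the new branches make resolvable:

    RTLIB::Libcall A = RTLIB::getFPTOSINT(MVT::f16, MVT::i128); // FPTOSINT_F16_I128
    RTLIB::Libcall B = RTLIB::getFPTOUINT(MVT::f16, MVT::i32);  // FPTOUINT_F16_I32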
@@ -370,6 +393,8 @@ RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
+ if (RetVT == MVT::f16)
+ return SINTTOFP_I32_F16;
if (RetVT == MVT::f32)
return SINTTOFP_I32_F32;
if (RetVT == MVT::f64)
@@ -381,6 +406,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::ppcf128)
return SINTTOFP_I32_PPCF128;
} else if (OpVT == MVT::i64) {
+ if (RetVT == MVT::f16)
+ return SINTTOFP_I64_F16;
if (RetVT == MVT::f32)
return SINTTOFP_I64_F32;
if (RetVT == MVT::f64)
@@ -392,6 +419,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::ppcf128)
return SINTTOFP_I64_PPCF128;
} else if (OpVT == MVT::i128) {
+ if (RetVT == MVT::f16)
+ return SINTTOFP_I128_F16;
if (RetVT == MVT::f32)
return SINTTOFP_I128_F32;
if (RetVT == MVT::f64)
@@ -410,6 +439,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
+ if (RetVT == MVT::f16)
+ return UINTTOFP_I32_F16;
if (RetVT == MVT::f32)
return UINTTOFP_I32_F32;
if (RetVT == MVT::f64)
@@ -421,6 +452,8 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::ppcf128)
return UINTTOFP_I32_PPCF128;
} else if (OpVT == MVT::i64) {
+ if (RetVT == MVT::f16)
+ return UINTTOFP_I64_F16;
if (RetVT == MVT::f32)
return UINTTOFP_I64_F32;
if (RetVT == MVT::f64)
@@ -432,6 +465,8 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::ppcf128)
return UINTTOFP_I64_PPCF128;
} else if (OpVT == MVT::i128) {
+ if (RetVT == MVT::f16)
+ return UINTTOFP_I128_F16;
if (RetVT == MVT::f32)
return UINTTOFP_I128_F32;
if (RetVT == MVT::f64)
@@ -446,6 +481,83 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
return UNKNOWN_LIBCALL;
}
+RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
+ MVT VT) {
+ unsigned ModeN, ModelN;
+ switch (VT.SimpleTy) {
+ case MVT::i8:
+ ModeN = 0;
+ break;
+ case MVT::i16:
+ ModeN = 1;
+ break;
+ case MVT::i32:
+ ModeN = 2;
+ break;
+ case MVT::i64:
+ ModeN = 3;
+ break;
+ case MVT::i128:
+ ModeN = 4;
+ break;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+
+ switch (Order) {
+ case AtomicOrdering::Monotonic:
+ ModelN = 0;
+ break;
+ case AtomicOrdering::Acquire:
+ ModelN = 1;
+ break;
+ case AtomicOrdering::Release:
+ ModelN = 2;
+ break;
+ case AtomicOrdering::AcquireRelease:
+ case AtomicOrdering::SequentiallyConsistent:
+ ModelN = 3;
+ break;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+
+#define LCALLS(A, B) \
+ { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
+#define LCALL5(A) \
+ LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
+ switch (Opc) {
+ case ISD::ATOMIC_CMP_SWAP: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
+ return LC[ModeN][ModelN];
+ }
+ case ISD::ATOMIC_SWAP: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
+ return LC[ModeN][ModelN];
+ }
+ case ISD::ATOMIC_LOAD_ADD: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
+ return LC[ModeN][ModelN];
+ }
+ case ISD::ATOMIC_LOAD_OR: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
+ return LC[ModeN][ModelN];
+ }
+ case ISD::ATOMIC_LOAD_CLR: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
+ return LC[ModeN][ModelN];
+ }
+ case ISD::ATOMIC_LOAD_XOR: {
+ const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
+ return LC[ModeN][ModelN];
+ }
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+#undef LCALLS
+#undef LCALL5
+}
+
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
case Name: \
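The new getOUTLINE_ATOMIC indexes a 5x4 table: ModeN selects the operand size (i8, i16, i32, i64, i128) and ModelN the memory ordering, with seq_cst conservatively folded into acq_rel. A hedged usage sketch, assuming the AArch64 outline-atomic helper naming (__aarch64_<op><bytes>_<order>):

    // An acquire i32 compare-and-swap: ModeN = 2, ModelN = 1, so the
    // table yields OUTLINE_ATOMIC_CAS4_ACQ, i.e. the __aarch64_cas4_acq
    // runtime helper.
    RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(
        ISD::ATOMIC_CMP_SWAP, AtomicOrdering::Acquire, MVT::i32);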
@@ -615,7 +727,7 @@ void TargetLoweringBase::initActions() {
std::end(TargetDAGCombineArray), 0);
for (MVT VT : MVT::fp_valuetypes()) {
- MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits().getFixedSize());
+ MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
if (IntVT.isValid()) {
setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
@@ -657,6 +769,8 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::UADDSAT, VT, Expand);
setOperationAction(ISD::SSUBSAT, VT, Expand);
setOperationAction(ISD::USUBSAT, VT, Expand);
+ setOperationAction(ISD::SSHLSAT, VT, Expand);
+ setOperationAction(ISD::USHLSAT, VT, Expand);
setOperationAction(ISD::SMULFIX, VT, Expand);
setOperationAction(ISD::SMULFIXSAT, VT, Expand);
setOperationAction(ISD::UMULFIX, VT, Expand);
@@ -665,6 +779,8 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
setOperationAction(ISD::UDIVFIX, VT, Expand);
setOperationAction(ISD::UDIVFIXSAT, VT, Expand);
+ setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
+ setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);
// Overflow operations default to expand
setOperationAction(ISD::SADDO, VT, Expand);
@@ -678,6 +794,8 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::ADDCARRY, VT, Expand);
setOperationAction(ISD::SUBCARRY, VT, Expand);
setOperationAction(ISD::SETCCCARRY, VT, Expand);
+ setOperationAction(ISD::SADDO_CARRY, VT, Expand);
+ setOperationAction(ISD::SSUBO_CARRY, VT, Expand);
// ADDC/ADDE/SUBC/SUBE default to expand.
setOperationAction(ISD::ADDC, VT, Expand);
@@ -690,6 +808,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::BITREVERSE, VT, Expand);
+ setOperationAction(ISD::PARITY, VT, Expand);
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
@@ -728,6 +847,8 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
+ setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
+ setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
}
// Most targets ignore the @llvm.prefetch intrinsic.
@@ -772,6 +893,8 @@ void TargetLoweringBase::initActions() {
// On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
// here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
+
+ setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
}
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
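All of the nodes added above (SSHLSAT/USHLSAT, FP_TO_SINT_SAT/FP_TO_UINT_SAT, SADDO_CARRY/SSUBO_CARRY, PARITY, the sequential FP reductions, UBSANTRAP) therefore default to expansion; a backend opts in per type. A hedged sketch of what that looks like in a hypothetical target's TargetLowering constructor (the Legal/Custom choices below are assumptions, not taken from any in-tree target):

    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom); // target lowers it
    setOperationAction(ISD::PARITY, MVT::i32, Legal);          // native instruction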
@@ -801,6 +924,11 @@ bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
}
}
+bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
+ unsigned DestAS) const {
+ return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
+}
+
void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
// If the command-line option was specified, ignore this request.
if (!JumpIsExpensiveOverride.getNumOccurrences())
@@ -823,9 +951,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
"Promote may not follow Expand or Promote");
if (LA == TypeSplitVector)
- return LegalizeKind(LA,
- EVT::getVectorVT(Context, SVT.getVectorElementType(),
- SVT.getVectorElementCount() / 2));
+ return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
if (LA == TypeScalarizeVector)
return LegalizeKind(LA, SVT.getVectorElementType());
return LegalizeKind(LA, NVT);
@@ -856,10 +982,10 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
EVT EltVT = VT.getVectorElementType();
// Vectors with only one element are always scalarized.
- if (NumElts == 1)
+ if (NumElts.isScalar())
return LegalizeKind(TypeScalarizeVector, EltVT);
- if (VT.getVectorElementCount() == ElementCount(1, true))
+ if (VT.getVectorElementCount() == ElementCount::getScalable(1))
report_fatal_error("Cannot legalize this vector");
// Try to widen vector elements until the element type is a power of two and
@@ -869,7 +995,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
// Vectors with a number of elements that is not a power of two are always
// widened, for example <3 x i8> -> <4 x i8>.
if (!VT.isPow2VectorType()) {
- NumElts = NumElts.NextPowerOf2();
+ NumElts = NumElts.coefficientNextPowerOf2();
EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
return LegalizeKind(TypeWidenVector, NVT);
}
@@ -881,7 +1007,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
// <4 x i140> -> <2 x i140>
if (LK.first == TypeExpandInteger)
return LegalizeKind(TypeSplitVector,
- EVT::getVectorVT(Context, EltVT, NumElts / 2));
+ VT.getHalfNumVectorElementsVT(Context));
// Promote the integer element types until a legal vector type is found
// or until the element integer type is too big. If a legal type was not
@@ -918,7 +1044,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
// If there is no wider legal type, split the vector.
while (true) {
// Round up to the next power of 2.
- NumElts = NumElts.NextPowerOf2();
+ NumElts = NumElts.coefficientNextPowerOf2();
// If there is no simple vector type with this many elements then there
// cannot be a larger legal vector type. Note that this assumes that
@@ -941,7 +1067,8 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
}
// Vectors with illegal element types are expanded.
- EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorElementCount() / 2);
+ EVT NVT = EVT::getVectorVT(Context, EltVT,
+ VT.getVectorElementCount().divideCoefficientBy(2));
return LegalizeKind(TypeSplitVector, NVT);
}
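Most of the churn in this function is the migration from ElementCount's raw fields (EC.Min, EC.Scalable) to its accessor/arithmetic API, which keeps the scalable flag intact through every operation. A standalone sketch of the semantics, using only the calls that appear in this diff:

    ElementCount EC = ElementCount::getScalable(6); // <vscale x 6 x ...>
    EC = EC.coefficientNextPowerOf2();              // <vscale x 8 x ...>
    EC = EC.divideCoefficientBy(2);                 // <vscale x 4 x ...>
    // EC.isScalable() == true, EC.getKnownMinValue() == 4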
@@ -957,23 +1084,24 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
// Scalable vectors cannot be scalarized, so splitting or widening is
// required.
- if (VT.isScalableVector() && !isPowerOf2_32(EC.Min))
+ if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
llvm_unreachable(
"Splitting or widening of non-power-of-2 MVTs is not implemented.");
// FIXME: We don't support non-power-of-2-sized vectors for now.
// Ideally we could break down into LHS/RHS like LegalizeDAG does.
- if (!isPowerOf2_32(EC.Min)) {
+ if (!isPowerOf2_32(EC.getKnownMinValue())) {
// Split EC to unit size (scalable property is preserved).
- NumVectorRegs = EC.Min;
- EC = EC / NumVectorRegs;
+ NumVectorRegs = EC.getKnownMinValue();
+ EC = ElementCount::getFixed(1);
}
// Divide the input until we get to a supported size. This will
// always end up with an EC that represent a scalar or a scalable
// scalar.
- while (EC.Min > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
- EC.Min >>= 1;
+ while (EC.getKnownMinValue() > 1 &&
+ !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
+ EC = EC.divideCoefficientBy(2);
NumVectorRegs <<= 1;
}
@@ -984,7 +1112,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
NewVT = EltTy;
IntermediateVT = NewVT;
- unsigned LaneSizeInBits = NewVT.getScalarSizeInBits().getFixedSize();
+ unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
// Convert sizes such as i33 to i64.
if (!isPowerOf2_32(LaneSizeInBits))
@@ -993,8 +1121,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
MVT DestVT = TLI->getRegisterType(NewVT);
RegisterVT = DestVT;
if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
- return NumVectorRegs *
- (LaneSizeInBits / DestVT.getScalarSizeInBits().getFixedSize());
+ return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
@@ -1041,9 +1168,19 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
// Inherit previous memory operands.
MIB.cloneMemRefs(*MI);
- for (auto &MO : MI->operands()) {
+ for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
+ MachineOperand &MO = MI->getOperand(i);
if (!MO.isFI()) {
+ // Index of the Def operand this Use is tied to.
+ // Since Defs come before Uses, if a Use is tied, then the
+ // index of its Def must be smaller than the index of that Use.
+ // Also, Defs preserve their position in the new MI.
+ unsigned TiedTo = i;
+ if (MO.isReg() && MO.isTied())
+ TiedTo = MI->findTiedOperandIdx(i);
MIB.add(MO);
+ if (TiedTo < i)
+ MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
continue;
}
@@ -1090,36 +1227,6 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
return MBB;
}
-MachineBasicBlock *
-TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
- MachineBasicBlock *MBB) const {
- assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
- "Called emitXRayCustomEvent on the wrong MI!");
- auto &MF = *MI.getMF();
- auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
- for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
- MIB.add(MI.getOperand(OpIdx));
-
- MBB->insert(MachineBasicBlock::iterator(MI), MIB);
- MI.eraseFromParent();
- return MBB;
-}
-
-MachineBasicBlock *
-TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
- MachineBasicBlock *MBB) const {
- assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
- "Called emitXRayTypedEvent on the wrong MI!");
- auto &MF = *MI.getMF();
- auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
- for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
- MIB.add(MI.getOperand(OpIdx));
-
- MBB->insert(MachineBasicBlock::iterator(MI), MIB);
- MI.eraseFromParent();
- return MBB;
-}
-
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
@@ -1282,7 +1389,7 @@ void TargetLoweringBase::computeRegisterProperties(
MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
- if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
+ if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
@@ -1298,13 +1405,15 @@ void TargetLoweringBase::computeRegisterProperties(
}
case TypeWidenVector:
- if (isPowerOf2_32(EC.Min)) {
+ if (isPowerOf2_32(EC.getKnownMinValue())) {
// Try to widen the vector.
for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
MVT SVT = (MVT::SimpleValueType) nVT;
if (SVT.getVectorElementType() == EltVT &&
SVT.isScalableVector() == IsScalable &&
- SVT.getVectorElementCount().Min > EC.Min && isTypeLegal(SVT)) {
+ SVT.getVectorElementCount().getKnownMinValue() >
+ EC.getKnownMinValue() &&
+ isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -1348,10 +1457,10 @@ void TargetLoweringBase::computeRegisterProperties(
ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
else if (PreferredAction == TypeSplitVector)
ValueTypeActions.setTypeAction(VT, TypeSplitVector);
- else if (EC.Min > 1)
+ else if (EC.getKnownMinValue() > 1)
ValueTypeActions.setTypeAction(VT, TypeSplitVector);
else
- ValueTypeActions.setTypeAction(VT, EC.Scalable
+ ValueTypeActions.setTypeAction(VT, EC.isScalable()
? TypeScalarizeScalableVector
: TypeScalarizeVector);
} else {
@@ -1409,7 +1518,8 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
// This handles things like <2 x float> -> <4 x float> and
// <4 x i1> -> <4 x i32>.
LegalizeTypeAction TA = getTypeAction(Context, VT);
- if (EltCnt.Min != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
+ if (EltCnt.getKnownMinValue() != 1 &&
+ (TA == TypeWidenVector || TA == TypePromoteInteger)) {
EVT RegisterEVT = getTypeToTransformTo(Context, VT);
if (isTypeLegal(RegisterEVT)) {
IntermediateVT = RegisterEVT;
@@ -1426,7 +1536,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
// Scalable vectors cannot be scalarized, so handle the legalisation of the
// types like done elsewhere in SelectionDAG.
- if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.Min)) {
+ if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
LegalizeKind LK;
EVT PartVT = VT;
do {
@@ -1435,15 +1545,15 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
PartVT = LK.second;
} while (LK.first != TypeLegal);
- NumIntermediates =
- VT.getVectorElementCount().Min / PartVT.getVectorElementCount().Min;
+ NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
+ PartVT.getVectorElementCount().getKnownMinValue();
// FIXME: This code needs to be extended to handle more complex vector
// breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
// supported cases are vectors that are broken down into equal parts
// such as nxv6i64 -> 3 x nxv2i64.
- assert(NumIntermediates * PartVT.getVectorElementCount().Min ==
- VT.getVectorElementCount().Min &&
+ assert((PartVT.getVectorElementCount() * NumIntermediates) ==
+ VT.getVectorElementCount() &&
"Expected an integer multiple of PartVT");
IntermediateVT = PartVT;
RegisterVT = getRegisterType(Context, IntermediateVT);
@@ -1452,16 +1562,16 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
// FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
// we could break down into LHS/RHS like LegalizeDAG does.
- if (!isPowerOf2_32(EltCnt.Min)) {
- NumVectorRegs = EltCnt.Min;
- EltCnt.Min = 1;
+ if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
+ NumVectorRegs = EltCnt.getKnownMinValue();
+ EltCnt = ElementCount::getFixed(1);
}
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
- while (EltCnt.Min > 1 &&
+ while (EltCnt.getKnownMinValue() > 1 &&
!isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
- EltCnt.Min >>= 1;
+ EltCnt = EltCnt.divideCoefficientBy(2);
NumVectorRegs <<= 1;
}
@@ -1479,7 +1589,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
TypeSize NewVTSize = NewVT.getSizeInBits();
// Convert sizes such as i33 to i64.
if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
- NewVTSize = NewVTSize.NextPowerOf2();
+ NewVTSize = NewVTSize.coefficientNextPowerOf2();
return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
}
@@ -1616,6 +1726,14 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
MMO.getFlags(), Fast);
}
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+ const DataLayout &DL, LLT Ty,
+ const MachineMemOperand &MMO,
+ bool *Fast) const {
+ return allowsMemoryAccess(Context, DL, getMVTForLLT(Ty), MMO.getAddrSpace(),
+ MMO.getAlign(), MMO.getFlags(), Fast);
+}
+
BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
return BranchProbability(MinPercentageForPredictableBranch, 100);
}
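The new overload lets GlobalISel code, which carries LLT rather than EVT/MVT, ask the same legality question; it simply converts via getMVTForLLT and unpacks the MachineMemOperand. A hedged usage sketch (Ctx, DL, and MMO assumed in scope):

    bool Fast = false;
    if (TLI.allowsMemoryAccess(Ctx, DL, LLT::scalar(64), *MMO, &Fast) && Fast) {
      // a wide access is both legal and fast here
    }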
@@ -1838,10 +1956,14 @@ Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
- if (!M.getNamedValue("__stack_chk_guard"))
- new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
- GlobalVariable::ExternalLinkage,
- nullptr, "__stack_chk_guard");
+ if (!M.getNamedValue("__stack_chk_guard")) {
+ auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
+ GlobalVariable::ExternalLinkage, nullptr,
+ "__stack_chk_guard");
+ if (TM.getRelocationModel() == Reloc::Static &&
+ !TM.getTargetTriple().isWindowsGNUEnvironment())
+ GV->setDSOLocal(true);
+ }
}
// Currently only support "standard" __stack_chk_guard.
@@ -1925,7 +2047,7 @@ static bool parseRefinementStep(StringRef In, size_t &Position,
// step parameter.
if (RefStepString.size() == 1) {
char RefStepChar = RefStepString[0];
- if (RefStepChar >= '0' && RefStepChar <= '9') {
+ if (isDigit(RefStepChar)) {
Value = RefStepChar - '0';
return true;
}
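llvm::isDigit (llvm/ADT/StringExtras.h) is the project's locale-independent replacement for hand-rolled range checks, so the change is behavior-preserving:

    // isDigit(C) <=> (C >= '0' && C <= '9'), with no locale dependence.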