path: root/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp  61
1 file changed, 27 insertions(+), 34 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
index 7da28ffec85c..ebeff1fec30b 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
@@ -88,19 +88,25 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = DL.getStructLayout(STy);
+ // If the Offsets aren't needed, don't query the struct layout. This allows
+ // us to support structs with scalable vectors for operations that don't
+ // need offsets.
+ const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
- EI != EE; ++EI)
+ EI != EE; ++EI) {
+ // Don't compute the element offset if we didn't get a StructLayout above.
+ uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
- StartingOffset + SL->getElementOffset(EI - EB));
+ StartingOffset + EltOffset);
+ }
return;
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = DL.getTypeAllocSize(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
StartingOffset + i * EltSize);
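
The hunk above makes the struct-layout query conditional. A rough sketch of the effect, assuming the ComputeValueVTs overload whose Offsets pointer defaults to null: a struct wrapping a scalable vector can now be split into value types as long as the caller does not request byte offsets, since only the offset computation needs a fixed StructLayout.

    // Hedged sketch; Ctx, TLI and DL are assumed to be in scope.
    Type *ScalableTy = ScalableVectorType::get(Type::getInt32Ty(Ctx), 4);
    SmallVector<EVT, 4> ValueVTs;
    // No Offsets requested, so DL.getStructLayout() is never queried and the
    // scalable element does not force a fixed-size layout computation.
    ComputeValueVTs(TLI, DL, StructType::get(Ctx, ScalableTy), ValueVTs,
                    /*Offsets=*/nullptr);
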
@@ -131,16 +137,21 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(&Ty)) {
- const StructLayout *SL = DL.getStructLayout(STy);
- for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
+ // If the Offsets aren't needed, don't query the struct layout. This allows
+ // us to support structs with scalable vectors for operations that don't
+ // need offsets.
+ const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
+ for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
+ uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
- StartingOffset + SL->getElementOffset(I));
+ StartingOffset + EltOffset);
+ }
return;
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = DL.getTypeAllocSize(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
StartingOffset + i * EltSize);
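
Note on the getTypeAllocSize() change in both hunks: the result is a TypeSize rather than a plain integer, and getFixedValue() asserts that the size is not scalable, which spells out the assumption that array element types reaching this code have a compile-time-known size. A minimal sketch of that distinction (the helper name is hypothetical):

    // Hypothetical helper, not part of LLVM.
    uint64_t fixedAllocSize(const DataLayout &DL, Type *EltTy) {
      TypeSize TS = DL.getTypeAllocSize(EltTy);
      // getFixedValue() would assert if TS were scalable (a multiple of
      // vscale), so guard explicitly in this sketch.
      assert(!TS.isScalable() && "arrays of scalable vectors not handled here");
      return TS.getFixedValue();
    }
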
@@ -174,27 +185,6 @@ GlobalValue *llvm::ExtractTypeInfo(Value *V) {
return GV;
}
-/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
-/// processed uses a memory 'm' constraint.
-bool
-llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
- const TargetLowering &TLI) {
- for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
- InlineAsm::ConstraintInfo &CI = CInfos[i];
- for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
- TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
- if (CType == TargetLowering::C_Memory)
- return true;
- }
-
- // Indirect operand accesses access memory.
- if (CI.isIndirect)
- return true;
- }
-
- return false;
-}
-
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
@@ -537,11 +527,15 @@ bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
// Debug info intrinsics do not get in the way of tail call optimization.
if (isa<DbgInfoIntrinsic>(BBI))
continue;
- // A lifetime end or assume intrinsic should not stop tail call
- // optimization.
+ // Pseudo probe intrinsics do not block tail call optimization either.
+ if (isa<PseudoProbeInst>(BBI))
+ continue;
+ // A lifetime end, assume or noalias.decl intrinsic should not stop tail
+ // call optimization.
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
- II->getIntrinsicID() == Intrinsic::assume)
+ II->getIntrinsicID() == Intrinsic::assume ||
+ II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
continue;
if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(&*BBI))
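
Taken together with the earlier exception for debug-info intrinsics, the loop above now skips a small set of side-effect-free marker instructions when deciding whether the call is in tail position. A hedged restatement as a standalone predicate (the helper is hypothetical, not an LLVM API):

    // Hypothetical helper summarizing the conditions in the hunk above.
    static bool ignorableForTailCall(const Instruction &I) {
      if (isa<DbgInfoIntrinsic>(I) || isa<PseudoProbeInst>(I))
        return true;
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        return II->getIntrinsicID() == Intrinsic::lifetime_end ||
               II->getIntrinsicID() == Intrinsic::assume ||
               II->getIntrinsicID() ==
                   Intrinsic::experimental_noalias_scope_decl;
      return false;
    }
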
@@ -739,8 +733,7 @@ static void collectEHScopeMembers(
if (Visiting->isEHScopeReturnBlock())
continue;
- for (const MachineBasicBlock *Succ : Visiting->successors())
- Worklist.push_back(Succ);
+ append_range(Worklist, Visiting->successors());
}
}
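
append_range() comes from llvm/ADT/STLExtras.h and is behaviorally equivalent to the loop it replaces; roughly (a sketch, not the exact library definition):

    // Same effect as: for (auto *Succ : R) C.push_back(Succ);
    template <typename Container, typename Range>
    void append_range_sketch(Container &C, Range &&R) {
      C.insert(C.end(), adl_begin(R), adl_end(R));
    }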