Diffstat (limited to 'lib/CodeGen/Analysis.cpp')
 lib/CodeGen/Analysis.cpp | 35 +++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 3224fac25cb4..98d4c8afc7b9 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -81,27 +81,27 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
- SmallVectorImpl<EVT> &ValueVTs,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
EI != EE; ++EI)
- ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+ ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
StartingOffset + SL->getElementOffset(EI - EB));
return;
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
- ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+ ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
return;
}
@@ -109,7 +109,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
if (Ty->isVoidTy())
return;
// Base case: we can get an EVT for this LLVM IR type.
- ValueVTs.push_back(TLI.getValueType(Ty));
+ ValueVTs.push_back(TLI.getValueType(DL, Ty));
if (Offsets)
Offsets->push_back(StartingOffset);
}
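
This first hunk replaces the TLI-owned DataLayout with one supplied by the caller. A minimal sketch of the new call pattern, assuming a Module M, a TargetLowering TLI, and an aggregate type Ty are in scope (these names are illustrative, not from the patch):

    // The DataLayout now comes from the Module rather than from TLI.
    const DataLayout &DL = M.getDataLayout();
    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, &Offsets, /*StartingOffset=*/0);
    // For Ty = {i32, [2 x float]} under a common layout, this flattens to
    // ValueVTs = {i32, f32, f32} and Offsets = {0, 4, 8}.
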
@@ -233,7 +233,8 @@ static bool isNoopBitcast(Type *T1, Type *T2,
static const Value *getNoopInput(const Value *V,
SmallVectorImpl<unsigned> &ValLoc,
unsigned &DataBits,
- const TargetLoweringBase &TLI) {
+ const TargetLoweringBase &TLI,
+ const DataLayout &DL) {
while (true) {
// Try to look through V1; if V1 is not an instruction, it can't be looked
// through.
@@ -255,16 +256,16 @@ static const Value *getNoopInput(const Value *V,
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
- TLI.getPointerTy().getSizeInBits() ==
- cast<IntegerType>(Op->getType())->getBitWidth())
+ DL.getPointerSizeInBits() ==
+ cast<IntegerType>(Op->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<PtrToIntInst>(I)) {
// Look through ptrtoint.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
- TLI.getPointerTy().getSizeInBits() ==
- cast<IntegerType>(I->getType())->getBitWidth())
+ DL.getPointerSizeInBits() ==
+ cast<IntegerType>(I->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<TruncInst>(I) &&
TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
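
Both cast cases are looked through only when the integer side is exactly pointer-sized. One hedged caveat: DL.getPointerSizeInBits() with no argument queries address space 0, so the rewritten check matches the old TLI.getPointerTy().getSizeInBits() behaviour for the default address space. A sketch of the inttoptr branch, with Op and I as in the hunk above:

    // No-op inttoptr: a scalar cast whose integer operand is exactly as
    // wide as a default-address-space pointer.
    bool IsNoop = !isa<VectorType>(I->getType()) &&
                  DL.getPointerSizeInBits(/*AS=*/0) ==
                      cast<IntegerType>(Op->getType())->getBitWidth();
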
@@ -331,14 +332,15 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
SmallVectorImpl<unsigned> &RetIndices,
SmallVectorImpl<unsigned> &CallIndices,
bool AllowDifferingSizes,
- const TargetLoweringBase &TLI) {
+ const TargetLoweringBase &TLI,
+ const DataLayout &DL) {
// Trace the sub-value needed by the return value as far back up the graph as
// possible, in the hope that it will intersect with the value produced by the
// call. In the simple case with no "returned" attribute, the hope is actually
// that we end up back at the tail call instruction itself.
unsigned BitsRequired = UINT_MAX;
- RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);
+ RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
// If this slot in the value returned is undef, it doesn't matter what the
// call puts there, it'll be fine.
@@ -350,7 +352,7 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
// a "returned" attribute, the search will be blocked immediately and the loop
// a Noop.
unsigned BitsProvided = UINT_MAX;
- CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);
+ CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
// There's no hope if we can't actually trace them to (the same part of!) the
// same value.
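
After both traces, the function compares what they reached. A hedged sketch of the shape of the final test, reconstructed from the surrounding context rather than shown in this hunk:

    // Both searches must converge on the same sub-value of the same Value,
    // and the call must provide every bit the return needs.
    if (CallVal != RetVal || CallIndices != RetIndices)
      return false;
    if (BitsProvided < BitsRequired ||
        (!AllowDifferingSizes && BitsProvided != BitsRequired))
      return false;
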
@@ -606,7 +608,8 @@ bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
// Finally, we can check whether the value produced by the tail call at this
// index is compatible with the value we return.
if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
- AllowDifferingSizes, TLI))
+ AllowDifferingSizes, TLI,
+ F->getParent()->getDataLayout()))
return false;
CallEmpty = !nextRealType(CallSubTypes, CallPath);
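
At the top level, the DataLayout is taken from the Function's parent Module, so TargetLoweringBase no longer needs to carry one. A hypothetical caller, assuming a ReturnInst *Ret fed by a CallInst *CI inside Function *F:

    // returnTypeIsEligibleForTailCall reads the DataLayout via
    // F->getParent()->getDataLayout() internally; callers pass only the
    // IR objects and the target lowering.
    if (returnTypeIsEligibleForTailCall(F, CI, Ret, TLI)) {
      // CI's result reaches Ret unmodified (modulo no-op casts), so the
      // call may be lowered as a tail call.
    }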