path: root/llvm/lib/IR/Value.cpp
Diffstat (limited to 'llvm/lib/IR/Value.cpp')
-rw-r--r--  llvm/lib/IR/Value.cpp  162
1 file changed, 122 insertions(+), 40 deletions(-)
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index cf9d08f6fc02..efb8d53e8964 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -83,13 +83,17 @@ Value::~Value() {
// reference and something is wrong. This code is here to print out where
// the value is still being referenced.
//
- if (!use_empty()) {
+ // Note that use_empty() cannot be called here, as it eventually downcasts
+ // 'this' to GlobalValue (a derived class of Value), but the GlobalValue
+ // part of the object has already been destroyed, so accessing it is UB.
+ //
+ if (!materialized_use_empty()) {
dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
for (auto *U : users())
dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
}
#endif
- assert(use_empty() && "Uses remain when a value is destroyed!");
+ assert(materialized_use_empty() && "Uses remain when a value is destroyed!");
// If this value is named, destroy the name. This should not be in a symtab
// at this point.
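
The comment above describes a general C++ rule: once a derived class's
destructor has finished, its members are dead, so a base-class destructor
must not call anything that downcasts 'this'. A minimal sketch of that
hazard with hypothetical types (not LLVM's own classes):

struct Derived;
struct Base {
  bool check() const;  // downcasts to Derived, much as use_empty() can
  ~Base() { check(); } // UB when run as part of destroying a Derived
};
struct Derived : Base {
  int Live = 1;
  ~Derived() { Live = 0; } // Derived state dies before ~Base executes
};
bool Base::check() const {
  // Reads a member of an already-destroyed subobject: undefined behavior.
  return static_cast<const Derived *>(this)->Live == 1;
}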
@@ -107,6 +111,10 @@ void Value::deleteValue() {
static_cast<DerivedUser *>(this)->DeleteValue( \
static_cast<DerivedUser *>(this)); \
break;
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ llvm_unreachable("constants should be destroyed with destroyConstant"); \
+ break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"
@@ -124,8 +132,10 @@ void Value::deleteValue() {
void Value::destroyValueName() {
ValueName *Name = getValueName();
- if (Name)
- Name->Destroy();
+ if (Name) {
+ MallocAllocator Allocator;
+ Name->Destroy(Allocator);
+ }
setValueName(nullptr);
}
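
ValueName is llvm::StringMapEntry<Value *>, and the change above moves it
to the allocator-taking Create/Destroy overloads. MallocAllocator is a
stateless wrapper over malloc/free, so constructing a fresh instance at
each call site costs nothing. A hedged usage sketch of that API pair
(header layout varies slightly across LLVM versions):

#include "llvm/ADT/StringMap.h"     // StringMapEntry
#include "llvm/Support/Allocator.h" // MallocAllocator

void demo() {
  llvm::MallocAllocator Alloc;
  // Allocates the entry and its inline key bytes in one shot.
  auto *Entry = llvm::StringMapEntry<int>::Create("example", Alloc, 7);
  // ... Entry->getKey() == "example", Entry->getValue() == 7 ...
  Entry->Destroy(Alloc); // must free through the same allocator type
}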
@@ -137,6 +147,51 @@ bool Value::hasNUsesOrMore(unsigned N) const {
return hasNItemsOrMore(use_begin(), use_end(), N);
}
+static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
+
+Use *Value::getSingleUndroppableUse() {
+ Use *Result = nullptr;
+ for (Use &U : uses()) {
+ if (!U.getUser()->isDroppable()) {
+ if (Result)
+ return nullptr;
+ Result = &U;
+ }
+ }
+ return Result;
+}
+
+bool Value::hasNUndroppableUses(unsigned int N) const {
+ return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
+}
+
+bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
+ return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
+}
+
+void Value::dropDroppableUses(
+ llvm::function_ref<bool(const Use *)> ShouldDrop) {
+ SmallVector<Use *, 8> ToBeEdited;
+ for (Use &U : uses())
+ if (U.getUser()->isDroppable() && ShouldDrop(&U))
+ ToBeEdited.push_back(&U);
+ for (Use *U : ToBeEdited) {
+ U->removeFromList();
+ if (auto *Assume = dyn_cast<IntrinsicInst>(U->getUser())) {
+ assert(Assume->getIntrinsicID() == Intrinsic::assume);
+ unsigned OpNo = U->getOperandNo();
+ if (OpNo == 0)
+ Assume->setOperand(0, ConstantInt::getTrue(Assume->getContext()));
+ else {
+ Assume->setOperand(OpNo, UndefValue::get(U->get()->getType()));
+ CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
+ BOI.Tag = getContext().pImpl->getOrInsertBundleTag("ignore");
+ }
+ } else
+ llvm_unreachable("unkown droppable use");
+ }
+}
+
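
The droppable-use API added above lets transforms distinguish uses that
merely inform the optimizer (currently llvm.assume operand bundles) from
uses that must keep a value alive. A hedged caller sketch; the helper name
is illustrative, not LLVM API:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Use.h"

// If every remaining use of I is droppable, shed them and erase I. The
// assumes themselves survive, rewritten to ignore the dropped operand.
static bool eraseIfOnlyDroppableUsesRemain(llvm::Instruction &I) {
  if (!I.hasNUndroppableUses(0)) // a real user still needs I
    return false;
  I.dropDroppableUses([](const llvm::Use *) { return true; });
  I.eraseFromParent();
  return true;
}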
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
// This can be computed either by scanning the instructions in BB, or by
// scanning the use list of this Value. Both lists can be very long, but
@@ -263,7 +318,8 @@ void Value::setNameImpl(const Twine &NewName) {
destroyValueName();
// Create the new name.
- setValueName(ValueName::Create(NameRef));
+ MallocAllocator Allocator;
+ setValueName(ValueName::Create(NameRef, Allocator));
getValueName()->setValue(this);
return;
}
@@ -463,8 +519,12 @@ enum PointerStripKind {
PSK_InBounds
};
+template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}
+
template <PointerStripKind StripKind>
-static const Value *stripPointerCastsAndOffsets(const Value *V) {
+static const Value *stripPointerCastsAndOffsets(
+ const Value *V,
+ function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
if (!V->getType()->isPointerTy())
return V;
@@ -474,6 +534,7 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
Visited.insert(V);
do {
+ Func(V);
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndices:
@@ -495,6 +556,8 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
+ if (!V->getType()->isPointerTy())
+ return V;
} else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
// TODO: If we know an address space cast will not change the
@@ -547,9 +610,9 @@ const Value *Value::stripPointerCastsAndInvariantGroups() const {
return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndInvariantGroups>(this);
}
-const Value *
-Value::stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
- bool AllowNonInbounds) const {
+const Value *Value::stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
if (!getType()->isPtrOrPtrVectorTy())
return this;
@@ -575,7 +638,7 @@ Value::stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
// of GEP's pointer type rather than the size of the original
// pointer type.
APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
- if (!GEP->accumulateConstantOffset(DL, GEPOffset))
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
return V;
// Stop traversal if the pointer offset wouldn't fit in the bit-width
@@ -584,7 +647,20 @@ Value::stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
if (GEPOffset.getMinSignedBits() > BitWidth)
return V;
- Offset += GEPOffset.sextOrTrunc(BitWidth);
+ // The external analysis can return an offset that differs from the one
+ // the value itself encodes, so the signed addition may overflow or
+ // underflow; detect that and stop the walk at this GEP if it does.
+ APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
+ if (!ExternalAnalysis) {
+ Offset += GEPOffsetST;
+ } else {
+ bool Overflow = false;
+ APInt OldOffset = Offset;
+ Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
+ if (Overflow) {
+ Offset = OldOffset;
+ return V;
+ }
+ }
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast ||
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
@@ -602,8 +678,9 @@ Value::stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
return V;
}
-const Value *Value::stripInBoundsOffsets() const {
- return stripPointerCastsAndOffsets<PSK_InBounds>(this);
+const Value *
+Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
+ return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
}
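
A hedged usage sketch of the widened signature above: with no
ExternalAnalysis the old purely-additive behavior is preserved, and when
one is supplied the offsets are combined with sadd_ov so a symbolic result
cannot silently wrap. The helper below is illustrative, not LLVM API:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

// Returns the underlying object of Ptr; on return, Offset holds the
// constant byte distance covered by the stripped GEP/cast chain.
static const llvm::Value *underlyingObject(const llvm::Value *Ptr,
                                           const llvm::DataLayout &DL,
                                           llvm::APInt &Offset) {
  Offset = llvm::APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  return Ptr->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds=*/true);
}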
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
@@ -617,7 +694,7 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
if (DerefBytes == 0 && (A->hasByValAttr() || A->hasStructRetAttr())) {
Type *PT = cast<PointerType>(A->getType())->getElementType();
if (PT->isSized())
- DerefBytes = DL.getTypeStoreSize(PT);
+ DerefBytes = DL.getTypeStoreSize(PT).getKnownMinSize();
}
if (DerefBytes == 0) {
DerefBytes = A->getDereferenceableOrNullBytes();
@@ -658,30 +735,31 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
}
} else if (auto *AI = dyn_cast<AllocaInst>(this)) {
if (!AI->isArrayAllocation()) {
- DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType());
+ DerefBytes =
+ DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
CanBeNull = false;
}
} else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
// TODO: Don't outright reject hasExternalWeakLinkage but set the
// CanBeNull flag.
- DerefBytes = DL.getTypeStoreSize(GV->getValueType());
+ DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedSize();
CanBeNull = false;
}
}
return DerefBytes;
}
-MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
+Align Value::getPointerAlignment(const DataLayout &DL) const {
assert(getType()->isPointerTy() && "must be pointer");
if (auto *GO = dyn_cast<GlobalObject>(this)) {
if (isa<Function>(GO)) {
- const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+ Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
switch (DL.getFunctionPtrAlignType()) {
case DataLayout::FunctionPtrAlignType::Independent:
return FunctionPtrAlign;
case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
- return std::max(FunctionPtrAlign, MaybeAlign(GO->getAlignment()));
+ return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
}
llvm_unreachable("Unhandled FunctionPtrAlignType");
}
@@ -694,43 +772,47 @@ MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
// it the preferred alignment. Otherwise, we have to assume that it
// may only have the minimum ABI alignment.
if (GVar->isStrongDefinitionForLinker())
- return MaybeAlign(DL.getPreferredAlignment(GVar));
+ return DL.getPreferredAlign(GVar);
else
- return Align(DL.getABITypeAlignment(ObjectType));
+ return DL.getABITypeAlign(ObjectType);
}
}
}
- return Alignment;
+ return Alignment.valueOrOne();
} else if (const Argument *A = dyn_cast<Argument>(this)) {
- const MaybeAlign Alignment(A->getParamAlignment());
+ const MaybeAlign Alignment = A->getParamAlign();
if (!Alignment && A->hasStructRetAttr()) {
// An sret parameter has at least the ABI alignment of the return type.
Type *EltTy = cast<PointerType>(A->getType())->getElementType();
if (EltTy->isSized())
- return Align(DL.getABITypeAlignment(EltTy));
+ return DL.getABITypeAlign(EltTy);
}
- return Alignment;
+ return Alignment.valueOrOne();
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
- const MaybeAlign Alignment(AI->getAlignment());
- if (!Alignment) {
- Type *AllocatedType = AI->getAllocatedType();
- if (AllocatedType->isSized())
- return MaybeAlign(DL.getPrefTypeAlignment(AllocatedType));
- }
- return Alignment;
+ return AI->getAlign();
} else if (const auto *Call = dyn_cast<CallBase>(this)) {
- const MaybeAlign Alignment(Call->getRetAlignment());
+ MaybeAlign Alignment = Call->getRetAlign();
if (!Alignment && Call->getCalledFunction())
- return MaybeAlign(
- Call->getCalledFunction()->getAttributes().getRetAlignment());
- return Alignment;
+ Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
+ return Alignment.valueOrOne();
} else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
- return MaybeAlign(CI->getLimitedValue());
+ return Align(CI->getLimitedValue());
+ }
+ } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
+ if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
+ const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
+ /*OnlyIfReduced=*/true))) {
+ size_t TrailingZeros = CstInt->getValue().countTrailingZeros();
+ // While the actual alignment may be large, elsewhere we have
+ // an arbitrary upper alignment limit, so let's clamp to it.
+ return Align(TrailingZeros < Value::MaxAlignmentExponent
+ ? uint64_t(1) << TrailingZeros
+ : Value::MaximumAlignment);
}
}
- return llvm::None;
+ return Align(1);
}
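
The new Constant branch above deduces alignment from the trailing zero
bits of the pointer's integer value, clamped to LLVM's maximum alignment.
A small worked sketch of just that arithmetic, outside any Value:

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

void demo() {
  // An address of 0x1000 has 12 trailing zero bits, so the largest power
  // of two it is certainly a multiple of is 1 << 12 = 4096.
  llvm::APInt Addr(/*numBits=*/64, /*val=*/0x1000);
  unsigned TZ = Addr.countTrailingZeros();
  uint64_t Alignment = uint64_t(1) << TZ;
  assert(TZ == 12 && Alignment == 4096);
}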
const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
@@ -754,12 +836,12 @@ void Value::reverseUseList() {
while (Current) {
Use *Next = Current->Next;
Current->Next = Head;
- Head->setPrev(&Current->Next);
+ Head->Prev = &Current->Next;
Head = Current;
Current = Next;
}
UseList = Head;
- Head->setPrev(&UseList);
+ Head->Prev = &UseList;
}
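
For context on the Prev writes above: in LLVM's intrusive use-list, Next
points at the next Use, while Prev stores the address of whichever pointer
currently points at this node (either the list root or another node's
Next). A self-contained sketch of the same reversal on a generic node
type, not LLVM's Use:

struct Node {
  Node *Next = nullptr;
  Node **Prev = nullptr; // address of the pointer that points at us
};

void reverse(Node *&List) {
  if (!List || !List->Next)
    return; // empty and single-element lists are already reversed
  Node *Head = List;
  Node *Current = Head->Next;
  Head->Next = nullptr; // old head becomes the tail
  while (Current) {
    Node *Next = Current->Next;
    Current->Next = Head;
    Head->Prev = &Current->Next; // old head is now reached via Current
    Head = Current;
    Current = Next;
  }
  List = Head;
  Head->Prev = &List; // new head hangs off the list root
}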
bool Value::isSwiftError() const {